/*
2 **************************************************************************
3 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17/* nss_ipsecmgr.c
18 * NSS to HLOS IPSec Manager
19 */
20#include <linux/types.h>
21#include <linux/ip.h>
22#include <linux/skbuff.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/rtnetlink.h>
26#include <asm/atomic.h>
27#include <nss_api_if.h>
28#include <nss_ipsec.h>
29#include "nss_ipsecmgr.h"
30
31
#if defined(CONFIG_DYNAMIC_DEBUG)
/*
 * Compile messages for dynamic enable/disable
 *
 * All severities funnel through pr_debug() so each call site can be
 * toggled at runtime through the dynamic-debug control interface.
 */
#define nss_ipsecmgr_error(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
#define nss_ipsecmgr_warn(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
#define nss_ipsecmgr_info(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
#define nss_ipsecmgr_trace(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)

#else
/*
 * Statically compile messages at different levels
 *
 * NOTE(review): the guard uses '<', i.e. a message prints only when the
 * compiled-in NSS_IPSECMGR_DEBUG_LEVEL is *below* the per-severity
 * constant — confirm the NSS_IPSECMGR_DEBUG_LVL_* ordering in
 * nss_ipsecmgr.h matches this intent.
 */
#define nss_ipsecmgr_error(s, ...) { \
	if (NSS_IPSECMGR_DEBUG_LEVEL < NSS_IPSECMGR_DEBUG_LVL_ERROR) { \
		pr_alert("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
	} \
}
#define nss_ipsecmgr_warn(s, ...) { \
	if (NSS_IPSECMGR_DEBUG_LEVEL < NSS_IPSECMGR_DEBUG_LVL_WARN) { \
		pr_warn("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
	} \
}
#define nss_ipsecmgr_info(s, ...) { \
	if (NSS_IPSECMGR_DEBUG_LEVEL < NSS_IPSECMGR_DEBUG_LVL_INFO) { \
		pr_notice("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
	} \
}
#define nss_ipsecmgr_trace(s, ...) { \
	if (NSS_IPSECMGR_DEBUG_LEVEL < NSS_IPSECMGR_DEBUG_LVL_TRACE) { \
		pr_info("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
	} \
}

#endif /* !CONFIG_DYNAMIC_DEBUG */
67
/*
 * NSS IPsec entry state
 *
 * Lifecycle of a table slot: INIT (never used, zero value) -> VALID
 * (rule committed) -> INVALID (rule deleted).
 */
enum nss_ipsecmgr_entry_state {
	NSS_IPSECMGR_ENTRY_STATE_INIT = 0,	/* init state of the entry */
	NSS_IPSECMGR_ENTRY_STATE_VALID = 1,	/* entry is valid */
	NSS_IPSECMGR_ENTRY_STATE_INVALID = 2,	/* entry is invalid */
	NSS_IPSECMGR_ENTRY_STATE_MAX
};
75
/*
 * IPsec table entry
 *
 * Host-side shadow of one SA slot in the NSS table; indexed by the
 * 'index' NSS reports in push/stats messages.
 */
struct nss_ipsecmgr_tbl_entry {
	struct nss_ipsec_rule_sel sel;		/* rule selector */
	struct nss_ipsec_sa_stats stats;	/* per entry stats */
	enum nss_ipsecmgr_entry_state state;	/* state */
};
82
/* NSS IPsec table type */
struct nss_ipsecmgr_tbl {
	uint32_t total_tx;		/* total packets tx'ed from ENCAP/DECAP */
	uint32_t total_rx;		/* total packets rx'ed at ENCAP/DECAP */
	uint32_t total_dropped;		/* total dropped packets at ENCAP/DECAP */
	struct nss_ipsecmgr_tbl_entry entry[NSS_IPSEC_MAX_SA];	/* table entry */
	uint32_t count;			/* number of entries */
	spinlock_t lock;		/* protects entry[] and count (taken _bh) */
};
92
/*
 * NSS IPsec manager private structure
 *
 * Lives in the netdev private area of the pseudo tunnel device.
 */
struct nss_ipsecmgr_priv {
	struct nss_ipsecmgr_tbl encap;		/* encap table */
	struct nss_ipsecmgr_tbl decap;		/* decap table */

	void *cb_ctx;				/* callback context */
	nss_ipsecmgr_callback_t cb_fn;		/* callback function */
	struct nss_ctx_instance *nss_ctx;	/* NSS context; NULL while (de)registering */
};
102
/*
 * Signature shared by the verify and commit handlers; both run with the
 * table spinlock held.
 */
typedef bool (*nss_ipsecmgr_op_t)(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim);

/* NSS IPsec manager operation (message types) complete routines */
struct nss_ipsecmgr_ops {
	nss_ipsecmgr_op_t verify_fn;	/* verify function for the op */
	nss_ipsecmgr_op_t commit_fn;	/* commit function for the op */
};
110
/* driver global info */
struct nss_ipsecmgr_drv {
	atomic_t sa_count;	/* number of free SA slots remaining (starts at NSS_IPSEC_MAX_SA) */
};

/*
 **********************
 * globals
 **********************
 */

static struct nss_ipsecmgr_drv gbl_drv_ctx;	/* global driver context */
123
124/*
125 **********************
126 * Helper Functions
127 **********************
128 */
129
130/*
131 * nss_ipsecmgr_get_tbl()
132 * return the table associated with the interface number
133 */
134static inline struct nss_ipsecmgr_tbl *nss_ipsecmgr_get_tbl(struct nss_ipsecmgr_priv *priv, uint16_t if_num)
135{
136 switch (if_num) {
137 case NSS_IPSEC_ENCAP_IF_NUMBER:
138 return &priv->encap;
139
140 case NSS_IPSEC_DECAP_IF_NUMBER:
141 return &priv->decap;
142
143 default:
144 return NULL;
145 }
146}
147
148/*
149 * nss_ipsecmgr_copy_encap_add()
150 * prepare the IPsec message for encap add operation
151 */
152static void nss_ipsecmgr_copy_encap_add(struct net_device *dev, struct nss_ipsec_rule *msg, union nss_ipsecmgr_rule *rule)
153{
154 struct nss_ipsecmgr_encap_add *encap = &rule->encap_add;
155 struct nss_ipsec_rule_sel *sel = &msg->sel;
156 struct nss_ipsec_rule_oip *oip = &msg->oip;
157 struct nss_ipsec_rule_data *data = &msg->data;
158
159 /*
160 * Populate the selectors for encap direction
161 */
162 sel->ipv4_src = encap->inner_ipv4_src;
163 sel->ipv4_dst = encap->inner_ipv4_dst;
164
165 sel->src_port = encap->inner_src_port;
166 sel->dst_port = encap->inner_dst_port;
167
168 sel->ipv4_proto = encap->inner_ipv4_proto;
169
170 /*
171 * Populate the outer IP data for encap direction
172 */
173 oip->ipv4_dst = encap->outer_ipv4_dst;
174 oip->ipv4_src = encap->outer_ipv4_src;
175
176 oip->esp_spi = encap->esp_spi;
177
178 oip->ipv4_ttl = encap->outer_ipv4_ttl;
179
180 /*
181 * Populate the data part
182 */
Radha krishna Simha Jiguruca4f85c2014-08-23 17:00:07 +0530183 data->cipher_algo = encap->cipher_algo;
Samarjeet Banerjee77332312014-08-07 14:48:22 +0530184 data->esp_seq_skip = (encap->esp_seq_skip == 1);
185
Radha krishna Simha Jiguruca4f85c2014-08-23 17:00:07 +0530186 data->esp_icv_len = encap->esp_icv_len;
187 data->auth_algo = encap->auth_algo;
188
Samarjeet Banerjee77332312014-08-07 14:48:22 +0530189 data->crypto_index = encap->crypto_index;
190
191 data->nat_t_req = encap->nat_t_req;
192}
193
194/*
195 * nss_ipsecmgr_copy_decap_add()
196 * prepare the IPsec message for decap add operation
197 */
198static void nss_ipsecmgr_copy_decap_add(struct net_device *dev, struct nss_ipsec_rule *msg, union nss_ipsecmgr_rule *rule)
199{
200 struct nss_ipsecmgr_decap_add *decap = &rule->decap_add;
201 struct nss_ipsec_rule_sel *sel = &msg->sel;
202 struct nss_ipsec_rule_data *data = &msg->data;
203 /*
204 * Populate the selectors for encap direction
205 */
206 sel->ipv4_src = decap->outer_ipv4_src;
207 sel->ipv4_dst = decap->outer_ipv4_dst;
208
209 sel->esp_spi = decap->esp_spi;
210
211 sel->ipv4_proto = IPPROTO_ESP;
212
213 /*
214 * Populate the data part
215 */
Radha krishna Simha Jiguruca4f85c2014-08-23 17:00:07 +0530216 data->cipher_algo = decap->cipher_algo;
Samarjeet Banerjee77332312014-08-07 14:48:22 +0530217
Radha krishna Simha Jiguruca4f85c2014-08-23 17:00:07 +0530218 data->esp_icv_len = decap->esp_icv_len;
219 data->auth_algo = decap->auth_algo;
220
Samarjeet Banerjee77332312014-08-07 14:48:22 +0530221 data->crypto_index = decap->crypto_index;
222
Tushar Mathur8684c402014-09-12 14:41:37 +0530223 data->nat_t_req = decap->nat_t_req;
Samarjeet Banerjee77332312014-08-07 14:48:22 +0530224 data->window_size = decap->window_size;
225}
226
227/*
228 * nss_ipsecmgr_copy_encap_del()
229 * prepare the IPsec message for encap del operation
230 */
231static void nss_ipsecmgr_copy_encap_del(struct net_device *dev, struct nss_ipsec_rule *msg, union nss_ipsecmgr_rule *rule)
232{
233 struct nss_ipsecmgr_encap_del *encap = &rule->encap_del;
234 struct nss_ipsec_rule_sel *sel = &msg->sel;
235
236 /*
237 * Populate the selectors for encap direction
238 */
239 sel->ipv4_src = encap->inner_ipv4_src;
240 sel->ipv4_dst = encap->inner_ipv4_dst;
241
242 sel->src_port = encap->inner_src_port;
243 sel->dst_port = encap->inner_dst_port;
244
245 sel->ipv4_proto = encap->inner_ipv4_proto;
246}
247
248/*
249 * nss_ipsecmgr_copy_decap_del()
250 * prepare the IPsec message for decap del operation
251 */
252static void nss_ipsecmgr_copy_decap_del(struct net_device *dev, struct nss_ipsec_rule *msg, union nss_ipsecmgr_rule *rule)
253{
254 struct nss_ipsecmgr_decap_del *decap = &rule->decap_del;
255 struct nss_ipsec_rule_sel *sel = &msg->sel;
256 /*
257 * Populate the selectors for encap direction
258 */
259 sel->ipv4_src = decap->outer_ipv4_src;
260 sel->ipv4_dst = decap->outer_ipv4_dst;
261
262 sel->esp_spi = decap->esp_spi;
263
264 sel->ipv4_proto = IPPROTO_ESP;
265}
266
267/*
268 **********************
269 * message handlers
270 **********************
271 */
272/*
273 * nss_ipsecmgr_verify_add()
274 * verify the Add operation before committing
275 */
276static bool nss_ipsecmgr_verify_add(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim)
277{
278 struct nss_ipsec_rule *rule = &nim->msg.push;
279 struct nss_ipsecmgr_tbl_entry *entry;
280 uint32_t tbl_idx;
281
282 tbl_idx = rule->index;
283 if (tbl_idx >= NSS_IPSEC_MAX_SA) {
284 nss_ipsecmgr_error("table index out of range\n");
285 return false;
286 }
287
288 entry = &tbl->entry[tbl_idx];
289
290 /*
291 * XXX:Duplicate hit or hash collision; We need to handle
292 * hash collision
293 */
294 if (entry->state == NSS_IPSECMGR_ENTRY_STATE_VALID) {
295 return false;
296 }
297
298 /*
299 * Table full, XXX:must increment stats
300 */
301 if ((tbl->count + 1) >= NSS_IPSEC_MAX_SA) {
302 return false;
303 }
304
305 return true;
306}
307
308/*
309 * nss_ipsecmgr_commit_add()
310 * commit the Add operation
311 */
312static bool nss_ipsecmgr_commit_add(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim)
313{
314 struct nss_ipsec_rule *rule = &nim->msg.push;
315 struct nss_ipsecmgr_tbl_entry *entry;
316 uint32_t tbl_idx;
317
318 /* Reduce the number of availabe SA(s) as we have successfully added one */
319 atomic_dec(&gbl_drv_ctx.sa_count);
320
321 tbl_idx = rule->index;
322 entry = &tbl->entry[tbl_idx];
323
324 tbl->count++;
325
326 memcpy(&entry->sel, &rule->sel, sizeof(struct nss_ipsec_rule_sel));
327 entry->state = NSS_IPSECMGR_ENTRY_STATE_VALID;
328
329 return true;
330}
331
332/*
333 * nss_ipsecmgr_verify_del()
334 * verify the del operation before committing
335 */
336static bool nss_ipsecmgr_verify_del(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim)
337{
338 struct nss_ipsec_rule *rule = &nim->msg.push;
339 struct nss_ipsecmgr_tbl_entry *entry;
340 uint32_t tbl_idx;
341
342 tbl_idx = rule->index;
343 if (tbl_idx >= NSS_IPSEC_MAX_SA) {
344 nss_ipsecmgr_error("table index out of range\n");
345 return false;
346 }
347
348 entry = &tbl->entry[tbl_idx];
349
350 /*
351 * Entry already deleted, XXX:must increment stats
352 */
353 if (entry->state == NSS_IPSECMGR_ENTRY_STATE_INVALID) {
354 return false;
355 }
356
357 /*
358 * Entry never existed, XXX:must increment stats
359 */
360 if (entry->state == NSS_IPSECMGR_ENTRY_STATE_INIT) {
361 return false;
362 }
363
364 /*
365 * Table empty, XXX:must increment stats
366 */
367 if (tbl->count == 0) {
368 return false;
369 }
370
371 return true;
372}
373
374/*
375 * nss_ipsecmgr_commit_del()
376 * commit the Del operation
377 */
378static bool nss_ipsecmgr_commit_del(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim)
379{
380 struct nss_ipsec_rule *rule = &nim->msg.push;
381 struct nss_ipsecmgr_tbl_entry *entry;
382 uint32_t tbl_idx;
383
384 /* Increase the number of available SA(s) as we have successfully freed one */
385 atomic_inc(&gbl_drv_ctx.sa_count);
386
387 tbl_idx = rule->index;
388 entry = &tbl->entry[tbl_idx];
389
390 tbl->count--;
391
392 memset(&entry->sel, 0, sizeof(struct nss_ipsec_rule_sel));
393 entry->state = NSS_IPSECMGR_ENTRY_STATE_INVALID;
394
395 return true;
396}
397
398/*
399 * nss_ipsecmgr_verify_stats()
400 * verify stats update operation
401 */
402static bool nss_ipsecmgr_verify_stats(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim)
403{
404 struct nss_ipsec_stats *stats = &nim->msg.stats;
405
406 /*
407 * Table empty, nothing to update
408 */
409 if (tbl->count == 0) {
410 return false;
411 }
412
413 /*
414 * nothing to update
415 */
416 if (stats->num_entries == 0) {
417 return false;
418 }
419
420 return true;
421}
422
423/*
424 * nss_ipsecmgr_commit_stats()
425 * commit stats for the SA
426 */
427static bool nss_ipsecmgr_commit_stats(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim)
428{
429 struct nss_ipsec_stats *stats = &nim->msg.stats;
430 struct nss_ipsec_sa_stats *sa_stats;
431 struct nss_ipsecmgr_tbl_entry *entry;
432 uint32_t tbl_idx;
433 int i;
434
435 tbl->total_tx = stats->total_tx;
436 tbl->total_rx = stats->total_rx;
437 tbl->total_dropped = stats->total_dropped;
438
439 for (i = 0; i < stats->num_entries; i++) {
440 sa_stats = &stats->sa[i];
441 tbl_idx = sa_stats->index;
442
443 entry = &tbl->entry[tbl_idx];
444
445 memcpy(&entry->stats, sa_stats, sizeof(struct nss_ipsec_sa_stats));
446 }
447
448 return true;
449}
450
451/*
452 * nss_ipsecmgr_verify_flush()
453 * verify the flush operation
454 */
455static bool nss_ipsecmgr_verify_flush(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim)
456{
457 return true;
458}
459
460/*
461 * nss_ipsecmgr_commit_flush()
462 * commit the flush operation
463 */
464static bool nss_ipsecmgr_commit_flush(struct net_device *dev, struct nss_ipsecmgr_tbl *tbl, struct nss_ipsec_msg *nim)
465{
466 tbl->count = 0;
467
468 return true;
469}
470
471/*
472 * callback operation
473 */
474const static struct nss_ipsecmgr_ops cb_ops[NSS_IPSEC_MSG_TYPE_MAX] = {
475 [NSS_IPSEC_MSG_TYPE_ADD_RULE] = {nss_ipsecmgr_verify_add, nss_ipsecmgr_commit_add},
476 [NSS_IPSEC_MSG_TYPE_DEL_RULE] = {nss_ipsecmgr_verify_del, nss_ipsecmgr_commit_del},
477 [NSS_IPSEC_MSG_TYPE_SYNC_STATS] = {nss_ipsecmgr_verify_stats, nss_ipsecmgr_commit_stats},
478 [NSS_IPSEC_MSG_TYPE_FLUSH_TUN] = {nss_ipsecmgr_verify_flush, nss_ipsecmgr_commit_flush}
479};
480
481/*
482 * nss_ipsecmgr_op_receive()
483 * asynchronous event reception
484 */
485static void nss_ipsecmgr_op_receive(void *app_data, struct nss_ipsec_msg *nim)
486{
487 struct net_device *tun_dev = (struct net_device *)app_data;
488 struct nss_ipsecmgr_priv *priv;
489 struct nss_ipsecmgr_tbl *tbl;
490 nss_ipsecmgr_op_t verify_fn;
491 nss_ipsecmgr_op_t commit_fn;
492 struct net_device *dev;
493
494 BUG_ON(tun_dev == NULL);
495 BUG_ON(nim == NULL);
496
497 /* this holds the ref_cnt for the device */
498 dev = dev_get_by_index(&init_net, nim->tunnel_id);
499 if (dev == NULL) {
500 nss_ipsecmgr_error("event received on deallocated I/F (%d)\n", nim->tunnel_id);
501 return;
502 }
503
504 if (dev != tun_dev) {
505 nss_ipsecmgr_error("event received on incorrect I/F (%d)\n", nim->tunnel_id);
506 goto done;
507 }
508
509 priv = netdev_priv(dev);
510
511 tbl = nss_ipsecmgr_get_tbl(priv, nim->cm.interface);
512 if (tbl == NULL) {
513 nss_ipsecmgr_error("invalid interface number(%d)\n", nim->cm.interface);
514 goto done;
515 }
516
517 verify_fn = cb_ops[nim->cm.type].verify_fn;
518 commit_fn = cb_ops[nim->cm.type].commit_fn;
519
520 if (!verify_fn || !commit_fn) {
521 nss_ipsecmgr_error("unhandled type (%d)\n", nim->cm.type);
522 goto done;
523 }
524
525 /*
526 * Now we can operate on the entry of the table
527 */
528 spin_lock_bh(&tbl->lock);
529
530 if (verify_fn(dev, tbl, nim) == true) {
531 commit_fn(dev, tbl, nim);
532 }
533
534 spin_unlock_bh(&tbl->lock);
535done:
536 /* release the device as we are done */
537 dev_put(dev);
538}
539
540/*
541 * nss_ipsecmgr_op_send()
542 * Push a IPsec rule to NSS
543 */
544static bool nss_ipsecmgr_op_send(struct net_device *dev, struct nss_ipsec_msg *nim, uint32_t if_num, enum nss_ipsec_msg_type type)
545{
546 struct nss_ipsecmgr_priv *priv;
547 nss_tx_status_t status;
548
549 priv = netdev_priv(dev);
550 if (priv->nss_ctx == NULL) {
551 nss_ipsecmgr_error("Tunnel registration or de-registration is underway\n");
552 return false;
553 }
554
555 nim->tunnel_id = dev->ifindex;
556
557 nss_cmn_msg_init(&nim->cm, if_num, type, NSS_IPSEC_MSG_LEN, nss_ipsecmgr_op_receive, dev);
558
559 status = nss_ipsec_tx_msg(priv->nss_ctx, nim);
560 if (status != NSS_TX_SUCCESS) {
561 nss_ipsecmgr_error("unable to push rule(%d) for %s\n", type, dev->name);
562 return false;
563 }
564
565 return true;
566}
567
568/*
569 * nss_ipsecmgr_buf_receive()
570 * receive NSS exception packets
571 */
572static void nss_ipsecmgr_buf_receive(void *app_data, void *os_buf, __attribute((unused)) struct napi_struct *napi)
573{
574 struct net_device *dev = (struct net_device *)app_data;
575 struct sk_buff *skb = (struct sk_buff *)os_buf;
576 struct nss_ipsecmgr_priv *priv;
577 nss_ipsecmgr_callback_t cb_fn;
578 void *cb_ctx;
579 struct iphdr *ip;
580
581 BUG_ON(dev == NULL);
582 BUG_ON(skb == NULL);
583
584 /* hold the device till we process it */
585 dev_hold(dev);
586
587 /*
588 * XXX:need to ensure that the dev being accessed is not deleted
589 */
590 priv = netdev_priv(dev);
591
592 skb->dev = dev;
593
594 cb_fn = priv->cb_fn;
595 cb_ctx = priv->cb_ctx;
596
597 /*
598 * if tunnel creator gave a callback then send the packet without
599 * any modifications to him
600 */
601 if (cb_fn && cb_ctx) {
602 cb_fn(cb_ctx, skb);
603 goto done;
604 }
605
606 ip = (struct iphdr *)skb->data;
607 if ((ip->version != IPVERSION) || (ip->ihl != 5)) {
608 nss_ipsecmgr_error("dropping packets(IP version:%x, Header len:%x)\n", ip->version, ip->ihl);
609 dev_kfree_skb_any(skb);
610 goto done;
611 }
612
613 skb_reset_network_header(skb);
614 skb_reset_mac_header(skb);
615
616 skb->pkt_type = PACKET_HOST;
617 skb->protocol = cpu_to_be16(ETH_P_IP);
618 skb->skb_iif = dev->ifindex;
619
620 netif_receive_skb(skb);
621done:
622 /* release the device as we are done */
623 dev_put(dev);
624}
625
626/*
627 * nss_ipsecmgr_buf_drop()
628 * invalid buffer received;drop it and account it
629 */
630static void nss_ipsecmgr_buf_drop(void *app_data, void *os_buf, __attribute((unused)) struct napi_struct *napi)
631{
632 struct sk_buff *skb = (struct sk_buff *)os_buf;
633 struct iphdr *ip;
634
635 BUG_ON(skb == NULL);
636
637 ip = (struct iphdr *)skb->data;
638 if ((ip->version != IPVERSION) || (ip->ihl != 5)) {
639 nss_ipsecmgr_error("dropping packets(IP version:%x, Header len:%x)\n", ip->version, ip->ihl);
640 }
641
642 /* XXX: increment stats */
643
644 dev_kfree_skb_any(skb);
645}
646/*
647 **********************
648 * Netdev ops
649 **********************
650 */
651
652/*
653 * nss_ipsecmgr_tunnel_init()
654 * initiallizes the tunnel
655 */
656static int nss_ipsecmgr_tunnel_init(struct net_device *dev)
657{
658 struct nss_ipsecmgr_priv *priv;
659
660 priv = netdev_priv(dev);
661
662 priv->nss_ctx = nss_ipsec_get_context();
663
664 return 0;
665}
666
667/*
668 * nss_ipsecmgr_tunnel_exit()
669 * deinitiallizes the tunnel
670 */
671static void nss_ipsecmgr_tunnel_exit(struct net_device *dev)
672{
673 struct nss_ipsecmgr_priv *priv;
674
675 priv = netdev_priv(dev);
676
677 priv->nss_ctx = NULL;
678}
679
680/*
681 * nss_ipsecmgr_tunnel_open()
682 * open the tunnel for usage
683 */
684static int nss_ipsecmgr_tunnel_open(struct net_device *dev)
685{
686 struct nss_ipsecmgr_priv *priv;
687
688 priv = netdev_priv(dev);
689
690 netif_start_queue(dev);
691
692 return 0;
693}
694
695/*
696 * nss_ipsecmgr_tunnel_stop()
697 * stop the IPsec tunnel
698 */
699static int nss_ipsecmgr_tunnel_stop(struct net_device *dev)
700{
701 struct nss_ipsecmgr_priv *priv;
702
703 priv = netdev_priv(dev);
704
705 netif_stop_queue(dev);
706
707 return 0;
708}
709
710/*
711 * nss_ipsecmgr_tunnel_xmit()
712 * tunnel transmit function
713 */
714static netdev_tx_t nss_ipsecmgr_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
715{
716 nss_ipsecmgr_warn("tunnel transmit not implemented, freeing the skb\n");
717
718 dev_kfree_skb_any(skb);
719
720 return NETDEV_TX_OK;
721}
722
723/*
724 * nss_ipsecmgr_tunnel_stats()
725 * get tunnel statistics
726 */
727static struct net_device_stats *nss_ipsecmgr_tunnel_stats(struct net_device *dev)
728{
729 struct net_device_stats *stats = &dev->stats;
730 struct nss_ipsecmgr_priv *priv;
731 struct nss_ipsecmgr_tbl *encap;
732 struct nss_ipsecmgr_tbl *decap;
733
734 priv = netdev_priv(dev);
735
736 encap = &priv->encap;
737 decap = &priv->decap;
738
739 stats->rx_packets = encap->total_rx + decap->total_rx;
740 stats->tx_packets = encap->total_tx + decap->total_tx;
741 stats->tx_dropped = encap->total_dropped + decap->total_dropped;
742
743 return stats;
744}
745
/* NSS IPsec tunnel operation */
static const struct net_device_ops nss_ipsecmgr_tunnel_ops = {
	.ndo_init = nss_ipsecmgr_tunnel_init,		/* acquire NSS context */
	.ndo_uninit = nss_ipsecmgr_tunnel_exit,		/* release NSS context */
	.ndo_start_xmit = nss_ipsecmgr_tunnel_xmit,	/* host xmit unsupported; drops */
	.ndo_open = nss_ipsecmgr_tunnel_open,
	.ndo_stop = nss_ipsecmgr_tunnel_stop,
	.ndo_get_stats = nss_ipsecmgr_tunnel_stats,
};
755
756/*
757 * nss_ipsecmgr_tunnel_free()
758 * free an existing IPsec tunnel interface
759 */
760static void nss_ipsecmgr_tunnel_free(struct net_device *dev)
761{
762 struct nss_ipsecmgr_priv *priv = netdev_priv(dev);
763 struct nss_ipsecmgr_tbl *encap_tbl;
764 struct nss_ipsecmgr_tbl *decap_tbl;
765
766 encap_tbl = &priv->encap;
767 decap_tbl = &priv->decap;
768
769 nss_ipsecmgr_info("IPsec tunnel device(%s) freed\n", dev->name);
770 nss_ipsecmgr_info("Entries left: encap(%d), decap(%d)\n", encap_tbl->count, decap_tbl->count);
771
772 free_netdev(dev);
773}
774
775/*
776 * nss_ipsecmr_setup_tunnel()
777 * setup the IPsec tunnel
778 */
779static void nss_ipsecmgr_tunnel_setup(struct net_device *dev)
780{
781 dev->addr_len = ETH_ALEN;
782 dev->mtu = NSS_IPSECMGR_TUN_MTU(ETH_DATA_LEN);
783
784 dev->hard_header_len = NSS_IPSECMGR_TUN_MAX_HDR_LEN;
785 dev->needed_headroom = NSS_IPSECMGR_TUN_HEADROOM;
786 dev->needed_tailroom = NSS_IPSECMGR_TUN_TAILROOM;
787
788 dev->type = NSS_IPSEC_ARPHRD_IPSEC;
789
790 dev->ethtool_ops = NULL;
791 dev->header_ops = NULL;
792 dev->netdev_ops = &nss_ipsecmgr_tunnel_ops;
793
794 dev->destructor = nss_ipsecmgr_tunnel_free;
795
796 /*
797 * XXX:should get the MAC address from the ethernet device
798 */
799 memcpy(dev->dev_addr, "\xaa\xbb\xcc\xdd\xee\xff", dev->addr_len);
800 memset(dev->broadcast, 0xff, dev->addr_len);
801 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
802}
803
804/*
805 **********************
806 * Exported Functions
807 **********************
808 */
809
810/*
811 * nss_ipsecmgr_tunnel_add()
812 * add a IPsec pseudo tunnel device
813 */
814struct net_device *nss_ipsecmgr_tunnel_add(void *cb_ctx, nss_ipsecmgr_callback_t cb)
815{
816 struct net_device *dev;
817 struct nss_ipsecmgr_priv *priv;
818 int status;
819
820 dev = alloc_netdev(sizeof(struct nss_ipsecmgr_priv), NSS_IPSECMGR_TUN_NAME, nss_ipsecmgr_tunnel_setup);
821 if (!dev) {
822 nss_ipsecmgr_error("unable to allocate a tunnel device\n");
823 return NULL;
824 }
825
826 priv = netdev_priv(dev);
827
828 priv->cb_ctx = cb_ctx;
829 priv->cb_fn = cb;
830
831 spin_lock_init(&priv->encap.lock);
832 spin_lock_init(&priv->decap.lock);
833
834 status = rtnl_is_locked() ? register_netdevice(dev) : register_netdev(dev);
835 if (status < 0) {
836 goto fail;
837 }
838
839 /* if IPsec encap delivers a packet as exeception, then there is something wrong */
840 nss_ipsec_data_register(NSS_IPSEC_ENCAP_IF_NUMBER, nss_ipsecmgr_buf_drop, dev);
841
842 nss_ipsec_data_register(NSS_C2C_TX_INTERFACE, nss_ipsecmgr_buf_receive, dev);
843 nss_ipsec_data_register(NSS_IPSEC_DECAP_IF_NUMBER, nss_ipsecmgr_buf_receive, dev);
844
845 nss_ipsec_notify_register(NSS_IPSEC_ENCAP_IF_NUMBER, nss_ipsecmgr_op_receive, dev);
846 nss_ipsec_notify_register(NSS_IPSEC_DECAP_IF_NUMBER, nss_ipsecmgr_op_receive, dev);
847
848 return dev;
849
850fail:
851 free_netdev(dev);
852
853 return NULL;
854}
855EXPORT_SYMBOL(nss_ipsecmgr_tunnel_add);
856
857/*
858 * nss_ipsecmgr_del_tunnel()
859 * delete an existing IPsec tunnel
860 */
861bool nss_ipsecmgr_tunnel_del(struct net_device *dev)
862{
863 struct nss_ipsecmgr_priv *priv;
864 struct nss_ipsecmgr_tbl *encap;
865 struct nss_ipsecmgr_tbl *decap;
866 struct nss_ipsec_msg nim;
867 bool status;
868
869 priv = netdev_priv(dev);
870
871 encap = &priv->encap;
872 decap = &priv->decap;
873
874 /*
875 * Unregister the callbacks from the HLOS as we are no longer
876 * interested in exception data & async messages
877 */
878
879 nss_ipsec_data_unregister(priv->nss_ctx, NSS_C2C_TX_INTERFACE);
880
881 nss_ipsec_notify_unregister(priv->nss_ctx, NSS_IPSEC_ENCAP_IF_NUMBER);
882 nss_ipsec_notify_unregister(priv->nss_ctx, NSS_IPSEC_DECAP_IF_NUMBER);
883
884 nss_ipsec_data_unregister(priv->nss_ctx, NSS_IPSEC_DECAP_IF_NUMBER);
885 nss_ipsec_data_unregister(priv->nss_ctx, NSS_IPSEC_ENCAP_IF_NUMBER);
886
887 priv->cb_fn = NULL;
888 priv->cb_ctx = NULL;
889
890 /*
891 * Prepare to flush all SA(s) inside a tunnel
892 */
893 memset(&nim, 0, sizeof(struct nss_ipsec_msg));
894
895 status = nss_ipsecmgr_op_send(dev, &nim, NSS_IPSEC_ENCAP_IF_NUMBER, NSS_IPSEC_MSG_TYPE_FLUSH_TUN);
896 if (status != true) {
897 nss_ipsecmgr_error("unable complete tunnel deletion, for ingress rules\n");
898 goto fail;
899 }
900
901 /* release all SA(s) associated with the encap */
902 atomic_sub(encap->count, &gbl_drv_ctx.sa_count);
903
904 status = nss_ipsecmgr_op_send(dev, &nim, NSS_IPSEC_DECAP_IF_NUMBER, NSS_IPSEC_MSG_TYPE_FLUSH_TUN);
905 if (status != true) {
906 nss_ipsecmgr_error("unable complete tunnel deletion, for egress rules\n");
907 goto fail;
908 }
909
910 /* release all SA(s) associated with the decap */
911 atomic_sub(decap->count, &gbl_drv_ctx.sa_count);
912
913 /*
914 * The unregister should start here but the expectation is that the free would
915 * happen when the reference count goes down to '0'
916 */
917 rtnl_is_locked() ? unregister_netdevice(dev) : unregister_netdev(dev);
918
919 return true;
920fail:
921 /* caller must retry */
922 return false;
923}
924EXPORT_SYMBOL(nss_ipsecmgr_tunnel_del);
925
926/*
927 * nss_ipsecmgr_sa_add()
928 * add a new SA to the tunnel
929 */
930bool nss_ipsecmgr_sa_add(struct net_device *dev, union nss_ipsecmgr_rule *rule, enum nss_ipsecmgr_rule_type type)
931{
932 struct nss_ipsecmgr_priv *priv;
933 struct nss_ipsec_msg nim;
934 uint32_t if_num;
935
936 priv = netdev_priv(dev);
937
938 /* check if there are free SA(s) */
939 if (atomic_read(&gbl_drv_ctx.sa_count) == 0) {
940 return false;
941 }
942
943 memset(&nim, 0, sizeof(struct nss_ipsec_msg));
944
945 /* XXX: some basic validation of the passed rule needs to happen */
946
947 switch (type) {
948 case NSS_IPSECMGR_RULE_TYPE_ENCAP:
949 if_num = NSS_IPSEC_ENCAP_IF_NUMBER;
950 nss_ipsecmgr_copy_encap_add(dev, &nim.msg.push, rule);
951 break;
952
953 case NSS_IPSECMGR_RULE_TYPE_DECAP:
954 if_num = NSS_IPSEC_DECAP_IF_NUMBER;
955 nss_ipsecmgr_copy_decap_add(dev, &nim.msg.push, rule);
956 break;
957
958 default:
959 nss_ipsecmgr_error("Unknown rule type(%d) for Add operation\n", type);
960 return false;
961 }
962
963 return nss_ipsecmgr_op_send(dev, &nim, if_num, NSS_IPSEC_MSG_TYPE_ADD_RULE);
964
965}
966EXPORT_SYMBOL(nss_ipsecmgr_sa_add);
967
968/*
969 * nss_ipsecmgr_sa_del()
970 *
971 */
972bool nss_ipsecmgr_sa_del(struct net_device *dev, union nss_ipsecmgr_rule *rule, enum nss_ipsecmgr_rule_type type)
973{
974 struct nss_ipsecmgr_priv *priv;
975 struct nss_ipsec_msg nim;
976 uint32_t if_num;
977
978 priv = netdev_priv(dev);
979
980 /* check if all SA(s) are already freed */
981 if (atomic_read(&gbl_drv_ctx.sa_count) == NSS_IPSEC_MAX_SA) {
982 return false;
983 }
984
985 memset(&nim, 0, sizeof(struct nss_ipsec_msg));
986
987 /* XXX: some basic validation of the passed rule needs to happen */
988
989 switch (type) {
990 case NSS_IPSECMGR_RULE_TYPE_ENCAP:
991 if_num = NSS_IPSEC_ENCAP_IF_NUMBER;
992 nss_ipsecmgr_copy_encap_del(dev, &nim.msg.push, rule);
993 break;
994
995 case NSS_IPSECMGR_RULE_TYPE_DECAP:
996 if_num = NSS_IPSEC_DECAP_IF_NUMBER;
997 nss_ipsecmgr_copy_decap_del(dev, &nim.msg.push, rule);
998 break;
999
1000 default:
1001 nss_ipsecmgr_error("Unknown rule type(%d) for Del operation\n", type);
1002 return false;
1003 }
1004
1005 return nss_ipsecmgr_op_send(dev, &nim, if_num, NSS_IPSEC_MSG_TYPE_DEL_RULE);
1006}
1007EXPORT_SYMBOL(nss_ipsecmgr_sa_del);
1008
/*
 * nss_ipsecmgr_init()
 *	module load handler; seed the global free-SA pool
 *
 * NOTE(review): __DATE__ makes the build non-reproducible and trips
 * -Wdate-time on newer toolchains — consider removing.
 */
static int __init nss_ipsecmgr_init(void)
{
	nss_ipsecmgr_info("NSS IPsec manager loaded: Build date %s\n", __DATE__);

	memset(&gbl_drv_ctx, 0, sizeof(struct nss_ipsecmgr_drv));

	atomic_set(&gbl_drv_ctx.sa_count, NSS_IPSEC_MAX_SA);

	return 0;
}
1019
1020
1021static void __exit nss_ipsecmgr_exit(void)
1022{
1023 nss_ipsecmgr_info("NSS IPsec manager unloader\n");
1024}
1025
1026module_init(nss_ipsecmgr_init);
1027module_exit(nss_ipsecmgr_exit);