crypto: introduce crypto infra

Adds a generic crypto infrastructure under src/vnet/crypto: engines
register themselves with a name, priority and description, provide
per-op-type handlers, and the highest-priority engine becomes the
active handler for each op type. Callers build vnet_crypto_op_t
descriptors and submit them with vnet_crypto_process_ops(). Two new
CLI commands, "show crypto engines" and "show crypto handlers", list
the registered engines and the active/candidate handlers. IPsec AH/ESP
is converted to use this API instead of calling OpenSSL directly.

Change-Id: Ibf320b3e7b054b686f3af9a55afd5d5bda9b1048
Signed-off-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
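
A minimal sketch of how an engine would plug into this infra (the "example" engine name, its priority of 50 and the handler body are hypothetical, not part of this patch; a real engine would live in its own plugin and be arranged to initialize after vnet_crypto_init):

#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

/* Sketch only: handler matching vnet_crypto_ops_handler_t */
static u32
example_aes_128_cbc_enc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                         u32 n_ops)
{
  u32 i;
  for (i = 0; i < n_ops; i++)
    {
      /* ... run AES-128-CBC over ops[i]->src into ops[i]->dst ... */
      ops[i]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
    }
  return n_ops;
}

static clib_error_t *
example_engine_init (vlib_main_t * vm)
{
  u32 eidx;

  eidx = vnet_crypto_register_engine (vm, "example", 50,
                                      "example crypto engine (sketch)");
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_AES_128_CBC_ENC,
                                    example_aes_128_cbc_enc);
  return 0;
}

VLIB_INIT_FUNCTION (example_engine_init);
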
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index e486aa9..b6028e9 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -531,6 +531,20 @@
list(APPEND VNET_API_FILES bfd/bfd.api)
##############################################################################
+# Crypto
+##############################################################################
+
+list(APPEND VNET_SOURCES
+ crypto/cli.c
+ crypto/crypto.c
+ crypto/format.c
+)
+
+list(APPEND VNET_HEADERS
+ crypto/crypto.h
+)
+
+##############################################################################
# Layer 3 protocol: IPSec
##############################################################################
list(APPEND VNET_SOURCES
diff --git a/src/vnet/crypto/cli.c b/src/vnet/crypto/cli.c
new file mode 100644
index 0000000..d93577e
--- /dev/null
+++ b/src/vnet/crypto/cli.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdbool.h>
+#include <vlib/vlib.h>
+#include <vnet/crypto/crypto.h>
+
+static clib_error_t *
+show_crypto_engines_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ vnet_crypto_main_t *cm = &crypto_main;
+ vnet_crypto_engine_t *p;
+
+ if (unformat_user (input, unformat_line_input, line_input))
+ unformat_free (line_input);
+
+ if (vec_len (cm->engines) == 0)
+ {
+ vlib_cli_output (vm, "No crypto engines registered");
+ return 0;
+ }
+
+ vlib_cli_output (vm, "%-20s%-8s%s", "Name", "Prio", "Description");
+ /* *INDENT-OFF* */
+ vec_foreach (p, cm->engines)
+ {
+ vlib_cli_output (vm, "%-20s%-8u%s", p->name, p->priority, p->desc);
+ }
+ /* *INDENT-ON* */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_crypto_engines_command, static) =
+{
+ .path = "show crypto engines",
+ .short_help = "show crypto engines",
+ .function = show_crypto_engines_command_fn,
+};
+
+static clib_error_t *
+show_crypto_handlers_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *s = 0;
+
+ if (unformat_user (input, unformat_line_input, line_input))
+ unformat_free (line_input);
+
+ vlib_cli_output (vm, "%-40s%-20s%s", "Name", "Active", "Candidates");
+ for (int i = 1; i < VNET_CRYPTO_N_OP_TYPES; i++)
+ {
+ vnet_crypto_op_type_data_t *otd = cm->opt_data + i;
+ vnet_crypto_engine_t *e;
+
+ vec_reset_length (s);
+ vec_foreach (e, cm->engines)
+ {
+ if (e->ops_handlers[i] != 0)
+ s = format (s, "%U ", format_vnet_crypto_engine, e - cm->engines);
+ }
+ vlib_cli_output (vm, "%-40U%-20U%v", format_vnet_crypto_op, i,
+ format_vnet_crypto_engine, otd->active_engine_index, s);
+ }
+ vec_free (s);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_crypto_handlers_command, static) =
+{
+ .path = "show crypto handlers",
+ .short_help = "show crypto handlers",
+ .function = show_crypto_handlers_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c
new file mode 100644
index 0000000..a6f45be
--- /dev/null
+++ b/src/vnet/crypto/crypto.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdbool.h>
+#include <vlib/vlib.h>
+#include <vnet/crypto/crypto.h>
+
+vnet_crypto_main_t crypto_main;
+
+u32
+vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ u32 rv = 0, i;
+
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_type_t opt = ops[i].op;
+ vnet_crypto_op_t *opp = &ops[i];
+
+ if (cm->ops_handlers[opt])
+ rv += (cm->ops_handlers[opt]) (vm, &opp, 1);
+ else
+ ops[i].status = VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER;
+ }
+
+ return rv;
+}
+
+u32
+vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
+ char *desc)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ vnet_crypto_engine_t *p;
+
+ vec_add2 (cm->engines, p, 1);
+ p->name = name;
+ p->desc = desc;
+ p->priority = prio;
+
+ return p - cm->engines;
+}
+
+vlib_error_t *
+vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index,
+ vnet_crypto_op_type_t opt,
+ vnet_crypto_ops_handler_t * fn)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ vnet_crypto_engine_t *ae, *e = vec_elt_at_index (cm->engines, engine_index);
+ vnet_crypto_op_type_data_t *otd = cm->opt_data + opt;
+ vec_validate_aligned (cm->ops_handlers, VNET_CRYPTO_N_OP_TYPES - 1,
+ CLIB_CACHE_LINE_BYTES);
+ e->ops_handlers[opt] = fn;
+
+ if (otd->active_engine_index == ~0)
+ {
+ otd->active_engine_index = engine_index;
+ cm->ops_handlers[opt] = fn;
+ return 0;
+ }
+ ae = vec_elt_at_index (cm->engines, otd->active_engine_index);
+ if (ae->priority < e->priority)
+ {
+ otd->active_engine_index = engine_index;
+ cm->ops_handlers[opt] = fn;
+ }
+
+ return 0;
+}
+
+clib_error_t *
+vnet_crypto_init (vlib_main_t * vm)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ const char *enc = "encrypt";
+ const char *dec = "decrypt";
+ const char *hmac = "hmac";
+
+ vec_validate_aligned (cm->threads, tm->n_vlib_mains, CLIB_CACHE_LINE_BYTES);
+ vec_validate (cm->algs, VNET_CRYPTO_N_ALGS);
+
+#define _(n, s) \
+ cm->algs[VNET_CRYPTO_ALG_##n].name = s; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_ENC].alg = VNET_CRYPTO_ALG_##n; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_DEC].alg = VNET_CRYPTO_ALG_##n; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_ENC].desc = enc; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_DEC].desc = dec; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_ENC].active_engine_index = ~0; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_DEC].active_engine_index = ~0;
+ foreach_crypto_alg;
+#undef _
+
+#define _(n, s) \
+ cm->algs[VNET_CRYPTO_ALG_##n].name = s; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_HMAC].alg = VNET_CRYPTO_ALG_##n; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_HMAC].desc = hmac; \
+ cm->opt_data[VNET_CRYPTO_OP_##n##_HMAC].active_engine_index = ~0;
+ foreach_hmac_alg;
+#undef _
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vnet_crypto_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
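
As a hedged illustration of the caller side of vnet_crypto_process_ops() above (function name and buffer arguments are placeholders; this mirrors what the IPsec conversion below does):

#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

/* Sketch: encrypt one flat buffer through whichever engine is currently
 * active for AES-128-CBC; key/iv/src/dst/len are caller-provided. */
static u32
example_encrypt_one (vlib_main_t * vm, u8 * key, u8 key_len, u8 * iv,
                     u8 * src, u8 * dst, u32 len)
{
  vnet_crypto_op_t _op, *op = &_op;

  op->op = VNET_CRYPTO_OP_AES_128_CBC_ENC;
  op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;	/* engine writes the IV */
  op->key = key;
  op->key_len = key_len;
  op->iv = iv;
  op->src = src;
  op->dst = dst;
  op->len = len;

  /* returns what the active handler reports; if no handler is registered,
   * op->status is set to VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER */
  return vnet_crypto_process_ops (vm, op, 1);
}
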
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
new file mode 100644
index 0000000..9f4c85b
--- /dev/null
+++ b/src/vnet/crypto/crypto.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_crypto_crypto_h
+#define included_vnet_crypto_crypto_h
+
+#define VNET_CRYPTO_RING_SIZE 512
+
+#include <vlib/vlib.h>
+
+#define foreach_crypto_alg \
+ _(DES_CBC, "des-cbc") \
+ _(3DES_CBC, "3des-cbc") \
+ _(AES_128_CBC, "aes-128-cbc") \
+ _(AES_192_CBC, "aes-192-cbc") \
+ _(AES_256_CBC, "aes-256-cbc")
+
+#define foreach_hmac_alg \
+ _(SHA1, "sha-1") \
+ _(SHA224, "sha-224") \
+ _(SHA256, "sha-256") \
+ _(SHA384, "sha-384") \
+ _(SHA512, "sha-512")
+
+/* *INDENT-OFF* */
+typedef enum
+{
+#define _(n, s) VNET_CRYPTO_ALG_##n,
+ foreach_crypto_alg
+#undef _
+#define _(n, s) VNET_CRYPTO_ALG_##n,
+ foreach_hmac_alg
+#undef _
+ VNET_CRYPTO_N_ALGS,
+} vnet_crypto_alg_t;
+
+typedef enum
+{
+ VNET_CRYPTO_OP_NONE = 0,
+#define _(n, s) VNET_CRYPTO_OP_##n##_ENC, VNET_CRYPTO_OP_##n##_DEC,
+ foreach_crypto_alg
+#undef _
+#define _(n, s) VNET_CRYPTO_OP_##n##_HMAC,
+ foreach_hmac_alg
+#undef _
+ VNET_CRYPTO_N_OP_TYPES,
+} vnet_crypto_op_type_t;
+/* *INDENT-ON* */
+
+typedef struct
+{
+ char *name;
+} vnet_crypto_alg_data_t;
+
+typedef enum
+{
+ VNET_CRYPTO_OP_STATUS_PENDING,
+ VNET_CRYPTO_OP_STATUS_COMPLETED,
+ VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER,
+} vnet_crypto_op_status_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ vnet_crypto_op_type_t op:16;
+ vnet_crypto_op_status_t status:8;
+ u8 key_len;
+ u16 flags;
+#define VNET_CRYPTO_OP_FLAG_INIT_IV 1
+ u32 len;
+ u8 *key;
+ u8 *iv;
+ u8 *src;
+ u8 *dst;
+} vnet_crypto_op_t;
+
+typedef struct
+{
+ vnet_crypto_alg_t alg;
+ const char *desc;
+ u32 active_engine_index;
+} vnet_crypto_op_type_data_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 head;
+ u32 tail;
+ u32 size;
+ vnet_crypto_alg_t alg:8;
+ vnet_crypto_op_type_t op:8;
+ vnet_crypto_op_t *jobs[0];
+} vnet_crypto_queue_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ clib_bitmap_t *act_queues;
+ vnet_crypto_queue_t *queues[VNET_CRYPTO_N_OP_TYPES];
+} vnet_crypto_thread_t;
+
+typedef u32 (vnet_crypto_ops_handler_t) (vlib_main_t * vm,
+ vnet_crypto_op_t * ops[], u32 n_ops);
+
+u32 vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
+ char *desc);
+
+vlib_error_t *vnet_crypto_register_ops_handler (vlib_main_t * vm,
+ u32 provider_index,
+ vnet_crypto_op_type_t opt,
+ vnet_crypto_ops_handler_t *
+ f);
+
+typedef struct
+{
+ char *name;
+ char *desc;
+ int priority;
+ vnet_crypto_ops_handler_t *ops_handlers[VNET_CRYPTO_N_OP_TYPES];
+} vnet_crypto_engine_t;
+
+typedef struct
+{
+ vnet_crypto_alg_data_t *algs;
+ vnet_crypto_thread_t *threads;
+ vnet_crypto_ops_handler_t **ops_handlers;
+ vnet_crypto_op_type_data_t opt_data[VNET_CRYPTO_N_OP_TYPES];
+ vnet_crypto_engine_t *engines;
+} vnet_crypto_main_t;
+
+extern vnet_crypto_main_t crypto_main;
+
+u32 vnet_crypto_submit_ops (vlib_main_t * vm, vnet_crypto_op_t ** jobs,
+ u32 n_jobs);
+
+u32 vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[],
+ u32 n_ops);
+
+format_function_t format_vnet_crypto_alg;
+format_function_t format_vnet_crypto_engine;
+format_function_t format_vnet_crypto_op;
+
+#endif /* included_vnet_crypto_crypto_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
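
For quick reference while reading the IPsec changes below, the op-type enum above expands as follows (hand-expanded from the foreach_ macros; values implied by declaration order):

/*
 *   VNET_CRYPTO_OP_NONE = 0,
 *   VNET_CRYPTO_OP_DES_CBC_ENC,     VNET_CRYPTO_OP_DES_CBC_DEC,
 *   VNET_CRYPTO_OP_3DES_CBC_ENC,    VNET_CRYPTO_OP_3DES_CBC_DEC,
 *   VNET_CRYPTO_OP_AES_128_CBC_ENC, VNET_CRYPTO_OP_AES_128_CBC_DEC,
 *   VNET_CRYPTO_OP_AES_192_CBC_ENC, VNET_CRYPTO_OP_AES_192_CBC_DEC,
 *   VNET_CRYPTO_OP_AES_256_CBC_ENC, VNET_CRYPTO_OP_AES_256_CBC_DEC,
 *   VNET_CRYPTO_OP_SHA1_HMAC,   VNET_CRYPTO_OP_SHA224_HMAC,
 *   VNET_CRYPTO_OP_SHA256_HMAC, VNET_CRYPTO_OP_SHA384_HMAC,
 *   VNET_CRYPTO_OP_SHA512_HMAC,
 *   VNET_CRYPTO_N_OP_TYPES (== 16)
 */
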
diff --git a/src/vnet/crypto/format.c b/src/vnet/crypto/format.c
new file mode 100644
index 0000000..88c7c0f
--- /dev/null
+++ b/src/vnet/crypto/format.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdbool.h>
+#include <vlib/vlib.h>
+#include <vnet/crypto/crypto.h>
+
+u8 *
+format_vnet_crypto_alg (u8 * s, va_list * args)
+{
+ vnet_crypto_alg_t alg = va_arg (*args, vnet_crypto_alg_t);
+ vnet_crypto_main_t *cm = &crypto_main;
+ vnet_crypto_alg_data_t *d = vec_elt_at_index (cm->algs, alg);
+ return format (s, "%s", d->name);
+}
+
+u8 *
+format_vnet_crypto_op (u8 * s, va_list * args)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ vnet_crypto_op_type_t op = va_arg (*args, vnet_crypto_op_type_t);
+ vnet_crypto_op_type_data_t *otd = cm->opt_data + op;
+
+ return format (s, "%s-%U", otd->desc, format_vnet_crypto_alg, otd->alg);
+}
+
+u8 *
+format_vnet_crypto_engine (u8 * s, va_list * args)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ u32 crypto_engine_index = va_arg (*args, u32);
+ vnet_crypto_engine_t *e;
+
+ if (crypto_engine_index == ~0)
+ return s;
+
+ e = vec_elt_at_index (cm->engines, crypto_engine_index);
+
+ return format (s, "%s", e->name);
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/ipsec/ah.h b/src/vnet/ipsec/ah.h
index f74ad9b..d0b4c21 100644
--- a/src/vnet/ipsec/ah.h
+++ b/src/vnet/ipsec/ah.h
@@ -15,15 +15,9 @@
#ifndef __AH_H__
#define __AH_H__
-
#include <vnet/ip/ip.h>
#include <vnet/ipsec/ipsec.h>
-#include <openssl/hmac.h>
-#include <openssl/rand.h>
-#include <openssl/evp.h>
-
-
typedef struct
{
unsigned char nexthdr;
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index 0fc4f48..2488fa9 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -84,7 +84,6 @@
{
u32 n_left_from, *from, next_index, *to_next, thread_index;
ipsec_main_t *im = &ipsec_main;
- ipsec_proto_main_t *em = &ipsec_proto_main;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
int icv_size;
@@ -173,8 +172,7 @@
(&ipsec_sa_counters, thread_index, sa_index0,
1, i_b0->current_length);
- icv_size =
- em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size;
+ icv_size = im->integ_algs[sa0->integ_alg].trunc_size;
if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
{
u8 sig[64];
@@ -205,7 +203,7 @@
icv_padding_len =
ah_calc_icv_padding_len (icv_size, 0 /* is_ipv6 */ );
}
- hmac_calc (sa0->integ_alg, sa0->integ_key.data,
+ hmac_calc (vm, sa0->integ_alg, sa0->integ_key.data,
sa0->integ_key.len, (u8 *) ih4, i_b0->current_length,
sig, sa0->use_esn, sa0->seq_hi);
diff --git a/src/vnet/ipsec/ah_encrypt.c b/src/vnet/ipsec/ah_encrypt.c
index 2e561de..ce930bd 100644
--- a/src/vnet/ipsec/ah_encrypt.c
+++ b/src/vnet/ipsec/ah_encrypt.c
@@ -89,7 +89,6 @@
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
ipsec_main_t *im = &ipsec_main;
- ipsec_proto_main_t *em = &ipsec_proto_main;
next_index = node->cached_next_index;
thread_index = vm->thread_index;
@@ -153,8 +152,7 @@
adv = -sizeof (ah_header_t);
}
- icv_size =
- em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size;
+ icv_size = im->integ_algs[sa0->integ_alg].trunc_size;
const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ip6);
adv -= padding_len;
/* transport mode save the eth header before it is overwritten */
@@ -267,7 +265,7 @@
sizeof (ah_header_t);
clib_memset (digest, 0, icv_size);
- unsigned size = hmac_calc (sa0->integ_alg, sa0->integ_key.data,
+ unsigned size = hmac_calc (vm, sa0->integ_alg, sa0->integ_key.data,
sa0->integ_key.len,
vlib_buffer_get_current (i_b0),
i_b0->current_length, sig, sa0->use_esn,
diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 0047265..1730038 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -16,6 +16,7 @@
#define __ESP_H__
#include <vnet/ip/ip.h>
+#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
typedef struct
@@ -202,106 +203,25 @@
return 0;
}
-always_inline void
-ipsec_proto_init ()
-{
- ipsec_proto_main_t *em = &ipsec_proto_main;
- vlib_thread_main_t *tm = vlib_get_thread_main ();
-
- clib_memset (em, 0, sizeof (em[0]));
-
- vec_validate (em->ipsec_proto_main_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type =
- EVP_aes_128_cbc ();
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type =
- EVP_aes_192_cbc ();
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type =
- EVP_aes_256_cbc ();
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].iv_size = 16;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].iv_size = 16;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].iv_size = 16;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].block_size =
- 16;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].block_size =
- 16;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].block_size =
- 16;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_DES_CBC].type =
- EVP_des_cbc ();
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_3DES_CBC].type =
- EVP_des_ede3_cbc ();
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_DES_CBC].block_size = 8;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_3DES_CBC].block_size = 8;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_DES_CBC].iv_size = 8;
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_3DES_CBC].iv_size = 8;
-
- vec_validate (em->ipsec_proto_main_integ_algs, IPSEC_INTEG_N_ALG - 1);
- ipsec_proto_main_integ_alg_t *i;
-
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
- i->md = EVP_sha1 ();
- i->trunc_size = 12;
-
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
- i->md = EVP_sha256 ();
- i->trunc_size = 12;
-
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
- i->md = EVP_sha256 ();
- i->trunc_size = 16;
-
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
- i->md = EVP_sha384 ();
- i->trunc_size = 24;
-
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
- i->md = EVP_sha512 ();
- i->trunc_size = 32;
-
- vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
- CLIB_CACHE_LINE_BYTES);
- int thread_id;
-
- for (thread_id = 0; thread_id < tm->n_vlib_mains; thread_id++)
- {
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- em->per_thread_data[thread_id].encrypt_ctx = EVP_CIPHER_CTX_new ();
- em->per_thread_data[thread_id].decrypt_ctx = EVP_CIPHER_CTX_new ();
- em->per_thread_data[thread_id].hmac_ctx = HMAC_CTX_new ();
-#else
- EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
- EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
- HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
-#endif
- }
-}
always_inline unsigned int
-hmac_calc (ipsec_integ_alg_t alg,
- u8 * key,
- int key_len,
+hmac_calc (vlib_main_t * vm, ipsec_integ_alg_t alg, u8 * key, int key_len,
u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
{
- ipsec_proto_main_t *em = &ipsec_proto_main;
- u32 thread_index = vlib_get_thread_index ();
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- HMAC_CTX *ctx = em->per_thread_data[thread_index].hmac_ctx;
-#else
- HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx);
-#endif
- const EVP_MD *md = NULL;
- unsigned int len;
-
+ ipsec_main_t *im = &ipsec_main;
+ vnet_crypto_op_t _op, *op = &_op;
ASSERT (alg < IPSEC_INTEG_N_ALG);
- if (PREDICT_FALSE (em->ipsec_proto_main_integ_algs[alg].md == 0))
+ if (PREDICT_FALSE (im->integ_algs[alg].op_type == 0))
return 0;
- if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg))
- {
- md = em->ipsec_proto_main_integ_algs[alg].md;
- em->per_thread_data[thread_index].last_integ_alg = alg;
- }
+ op->op = im->integ_algs[alg].op_type;
+ op->key = key;
+ op->key_len = key_len;
+ op->src = data;
+ op->len = data_len;
+ op->dst = signature;
+#if 0
HMAC_Init_ex (ctx, key, key_len, md, NULL);
@@ -311,7 +231,9 @@
HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
HMAC_Final (ctx, signature, &len);
- return em->ipsec_proto_main_integ_algs[alg].trunc_size;
+#endif
+ vnet_crypto_process_ops (vm, op, 1);
+ return im->integ_algs[alg].trunc_size;
}
#endif /* __ESP_H__ */
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 7e5114c..1ee7ce8 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -82,35 +82,28 @@
}
always_inline void
-esp_decrypt_cbc (ipsec_crypto_alg_t alg,
+esp_decrypt_cbc (vlib_main_t * vm, ipsec_crypto_alg_t alg,
u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
- ipsec_proto_main_t *em = &ipsec_proto_main;
- u32 thread_index = vlib_get_thread_index ();
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].decrypt_ctx;
-#else
- EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].decrypt_ctx);
-#endif
- const EVP_CIPHER *cipher = NULL;
- int out_len;
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *a;
+ vnet_crypto_op_t _op, *op = &_op;
ASSERT (alg < IPSEC_CRYPTO_N_ALG);
- if (PREDICT_FALSE (em->ipsec_proto_main_crypto_algs[alg].type == 0))
+ a = &im->crypto_algs[alg];
+
+ if (PREDICT_FALSE (a->dec_op_type == VNET_CRYPTO_OP_NONE))
return;
- if (PREDICT_FALSE
- (alg != em->per_thread_data[thread_index].last_decrypt_alg))
- {
- cipher = em->ipsec_proto_main_crypto_algs[alg].type;
- em->per_thread_data[thread_index].last_decrypt_alg = alg;
- }
+ op->op = a->dec_op_type;
+ op->iv = iv;
+ op->src = in;
+ op->dst = out;
+ op->len = in_len;
+ op->key = key;
- EVP_DecryptInit_ex (ctx, cipher, NULL, key, iv);
-
- EVP_DecryptUpdate (ctx, out, &out_len, in, in_len);
- EVP_DecryptFinal_ex (ctx, out + out_len, &out_len);
+ vnet_crypto_process_ops (vm, op, 1);
}
always_inline uword
@@ -119,7 +112,6 @@
int is_ip6)
{
ipsec_main_t *im = &ipsec_main;
- ipsec_proto_main_t *em = &ipsec_proto_main;
u32 *from = vlib_frame_vector_args (from_frame);
u32 n_left_from = from_frame->n_vectors;
u32 new_bufs[VLIB_FRAME_SIZE];
@@ -189,15 +181,13 @@
if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
{
u8 sig[64];
- int icv_size =
- em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size;
+ int icv_size = im->integ_algs[sa0->integ_alg].trunc_size;
clib_memset (sig, 0, sizeof (sig));
- u8 *icv =
- vlib_buffer_get_current (ib[0]) + ib[0]->current_length -
+ u8 *icv = vlib_buffer_get_current (ib[0]) + ib[0]->current_length -
icv_size;
ib[0]->current_length -= icv_size;
- hmac_calc (sa0->integ_alg, sa0->integ_key.data,
+ hmac_calc (vm, sa0->integ_alg, sa0->integ_key.data,
sa0->integ_key.len, (u8 *) esp0,
ib[0]->current_length, sig, sa0->use_esn, sa0->seq_hi);
@@ -227,10 +217,8 @@
(sa0->crypto_alg >= IPSEC_CRYPTO_ALG_DES_CBC &&
sa0->crypto_alg <= IPSEC_CRYPTO_ALG_3DES_CBC))
{
- const int BLOCK_SIZE =
- em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].block_size;;
- const int IV_SIZE =
- em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size;
+ const int BLOCK_SIZE = im->crypto_algs[sa0->crypto_alg].block_size;
+ const int IV_SIZE = im->crypto_algs[sa0->crypto_alg].iv_size;
esp_footer_t *f0;
u8 ip_hdr_size = 0;
@@ -263,7 +251,7 @@
}
}
- esp_decrypt_cbc (sa0->crypto_alg,
+ esp_decrypt_cbc (vm, sa0->crypto_alg,
esp0->data + IV_SIZE,
(u8 *) vlib_buffer_get_current (ob[0]) +
ip_hdr_size, BLOCK_SIZE * blocks,
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 080a345..37c2c95 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -20,6 +20,8 @@
#include <vnet/ip/ip.h>
#include <vnet/udp/udp.h>
+#include <vnet/crypto/crypto.h>
+
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
@@ -88,33 +90,26 @@
esp_encrypt_cbc (vlib_main_t * vm, ipsec_crypto_alg_t alg,
u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
- ipsec_proto_main_t *em = &ipsec_proto_main;
- u32 thread_index = vm->thread_index;
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].encrypt_ctx;
-#else
- EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].encrypt_ctx);
-#endif
- const EVP_CIPHER *cipher = NULL;
- int out_len;
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *a;
+ vnet_crypto_op_t _op, *op = &_op;
ASSERT (alg < IPSEC_CRYPTO_N_ALG);
- if (PREDICT_FALSE
- (em->ipsec_proto_main_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
+ a = &im->crypto_algs[alg];
+
+ if (PREDICT_FALSE (a->enc_op_type == VNET_CRYPTO_OP_NONE))
return;
- if (PREDICT_FALSE
- (alg != em->per_thread_data[thread_index].last_encrypt_alg))
- {
- cipher = em->ipsec_proto_main_crypto_algs[alg].type;
- em->per_thread_data[thread_index].last_encrypt_alg = alg;
- }
+ op->op = a->enc_op_type;
+ op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+ op->iv = iv;
+ op->src = in;
+ op->dst = out;
+ op->len = in_len;
+ op->key = key;
- EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv);
-
- EVP_EncryptUpdate (ctx, out, &out_len, in, in_len);
- EVP_EncryptFinal_ex (ctx, out + out_len, &out_len);
+ vnet_crypto_process_ops (vm, op, 1);
}
always_inline uword
@@ -125,7 +120,6 @@
u32 *from = vlib_frame_vector_args (from_frame);
u32 n_left_from = from_frame->n_vectors;
ipsec_main_t *im = &ipsec_main;
- ipsec_proto_main_t *em = &ipsec_proto_main;
u32 new_bufs[VLIB_FRAME_SIZE];
vlib_buffer_t *i_bufs[VLIB_FRAME_SIZE], **ib = i_bufs;
vlib_buffer_t *o_bufs[VLIB_FRAME_SIZE], **ob = o_bufs;
@@ -288,10 +282,8 @@
if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
{
- const int BLOCK_SIZE =
- em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].block_size;
- const int IV_SIZE =
- em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size;
+ const int BLOCK_SIZE = im->crypto_algs[sa0->crypto_alg].block_size;
+ const int IV_SIZE = im->crypto_algs[sa0->crypto_alg].iv_size;
int blocks = 1 + (ib[0]->current_length + 1) / BLOCK_SIZE;
/* pad packet in input buffer */
@@ -314,13 +306,12 @@
vnet_buffer (ob[0])->sw_if_index[VLIB_RX] =
vnet_buffer (ib[0])->sw_if_index[VLIB_RX];
- u8 iv[em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size];
- RAND_bytes (iv, sizeof (iv));
+ u8 *iv = vlib_buffer_get_current (ob[0]) + ip_udp_hdr_size +
+ sizeof (esp_header_t);
clib_memcpy_fast ((u8 *) vlib_buffer_get_current (ob[0]) +
ip_udp_hdr_size + sizeof (esp_header_t), iv,
- em->ipsec_proto_main_crypto_algs[sa0->
- crypto_alg].iv_size);
+ im->crypto_algs[sa0->crypto_alg].iv_size);
esp_encrypt_cbc (vm, sa0->crypto_alg,
(u8 *) vlib_buffer_get_current (ib[0]),
@@ -331,7 +322,7 @@
}
ob[0]->current_length +=
- hmac_calc (sa0->integ_alg, sa0->integ_key.data,
+ hmac_calc (vm, sa0->integ_alg, sa0->integ_key.data,
sa0->integ_key.len, (u8 *) o_esp0,
ob[0]->current_length - ip_udp_hdr_size,
vlib_buffer_get_current (ob[0]) + ob[0]->current_length,
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index ce93f32..e9d13a4 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -26,24 +26,6 @@
#include <vnet/ipsec/ah.h>
ipsec_main_t ipsec_main;
-ipsec_proto_main_t ipsec_proto_main;
-
-static void
-ipsec_rand_seed (void)
-{
- struct
- {
- time_t time;
- pid_t pid;
- void *p;
- } seed_data;
-
- seed_data.time = time (NULL);
- seed_data.pid = getpid ();
- seed_data.p = (void *) &seed_data;
-
- RAND_seed ((const void *) &seed_data, sizeof (seed_data));
-}
static clib_error_t *
ipsec_check_ah_support (ipsec_sa_t * sa)
@@ -240,8 +222,7 @@
{
clib_error_t *error;
ipsec_main_t *im = &ipsec_main;
-
- ipsec_rand_seed ();
+ ipsec_main_crypto_alg_t *a;
clib_memset (im, 0, sizeof (im[0]));
@@ -287,7 +268,55 @@
if ((error = vlib_call_init_function (vm, ipsec_tunnel_if_init)))
return error;
- ipsec_proto_init ();
+ vec_validate (im->crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_DES_CBC;
+ a->enc_op_type = VNET_CRYPTO_OP_DES_CBC_ENC;
+ a->dec_op_type = VNET_CRYPTO_OP_DES_CBC_DEC;
+ a->iv_size = a->block_size = 8;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_3DES_CBC;
+ a->enc_op_type = VNET_CRYPTO_OP_3DES_CBC_ENC;
+ a->dec_op_type = VNET_CRYPTO_OP_3DES_CBC_DEC;
+ a->iv_size = a->block_size = 8;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_128;
+ a->enc_op_type = VNET_CRYPTO_OP_AES_128_CBC_ENC;
+ a->dec_op_type = VNET_CRYPTO_OP_AES_128_CBC_DEC;
+ a->iv_size = a->block_size = 16;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_192;
+ a->enc_op_type = VNET_CRYPTO_OP_AES_192_CBC_ENC;
+ a->dec_op_type = VNET_CRYPTO_OP_AES_192_CBC_DEC;
+ a->iv_size = a->block_size = 16;
+
+ a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_256;
+ a->enc_op_type = VNET_CRYPTO_OP_AES_256_CBC_ENC;
+ a->dec_op_type = VNET_CRYPTO_OP_AES_256_CBC_DEC;
+ a->iv_size = a->block_size = 16;
+
+ vec_validate (im->integ_algs, IPSEC_INTEG_N_ALG - 1);
+ ipsec_main_integ_alg_t *i;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA1_96];
+ i->op_type = VNET_CRYPTO_OP_SHA1_HMAC;
+ i->trunc_size = 12;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
+ i->op_type = VNET_CRYPTO_OP_SHA256_HMAC;
+ i->trunc_size = 12;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
+ i->op_type = VNET_CRYPTO_OP_SHA256_HMAC;
+ i->trunc_size = 16;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
+ i->op_type = VNET_CRYPTO_OP_SHA384_HMAC;
+ i->trunc_size = 24;
+
+ i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
+ i->op_type = VNET_CRYPTO_OP_SHA512_HMAC;
+ i->trunc_size = 32;
return 0;
}
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index c877139..e38a4a8 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -16,12 +16,9 @@
#define __IPSEC_H__
#include <vnet/ip/ip.h>
+#include <vnet/crypto/crypto.h>
#include <vnet/feature/feature.h>
-#include <openssl/hmac.h>
-#include <openssl/rand.h>
-#include <openssl/evp.h>
-
#include <vppinfra/types.h>
#include <vppinfra/cache.h>
@@ -69,50 +66,17 @@
typedef struct
{
- const EVP_CIPHER *type;
+ vnet_crypto_op_type_t enc_op_type;
+ vnet_crypto_op_type_t dec_op_type;
u8 iv_size;
u8 block_size;
-} ipsec_proto_main_crypto_alg_t;
+} ipsec_main_crypto_alg_t;
typedef struct
{
- const EVP_MD *md;
+ vnet_crypto_op_type_t op_type;
u8 trunc_size;
-} ipsec_proto_main_integ_alg_t;
-
-typedef struct
-{
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- EVP_CIPHER_CTX *encrypt_ctx;
-#else
- EVP_CIPHER_CTX encrypt_ctx;
-#endif
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- EVP_CIPHER_CTX *decrypt_ctx;
-#else
- EVP_CIPHER_CTX decrypt_ctx;
-#endif
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- HMAC_CTX *hmac_ctx;
-#else
- HMAC_CTX hmac_ctx;
-#endif
- ipsec_crypto_alg_t last_encrypt_alg;
- ipsec_crypto_alg_t last_decrypt_alg;
- ipsec_integ_alg_t last_integ_alg;
-} ipsec_proto_main_per_thread_data_t;
-
-typedef struct
-{
- ipsec_proto_main_crypto_alg_t *ipsec_proto_main_crypto_algs;
- ipsec_proto_main_integ_alg_t *ipsec_proto_main_integ_algs;
- ipsec_proto_main_per_thread_data_t *per_thread_data;
-} ipsec_proto_main_t;
-
-extern ipsec_proto_main_t ipsec_proto_main;
+} ipsec_main_integ_alg_t;
typedef struct
{
@@ -171,6 +135,12 @@
u32 ah_default_backend;
/* index of default esp backend */
u32 esp_default_backend;
+
+ /* crypto alg data */
+ ipsec_main_crypto_alg_t *crypto_algs;
+
+ /* crypto integ data */
+ ipsec_main_integ_alg_t *integ_algs;
} ipsec_main_t;
extern ipsec_main_t ipsec_main;