/*
* ipsec.c : IPSEC module functions
*
* Copyright (c) 2015 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/interface.h>
#include <vnet/udp/udp_local.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ah.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/ipsec_itf.h>
#include <vnet/ipsec/ipsec_spd_fp_lookup.h>
/* The outbound flow cache is sized for 1 million flows at a load factor of
* .25: 1M / .25 = 4M = 1 << 22 buckets.
*/
#define IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
/* The inbound flow cache is likewise sized for 1 million flows at a load
* factor of .25: 1M / .25 = 4M = 1 << 22 buckets.
*/
#define IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
ipsec_main_t ipsec_main;
esp_async_post_next_t esp_encrypt_async_next;
esp_async_post_next_t esp_decrypt_async_next;
clib_error_t *
ipsec_register_next_header (vlib_main_t *vm, u8 next_header,
const char *next_node)
{
ipsec_main_t *im = &ipsec_main;
const vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) next_node);
/* -post nodes (e.g. esp4-decrypt-post) are siblings of non-post nodes (e.g.
* esp4-decrypt) and will therefore have the same next index */
const vlib_node_t *esp_decrypt_nodes[] = {
vlib_get_node (vm, im->esp4_decrypt_node_index),
vlib_get_node (vm, im->esp6_decrypt_node_index),
vlib_get_node (vm, im->esp4_decrypt_tun_node_index),
vlib_get_node (vm, im->esp6_decrypt_tun_node_index),
};
uword slot, max;
int i;
/* look for a next_index value that we can use for all esp decrypt nodes to
* avoid maintaining separate next-index arrays per node */
slot = vlib_node_get_next (vm, esp_decrypt_nodes[0]->index, node->index);
max = vec_len (esp_decrypt_nodes[0]->next_nodes);
for (i = 1; i < ARRAY_LEN (esp_decrypt_nodes); i++)
{
/* if the next node already exists, check that it shares the same next_index */
if (slot !=
vlib_node_get_next (vm, esp_decrypt_nodes[i]->index, node->index))
return clib_error_return (
0, "next node already exists with different next index");
/* compute a suitable free slot as the max of all nodes' next-node counts */
max = clib_max (max, vec_len (esp_decrypt_nodes[i]->next_nodes));
}
if (~0 == slot)
{
/* next node not there yet, add it using the computed max */
slot = max;
for (i = 0; i < ARRAY_LEN (esp_decrypt_nodes); i++)
vlib_node_add_next_with_slot (vm, esp_decrypt_nodes[i]->index,
node->index, slot);
}
im->next_header_registrations[next_header] = slot;
return 0;
}
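/* Usage sketch (hypothetical, not part of this file): a module handling an
 * inner protocol would register its input node for that IP protocol number
 * from an init function that runs after ipsec_init, e.g.:
 *
 *   static clib_error_t *
 *   my_proto_init (vlib_main_t *vm)
 *   {
 *     // 115 = L2TPv3; "my-proto-input" is an illustrative node name
 *     return ipsec_register_next_header (vm, 115, "my-proto-input");
 *   }
 *
 * All esp decrypt nodes then dispatch decapsulated packets carrying that
 * next-header value to the registered node via the shared next index.
 */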
static clib_error_t *
ipsec_check_ah_support (ipsec_sa_t * sa)
{
ipsec_main_t *im = &ipsec_main;
if (sa->integ_alg == IPSEC_INTEG_ALG_NONE)
return clib_error_return (0, "unsupported none integ-alg");
if (!vnet_crypto_is_set_handler (im->integ_algs[sa->integ_alg].alg))
return clib_error_return (0, "No crypto engine support for %U",
format_ipsec_integ_alg, sa->integ_alg);
return 0;
}
static clib_error_t *
ipsec_check_esp_support (ipsec_sa_t * sa)
{
ipsec_main_t *im = &ipsec_main;
if (IPSEC_INTEG_ALG_NONE != sa->integ_alg)
{
if (!vnet_crypto_is_set_handler (im->integ_algs[sa->integ_alg].alg))
return clib_error_return (0, "No crypto engine support for %U",
format_ipsec_integ_alg, sa->integ_alg);
}
if (IPSEC_CRYPTO_ALG_NONE != sa->crypto_alg)
{
if (!vnet_crypto_is_set_handler (im->crypto_algs[sa->crypto_alg].alg))
return clib_error_return (0, "No crypto engine support for %U",
format_ipsec_crypto_alg, sa->crypto_alg);
}
return (0);
}
clib_error_t *
ipsec_add_del_sa_sess_cb (ipsec_main_t * im, u32 sa_index, u8 is_add)
{
ipsec_ah_backend_t *ah =
pool_elt_at_index (im->ah_backends, im->ah_current_backend);
if (ah->add_del_sa_sess_cb)
{
clib_error_t *err = ah->add_del_sa_sess_cb (sa_index, is_add);
if (err)
return err;
}
ipsec_esp_backend_t *esp =
pool_elt_at_index (im->esp_backends, im->esp_current_backend);
if (esp->add_del_sa_sess_cb)
{
clib_error_t *err = esp->add_del_sa_sess_cb (sa_index, is_add);
if (err)
return err;
}
return 0;
}
clib_error_t *
ipsec_check_support_cb (ipsec_main_t * im, ipsec_sa_t * sa)
{
clib_error_t *error = 0;
if (PREDICT_FALSE (sa->protocol == IPSEC_PROTOCOL_AH))
{
ipsec_ah_backend_t *ah =
pool_elt_at_index (im->ah_backends, im->ah_current_backend);
ASSERT (ah->check_support_cb);
error = ah->check_support_cb (sa);
}
else
{
ipsec_esp_backend_t *esp =
pool_elt_at_index (im->esp_backends, im->esp_current_backend);
ASSERT (esp->check_support_cb);
error = esp->check_support_cb (sa);
}
return error;
}
static void
ipsec_add_node (vlib_main_t * vm, const char *node_name,
const char *prev_node_name, u32 * out_node_index,
u32 * out_next_index)
{
vlib_node_t *prev_node, *node;
prev_node = vlib_get_node_by_name (vm, (u8 *) prev_node_name);
ASSERT (prev_node);
node = vlib_get_node_by_name (vm, (u8 *) node_name);
ASSERT (node);
*out_node_index = node->index;
*out_next_index = vlib_node_add_next (vm, prev_node->index, node->index);
}
static inline uword
ipsec_udp_registration_key (u16 port, u8 is_ip4)
{
uword key = (is_ip4) ? AF_IP4 : AF_IP6;
key |= (uword) (port << 16);
return key;
}
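/* Key layout example (assuming AF_IP4 == 0 and AF_IP6 == 1 from vnet's
 * ip_address_family_t): port 4500 (0x1194) maps to key 0x11940000 for IPv4
 * and 0x11940001 for IPv6, i.e. the port in bits 16..31 and the address
 * family in the low bits. */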
void
ipsec_unregister_udp_port (u16 port, u8 is_ip4)
{
ipsec_main_t *im = &ipsec_main;
u32 n_regs;
uword *p, key;
key = ipsec_udp_registration_key (port, is_ip4);
p = hash_get (im->udp_port_registrations, key);
ASSERT (p);
n_regs = p[0];
if (0 == --n_regs)
{
udp_unregister_dst_port (vlib_get_main (), port, is_ip4);
hash_unset (im->udp_port_registrations, key);
}
else
{
hash_unset (im->udp_port_registrations, key);
hash_set (im->udp_port_registrations, key, n_regs);
}
}
void
ipsec_register_udp_port (u16 port, u8 is_ip4)
{
ipsec_main_t *im = &ipsec_main;
u32 n_regs, node_index;
uword *p, key;
key = ipsec_udp_registration_key (port, is_ip4);
node_index =
(is_ip4) ? ipsec4_tun_input_node.index : ipsec6_tun_input_node.index;
p = hash_get (im->udp_port_registrations, key);
n_regs = (p ? p[0] : 0);
if (0 == n_regs++)
udp_register_dst_port (vlib_get_main (), port, node_index, is_ip4);
hash_unset (im->udp_port_registrations, key);
hash_set (im->udp_port_registrations, key, n_regs);
}
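/* Usage sketch (illustrative): registrations are reference counted per
 * (port, address-family) key, so register/unregister calls pair up safely
 * across SAs, e.g. for two SAs sharing the NAT-T port:
 *
 *   ipsec_register_udp_port (4500, 1);   // punts UDP 4500 to ipsec4-tun-input
 *   ipsec_register_udp_port (4500, 1);   // second SA: refcount -> 2
 *   ipsec_unregister_udp_port (4500, 1); // refcount -> 1, still punted
 *   ipsec_unregister_udp_port (4500, 1); // last ref: port released
 */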
u32
ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
const char *name,
const char *ah4_encrypt_node_name,
const char *ah4_decrypt_node_name,
const char *ah6_encrypt_node_name,
const char *ah6_decrypt_node_name,
check_support_cb_t ah_check_support_cb,
add_del_sa_sess_cb_t ah_add_del_sa_sess_cb)
{
ipsec_ah_backend_t *b;
pool_get (im->ah_backends, b);
b->name = format (0, "%s%c", name, 0);
ipsec_add_node (vm, ah4_encrypt_node_name, "ipsec4-output-feature",
&b->ah4_encrypt_node_index, &b->ah4_encrypt_next_index);
ipsec_add_node (vm, ah4_decrypt_node_name, "ipsec4-input-feature",
&b->ah4_decrypt_node_index, &b->ah4_decrypt_next_index);
ipsec_add_node (vm, ah6_encrypt_node_name, "ipsec6-output-feature",
&b->ah6_encrypt_node_index, &b->ah6_encrypt_next_index);
ipsec_add_node (vm, ah6_decrypt_node_name, "ipsec6-input-feature",
&b->ah6_decrypt_node_index, &b->ah6_decrypt_next_index);
b->check_support_cb = ah_check_support_cb;
b->add_del_sa_sess_cb = ah_add_del_sa_sess_cb;
return b - im->ah_backends;
}
u32
ipsec_register_esp_backend (
vlib_main_t *vm, ipsec_main_t *im, const char *name,
const char *esp4_encrypt_node_name, const char *esp4_encrypt_node_tun_name,
const char *esp4_decrypt_node_name, const char *esp4_decrypt_tun_node_name,
const char *esp6_encrypt_node_name, const char *esp6_encrypt_node_tun_name,
const char *esp6_decrypt_node_name, const char *esp6_decrypt_tun_node_name,
const char *esp_mpls_encrypt_node_tun_name,
check_support_cb_t esp_check_support_cb,
add_del_sa_sess_cb_t esp_add_del_sa_sess_cb)
{
ipsec_esp_backend_t *b;
pool_get (im->esp_backends, b);
b->name = format (0, "%s%c", name, 0);
ipsec_add_node (vm, esp4_encrypt_node_name, "ipsec4-output-feature",
&b->esp4_encrypt_node_index, &b->esp4_encrypt_next_index);
ipsec_add_node (vm, esp4_decrypt_node_name, "ipsec4-input-feature",
&b->esp4_decrypt_node_index, &b->esp4_decrypt_next_index);
ipsec_add_node (vm, esp6_encrypt_node_name, "ipsec6-output-feature",
&b->esp6_encrypt_node_index, &b->esp6_encrypt_next_index);
ipsec_add_node (vm, esp6_decrypt_node_name, "ipsec6-input-feature",
&b->esp6_decrypt_node_index, &b->esp6_decrypt_next_index);
ipsec_add_node (vm, esp4_decrypt_tun_node_name, "ipsec4-tun-input",
&b->esp4_decrypt_tun_node_index,
&b->esp4_decrypt_tun_next_index);
ipsec_add_node (vm, esp6_decrypt_tun_node_name, "ipsec6-tun-input",
&b->esp6_decrypt_tun_node_index,
&b->esp6_decrypt_tun_next_index);
b->esp6_encrypt_tun_node_index =
vlib_get_node_by_name (vm, (u8 *) esp6_encrypt_node_tun_name)->index;
b->esp_mpls_encrypt_tun_node_index =
vlib_get_node_by_name (vm, (u8 *) esp_mpls_encrypt_node_tun_name)->index;
b->esp4_encrypt_tun_node_index =
vlib_get_node_by_name (vm, (u8 *) esp4_encrypt_node_tun_name)->index;
b->check_support_cb = esp_check_support_cb;
b->add_del_sa_sess_cb = esp_add_del_sa_sess_cb;
return b - im->esp_backends;
}
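/* Usage sketch (hypothetical names): a crypto plugin providing its own ESP
 * graph nodes would register them as an alternative backend and switch to
 * it, roughly:
 *
 *   u32 idx = ipsec_register_esp_backend (
 *     vm, im, "my engine backend", "my-esp4-encrypt", "my-esp4-encrypt-tun",
 *     "my-esp4-decrypt", "my-esp4-decrypt-tun", "my-esp6-encrypt",
 *     "my-esp6-encrypt-tun", "my-esp6-decrypt", "my-esp6-decrypt-tun",
 *     "my-esp-mpls-encrypt-tun", my_check_support_cb, my_add_del_sess_cb);
 *   int rv = ipsec_select_esp_backend (im, idx);
 *
 * Selection fails with VNET_API_ERROR_RSRC_IN_USE while SAs or IPSec
 * interfaces exist (see ipsec_rsc_in_use below).
 */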
clib_error_t *
ipsec_rsc_in_use (ipsec_main_t * im)
{
/* return an error if crypto resources are in use */
if (pool_elts (ipsec_sa_pool) > 0)
return clib_error_return (0, "%d SA entries configured",
pool_elts (ipsec_sa_pool));
if (ipsec_itf_count () > 0)
return clib_error_return (0, "%d IPSec interface configured",
ipsec_itf_count ());
return (NULL);
}
int
ipsec_select_ah_backend (ipsec_main_t * im, u32 backend_idx)
{
if (ipsec_rsc_in_use (im))
return VNET_API_ERROR_RSRC_IN_USE;
if (pool_is_free_index (im->ah_backends, backend_idx))
return VNET_API_ERROR_INVALID_VALUE;
ipsec_ah_backend_t *b = pool_elt_at_index (im->ah_backends, backend_idx);
im->ah_current_backend = backend_idx;
im->ah4_encrypt_node_index = b->ah4_encrypt_node_index;
im->ah4_decrypt_node_index = b->ah4_decrypt_node_index;
im->ah4_encrypt_next_index = b->ah4_encrypt_next_index;
im->ah4_decrypt_next_index = b->ah4_decrypt_next_index;
im->ah6_encrypt_node_index = b->ah6_encrypt_node_index;
im->ah6_decrypt_node_index = b->ah6_decrypt_node_index;
im->ah6_encrypt_next_index = b->ah6_encrypt_next_index;
im->ah6_decrypt_next_index = b->ah6_decrypt_next_index;
return 0;
}
int
ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
{
if (ipsec_rsc_in_use (im))
return VNET_API_ERROR_RSRC_IN_USE;
if (pool_is_free_index (im->esp_backends, backend_idx))
return VNET_API_ERROR_INVALID_VALUE;
ipsec_esp_backend_t *b = pool_elt_at_index (im->esp_backends, backend_idx);
im->esp_current_backend = backend_idx;
im->esp4_encrypt_node_index = b->esp4_encrypt_node_index;
im->esp4_decrypt_node_index = b->esp4_decrypt_node_index;
im->esp4_encrypt_next_index = b->esp4_encrypt_next_index;
im->esp4_decrypt_next_index = b->esp4_decrypt_next_index;
im->esp6_encrypt_node_index = b->esp6_encrypt_node_index;
im->esp6_decrypt_node_index = b->esp6_decrypt_node_index;
im->esp6_encrypt_next_index = b->esp6_encrypt_next_index;
im->esp6_decrypt_next_index = b->esp6_decrypt_next_index;
im->esp4_decrypt_tun_node_index = b->esp4_decrypt_tun_node_index;
im->esp4_decrypt_tun_next_index = b->esp4_decrypt_tun_next_index;
im->esp6_decrypt_tun_node_index = b->esp6_decrypt_tun_node_index;
im->esp6_decrypt_tun_next_index = b->esp6_decrypt_tun_next_index;
im->esp4_encrypt_tun_node_index = b->esp4_encrypt_tun_node_index;
im->esp6_encrypt_tun_node_index = b->esp6_encrypt_tun_node_index;
im->esp_mpls_encrypt_tun_node_index = b->esp_mpls_encrypt_tun_node_index;
return 0;
}
void
ipsec_set_async_mode (u32 is_enabled)
{
ipsec_main_t *im = &ipsec_main;
ipsec_sa_t *sa;
im->async_mode = is_enabled;
/* change SA crypto op data */
pool_foreach (sa, ipsec_sa_pool)
ipsec_sa_set_async_mode (sa, is_enabled);
}
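/* Usage note: this is the helper behind the "set ipsec async mode on|off"
 * debug CLI (see ipsec_cli.c). Toggling the mode rewrites the per-SA crypto
 * op data for every existing SA, so it walks the whole SA pool. */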
static void
crypto_engine_backend_register_post_node (vlib_main_t * vm)
{
esp_async_post_next_t *eit;
esp_async_post_next_t *dit;
eit = &esp_encrypt_async_next;
eit->esp4_post_next =
vnet_crypto_register_post_node (vm, "esp4-encrypt-post");
eit->esp6_post_next =
vnet_crypto_register_post_node (vm, "esp6-encrypt-post");
eit->esp4_tun_post_next =
vnet_crypto_register_post_node (vm, "esp4-encrypt-tun-post");
eit->esp6_tun_post_next =
vnet_crypto_register_post_node (vm, "esp6-encrypt-tun-post");
eit->esp_mpls_tun_post_next =
vnet_crypto_register_post_node (vm, "esp-mpls-encrypt-tun-post");
dit = &esp_decrypt_async_next;
dit->esp4_post_next =
vnet_crypto_register_post_node (vm, "esp4-decrypt-post");
dit->esp6_post_next =
vnet_crypto_register_post_node (vm, "esp6-decrypt-post");
dit->esp4_tun_post_next =
vnet_crypto_register_post_node (vm, "esp4-decrypt-tun-post");
dit->esp6_tun_post_next =
vnet_crypto_register_post_node (vm, "esp6-decrypt-tun-post");
}
static clib_error_t *
ipsec_init (vlib_main_t * vm)
{
clib_error_t *error;
ipsec_main_t *im = &ipsec_main;
ipsec_main_crypto_alg_t *a;
/* Backend registration requires the feature arcs to be set up */
if ((error = vlib_call_init_function (vm, vnet_feature_init)))
return (error);
im->vnet_main = vnet_get_main ();
im->vlib_main = vm;
im->spd_index_by_spd_id = hash_create (0, sizeof (uword));
im->sa_index_by_sa_id = hash_create (0, sizeof (uword));
im->spd_index_by_sw_if_index = hash_create (0, sizeof (uword));
vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
ASSERT (node);
im->error_drop_node_index = node->index;
im->ah_current_backend = ~0;
im->esp_current_backend = ~0;
u32 idx = ipsec_register_ah_backend (vm, im, "crypto engine backend",
"ah4-encrypt",
"ah4-decrypt",
"ah6-encrypt",
"ah6-decrypt",
ipsec_check_ah_support,
NULL);
im->ah_default_backend = idx;
int rv = ipsec_select_ah_backend (im, idx);
ASSERT (0 == rv);
(void) (rv); // avoid warning
idx = ipsec_register_esp_backend (
vm, im, "crypto engine backend", "esp4-encrypt", "esp4-encrypt-tun",
"esp4-decrypt", "esp4-decrypt-tun", "esp6-encrypt", "esp6-encrypt-tun",
"esp6-decrypt", "esp6-decrypt-tun", "esp-mpls-encrypt-tun",
ipsec_check_esp_support, NULL);
im->esp_default_backend = idx;
rv = ipsec_select_esp_backend (im, idx);
ASSERT (0 == rv);
(void) (rv); // avoid warning
if ((error = vlib_call_init_function (vm, ipsec_cli_init)))
return error;
vec_validate (im->crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
a = im->crypto_algs + IPSEC_CRYPTO_ALG_NONE;
a->enc_op_id = VNET_CRYPTO_OP_NONE;
a->dec_op_id = VNET_CRYPTO_OP_NONE;
a->alg = VNET_CRYPTO_ALG_NONE;
a->iv_size = 0;
a->block_align = 1;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_DES_CBC;
a->enc_op_id = VNET_CRYPTO_OP_DES_CBC_ENC;
a->dec_op_id = VNET_CRYPTO_OP_DES_CBC_DEC;
a->alg = VNET_CRYPTO_ALG_DES_CBC;
a->iv_size = a->block_align = 8;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_3DES_CBC;
a->enc_op_id = VNET_CRYPTO_OP_3DES_CBC_ENC;
a->dec_op_id = VNET_CRYPTO_OP_3DES_CBC_DEC;
a->alg = VNET_CRYPTO_ALG_3DES_CBC;
a->iv_size = a->block_align = 8;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_128;
a->enc_op_id = VNET_CRYPTO_OP_AES_128_CBC_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_128_CBC_DEC;
a->alg = VNET_CRYPTO_ALG_AES_128_CBC;
a->iv_size = a->block_align = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_192;
a->enc_op_id = VNET_CRYPTO_OP_AES_192_CBC_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_192_CBC_DEC;
a->alg = VNET_CRYPTO_ALG_AES_192_CBC;
a->iv_size = a->block_align = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_256;
a->enc_op_id = VNET_CRYPTO_OP_AES_256_CBC_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_256_CBC_DEC;
a->alg = VNET_CRYPTO_ALG_AES_256_CBC;
a->iv_size = a->block_align = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_128;
a->enc_op_id = VNET_CRYPTO_OP_AES_128_CTR_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_128_CTR_DEC;
a->alg = VNET_CRYPTO_ALG_AES_128_CTR;
a->iv_size = 8;
a->block_align = 1;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_192;
a->enc_op_id = VNET_CRYPTO_OP_AES_192_CTR_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_192_CTR_DEC;
a->alg = VNET_CRYPTO_ALG_AES_192_CTR;
a->iv_size = 8;
a->block_align = 1;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_256;
a->enc_op_id = VNET_CRYPTO_OP_AES_256_CTR_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_256_CTR_DEC;
a->alg = VNET_CRYPTO_ALG_AES_256_CTR;
a->iv_size = 8;
a->block_align = 1;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_128;
a->enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
a->alg = VNET_CRYPTO_ALG_AES_128_GCM;
a->iv_size = 8;
a->block_align = 1;
a->icv_size = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_192;
a->enc_op_id = VNET_CRYPTO_OP_AES_192_GCM_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_192_GCM_DEC;
a->alg = VNET_CRYPTO_ALG_AES_192_GCM;
a->iv_size = 8;
a->block_align = 1;
a->icv_size = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_256;
a->enc_op_id = VNET_CRYPTO_OP_AES_256_GCM_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
a->alg = VNET_CRYPTO_ALG_AES_256_GCM;
a->iv_size = 8;
a->block_align = 1;
a->icv_size = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_CHACHA20_POLY1305;
a->enc_op_id = VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC;
a->dec_op_id = VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC;
a->alg = VNET_CRYPTO_ALG_CHACHA20_POLY1305;
a->iv_size = 8;
a->block_align = 1; /* stream cipher: no block padding, as for the other AEADs */
a->icv_size = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_NULL_GMAC_128;
a->enc_op_id = VNET_CRYPTO_OP_AES_128_NULL_GMAC_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_128_NULL_GMAC_DEC;
a->alg = VNET_CRYPTO_ALG_AES_128_GCM;
a->iv_size = 8;
a->block_align = 1;
a->icv_size = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_NULL_GMAC_192;
a->enc_op_id = VNET_CRYPTO_OP_AES_192_NULL_GMAC_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_192_NULL_GMAC_DEC;
a->alg = VNET_CRYPTO_ALG_AES_192_GCM;
a->iv_size = 8;
a->block_align = 1;
a->icv_size = 16;
a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_NULL_GMAC_256;
a->enc_op_id = VNET_CRYPTO_OP_AES_256_NULL_GMAC_ENC;
a->dec_op_id = VNET_CRYPTO_OP_AES_256_NULL_GMAC_DEC;
a->alg = VNET_CRYPTO_ALG_AES_256_GCM;
a->iv_size = 8;
a->block_align = 1;
a->icv_size = 16;
vec_validate (im->integ_algs, IPSEC_INTEG_N_ALG - 1);
ipsec_main_integ_alg_t *i;
i = &im->integ_algs[IPSEC_INTEG_ALG_MD5_96];
i->op_id = VNET_CRYPTO_OP_MD5_HMAC;
i->alg = VNET_CRYPTO_ALG_HMAC_MD5;
i->icv_size = 12;
i = &im->integ_algs[IPSEC_INTEG_ALG_SHA1_96];
i->op_id = VNET_CRYPTO_OP_SHA1_HMAC;
i->alg = VNET_CRYPTO_ALG_HMAC_SHA1;
i->icv_size = 12;
i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
i->op_id = VNET_CRYPTO_OP_SHA256_HMAC;
i->alg = VNET_CRYPTO_ALG_HMAC_SHA256;
i->icv_size = 12;
i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
i->op_id = VNET_CRYPTO_OP_SHA256_HMAC;
i->alg = VNET_CRYPTO_ALG_HMAC_SHA256;
i->icv_size = 16;
i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
i->op_id = VNET_CRYPTO_OP_SHA384_HMAC;
i->alg = VNET_CRYPTO_ALG_HMAC_SHA384;
i->icv_size = 24;
i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
i->op_id = VNET_CRYPTO_OP_SHA512_HMAC;
i->alg = VNET_CRYPTO_ALG_HMAC_SHA512;
i->icv_size = 32;
vec_validate_aligned (im->ptd, vlib_num_workers (), CLIB_CACHE_LINE_BYTES);
im->async_mode = 0;
crypto_engine_backend_register_post_node (vm);
im->ipsec4_out_spd_hash_tbl = NULL;
im->output_flow_cache_flag = 0;
im->ipsec4_out_spd_flow_cache_entries = 0;
im->epoch_count = 0;
im->ipsec4_out_spd_hash_num_buckets =
IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS;
im->ipsec4_in_spd_hash_tbl = NULL;
im->input_flow_cache_flag = 0;
im->ipsec4_in_spd_flow_cache_entries = 0;
im->input_epoch_count = 0;
im->ipsec4_in_spd_hash_num_buckets = IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS;
vec_validate_init_empty_aligned (im->next_header_registrations, 255, ~0,
CLIB_CACHE_LINE_BYTES);
im->fp_spd_ipv4_out_is_enabled = 0;
im->fp_spd_ipv6_out_is_enabled = 0;
im->fp_spd_ipv4_in_is_enabled = 0;
im->fp_spd_ipv6_in_is_enabled = 0;
im->fp_lookup_hash_buckets = IPSEC_FP_HASH_LOOKUP_HASH_BUCKETS;
return 0;
}
VLIB_INIT_FUNCTION (ipsec_init);
static clib_error_t *
ipsec_config (vlib_main_t *vm, unformat_input_t *input)
{
ipsec_main_t *im = &ipsec_main;
unformat_input_t sub_input;
u32 ipsec4_out_spd_hash_num_buckets;
u32 ipsec4_in_spd_hash_num_buckets;
u32 ipsec_spd_fp_num_buckets;
bool fp_spd_ip4_enabled = false;
bool fp_spd_ip6_enabled = false;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "ipv6-outbound-spd-fast-path on"))
{
im->fp_spd_ipv6_out_is_enabled = 1;
fp_spd_ip6_enabled = true;
}
else if (unformat (input, "ipv6-outbound-spd-fast-path off"))
im->fp_spd_ipv6_out_is_enabled = 0;
else if (unformat (input, "ipv4-outbound-spd-fast-path on"))
{
im->fp_spd_ipv4_out_is_enabled = 1;
im->output_flow_cache_flag = 0;
fp_spd_ip4_enabled = true;
}
else if (unformat (input, "ipv4-outbound-spd-fast-path off"))
im->fp_spd_ipv4_out_is_enabled = 0;
else if (unformat (input, "ipv6-inbound-spd-fast-path on"))
{
im->fp_spd_ipv6_in_is_enabled = 1;
fp_spd_ip6_enabled = true;
}
else if (unformat (input, "ipv6-inbound-spd-fast-path off"))
im->fp_spd_ipv6_in_is_enabled = 0;
else if (unformat (input, "ipv4-inbound-spd-fast-path on"))
{
im->fp_spd_ipv4_in_is_enabled = 1;
im->input_flow_cache_flag = 0;
fp_spd_ip4_enabled = true;
}
else if (unformat (input, "ipv4-inbound-spd-fast-path off"))
im->fp_spd_ipv4_in_is_enabled = 0;
else if (unformat (input, "spd-fast-path-num-buckets %d",
&ipsec_spd_fp_num_buckets))
{
/* Number of bihash buckets is power of 2 >= input */
im->fp_lookup_hash_buckets = 1ULL
<< max_log2 (ipsec_spd_fp_num_buckets);
}
else if (unformat (input, "ipv4-outbound-spd-flow-cache on"))
im->output_flow_cache_flag = im->fp_spd_ipv4_out_is_enabled ? 0 : 1;
else if (unformat (input, "ipv4-outbound-spd-flow-cache off"))
im->output_flow_cache_flag = 0;
else if (unformat (input, "ipv4-outbound-spd-hash-buckets %d",
&ipsec4_out_spd_hash_num_buckets))
{
/* Size of hash is power of 2 >= number of buckets */
im->ipsec4_out_spd_hash_num_buckets =
1ULL << max_log2 (ipsec4_out_spd_hash_num_buckets);
}
else if (unformat (input, "ipv4-inbound-spd-flow-cache on"))
im->input_flow_cache_flag = im->fp_spd_ipv4_in_is_enabled ? 0 : 1;
else if (unformat (input, "ipv4-inbound-spd-flow-cache off"))
im->input_flow_cache_flag = 0;
else if (unformat (input, "ipv4-inbound-spd-hash-buckets %d",
&ipsec4_in_spd_hash_num_buckets))
{
im->ipsec4_in_spd_hash_num_buckets =
1ULL << max_log2 (ipsec4_in_spd_hash_num_buckets);
}
else if (unformat (input, "ip4 %U", unformat_vlib_cli_sub_input,
&sub_input))
{
uword table_size = ~0;
u32 n_buckets = ~0;
while (unformat_check_input (&sub_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (&sub_input, "num-buckets %u", &n_buckets))
;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, &sub_input);
}
ipsec_tun_table_init (AF_IP4, table_size, n_buckets);
}
else if (unformat (input, "ip6 %U", unformat_vlib_cli_sub_input,
&sub_input))
{
uword table_size = ~0;
u32 n_buckets = ~0;
while (unformat_check_input (&sub_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (&sub_input, "num-buckets %u", &n_buckets))
;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, &sub_input);
}
ipsec_tun_table_init (AF_IP6, table_size, n_buckets);
}
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
}
if (im->output_flow_cache_flag)
{
vec_add2 (im->ipsec4_out_spd_hash_tbl, im->ipsec4_out_spd_hash_tbl,
im->ipsec4_out_spd_hash_num_buckets);
}
if (im->input_flow_cache_flag)
{
vec_add2 (im->ipsec4_in_spd_hash_tbl, im->ipsec4_in_spd_hash_tbl,
im->ipsec4_in_spd_hash_num_buckets);
}
if (fp_spd_ip4_enabled)
pool_alloc_aligned (im->fp_ip4_lookup_hashes_pool,
IPSEC_FP_IP4_HASHES_POOL_SIZE, CLIB_CACHE_LINE_BYTES);
if (fp_spd_ip6_enabled)
pool_alloc_aligned (im->fp_ip6_lookup_hashes_pool,
IPSEC_FP_IP6_HASHES_POOL_SIZE, CLIB_CACHE_LINE_BYTES);
return 0;
}
VLIB_CONFIG_FUNCTION (ipsec_config, "ipsec");
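/* Example startup.conf stanza exercising the parser above (values are
 * illustrative; every key shown is handled by ipsec_config):
 *
 *   ipsec {
 *     ipv4-outbound-spd-fast-path on
 *     ipv4-inbound-spd-fast-path on
 *     ipv6-outbound-spd-fast-path on
 *     spd-fast-path-num-buckets 1048576
 *     ip4 { num-buckets 65536 }
 *     ip6 { num-buckets 65536 }
 *   }
 */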
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/