/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ESP_H__
#define __ESP_H__

#include <vnet/ip/ip.h>
#include <vnet/ipsec/ipsec.h>

#include <openssl/hmac.h>
#include <openssl/rand.h>
#include <openssl/evp.h>

typedef struct
{
  u32 spi;
  u32 seq;
  u8 data[0];
} esp_header_t;

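/*
 * ESP trailer that sits at the end of the decrypted payload: pad_length
 * gives the number of padding bytes preceding it, and next_header carries
 * the protocol of the encapsulated packet.
 */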
typedef struct
{
  u8 pad_length;
  u8 next_header;
} esp_footer_t;

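/*
 * Packed overlays of an outer IP header immediately followed by the ESP
 * header, convenient for parsing and building encapsulated packets in one
 * step.
 */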
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_header_t ip6;
  esp_header_t esp;
}) ip6_and_esp_header_t;
/* *INDENT-ON* */

typedef struct
{
  const EVP_CIPHER *type;
} esp_crypto_alg_t;

typedef struct
{
  const EVP_MD *md;
  u8 trunc_size;
} esp_integ_alg_t;

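/*
 * Per-thread OpenSSL state, with each context on its own cache line to
 * avoid false sharing between workers. OpenSSL 1.1.0 made EVP_CIPHER_CTX
 * and HMAC_CTX opaque, hence the pointer variants selected by
 * OPENSSL_VERSION_NUMBER. The last_*_alg fields let the data plane skip
 * re-selecting a cipher/digest when consecutive packets use the same
 * algorithm.
 */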
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  EVP_CIPHER_CTX *encrypt_ctx;
#else
  EVP_CIPHER_CTX encrypt_ctx;
#endif
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  EVP_CIPHER_CTX *decrypt_ctx;
#else
  EVP_CIPHER_CTX decrypt_ctx;
#endif
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  HMAC_CTX *hmac_ctx;
#else
  HMAC_CTX hmac_ctx;
#endif
  ipsec_crypto_alg_t last_encrypt_alg;
  ipsec_crypto_alg_t last_decrypt_alg;
  ipsec_integ_alg_t last_integ_alg;
} esp_main_per_thread_data_t;

typedef struct
{
  esp_crypto_alg_t *esp_crypto_algs;
  esp_integ_alg_t *esp_integ_algs;
  esp_main_per_thread_data_t *per_thread_data;
} esp_main_t;

extern esp_main_t esp_main;

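/* 64-bit anti-replay window and the largest 32-bit sequence number. */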
#define ESP_WINDOW_SIZE (64)
#define ESP_SEQ_MAX (4294967295UL)

u8 *format_esp_header (u8 * s, va_list * args);

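/*
 * Anti-replay check for an SA without extended sequence numbers. Returns 1
 * if the packet must be rejected (already seen, or older than the 64-packet
 * window), 0 if it may be accepted. For example, with last_seq = 1000 a
 * packet with seq = 980 (diff = 20) is accepted only while bit 20 of
 * replay_window is clear, and seq = 900 (diff >= ESP_WINDOW_SIZE) is
 * rejected outright.
 *
 * A minimal sketch of the intended inbound usage (hypothetical caller, not
 * part of this header) - check before decryption, advance the window only
 * after the ICV has verified:
 *
 *   if (sa->use_anti_replay && esp_replay_check (sa, seq))
 *     drop_packet ();               // replay or outside the window
 *   // ... verify ICV, decrypt ...
 *   if (sa->use_anti_replay)
 *     esp_replay_advance (sa, seq);
 */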
always_inline int
esp_replay_check (ipsec_sa_t * sa, u32 seq)
{
  u32 diff;

  if (PREDICT_TRUE (seq > sa->last_seq))
    return 0;

  diff = sa->last_seq - seq;

  if (ESP_WINDOW_SIZE > diff)
    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
  else
    return 1;

  return 0;
}

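/*
 * Anti-replay check for an SA using 64-bit extended sequence numbers
 * (modelled on RFC 4303 Appendix A). Only the low 32 bits are carried in
 * the packet, so the position of seq relative to the window decides whether
 * it belongs to the current, the next, or the previous high-order epoch;
 * the inferred high-order word is stored in sa->seq_hi for use by the ICV
 * computation and the later window advance. Returns 1 to reject, 0 to
 * accept.
 */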
always_inline int
esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
{
  u32 tl = sa->last_seq;
  u32 th = sa->last_seq_hi;
  u32 diff = tl - seq;

  if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
    {
      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
        {
          sa->seq_hi = th;
          if (seq <= tl)
            return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
          else
            return 0;
        }
      else
        {
          sa->seq_hi = th + 1;
          return 0;
        }
    }
  else
    {
      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
        {
          sa->seq_hi = th - 1;
          return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
        }
      else
        {
          sa->seq_hi = th;
          if (seq <= tl)
            return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
          else
            return 0;
        }
    }

  return 0;
}

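/*
 * Slide the anti-replay window once a packet has passed verification: a
 * newer sequence number shifts the window forward and marks bit 0, while a
 * sequence number already inside the window simply has its bit set.
 */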
/* TODO: the sequence number update should be atomic so an SA can safely be
 * used by multiple workers */
always_inline void
esp_replay_advance (ipsec_sa_t * sa, u32 seq)
{
  u32 pos;

  if (seq > sa->last_seq)
    {
      pos = seq - sa->last_seq;
      if (pos < ESP_WINDOW_SIZE)
        sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
        sa->replay_window = 1;
      sa->last_seq = seq;
    }
  else
    {
      pos = sa->last_seq - seq;
      sa->replay_window |= (1ULL << pos);
    }
}

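/*
 * ESN variant of the window advance. The sa->seq_hi value computed by
 * esp_replay_check_esn tells whether the accepted packet moved into a new
 * high-order epoch (wrap > 0), lags one epoch behind (wrap < 0), or stayed
 * in the current one, and the window is shifted or marked accordingly.
 */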
always_inline void
esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
{
  int wrap = sa->seq_hi - sa->last_seq_hi;
  u32 pos;

  if (wrap == 0 && seq > sa->last_seq)
    {
      pos = seq - sa->last_seq;
      if (pos < ESP_WINDOW_SIZE)
        sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
        sa->replay_window = 1;
      sa->last_seq = seq;
    }
  else if (wrap > 0)
    {
      pos = ~seq + sa->last_seq + 1;
      if (pos < ESP_WINDOW_SIZE)
        sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
        sa->replay_window = 1;
      sa->last_seq = seq;
      sa->last_seq_hi = sa->seq_hi;
    }
  else if (wrap < 0)
    {
      pos = ~seq + sa->last_seq + 1;
      sa->replay_window |= (1ULL << pos);
    }
  else
    {
      pos = sa->last_seq - seq;
      sa->replay_window |= (1ULL << pos);
    }
}

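/*
 * Advance the outbound sequence number before building an ESP header. With
 * ESN enabled the low 32 bits roll over into seq_hi; without ESN, reaching
 * ESP_SEQ_MAX while anti-replay is enabled exhausts the SA. Returns 1 when
 * the SA must not be used to send further packets, 0 otherwise.
 */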
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (sa->use_esn))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
        {
          if (PREDICT_FALSE
              (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
            return 1;
          sa->seq_hi++;
        }
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
        return 1;
      sa->seq++;
    }

  return 0;
}

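/*
 * One-time setup: populate the supported cipher and integrity algorithm
 * tables with their OpenSSL handles and prepare per-thread cipher/HMAC
 * contexts for every VPP thread.
 */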
always_inline void
esp_init ()
{
  esp_main_t *em = &esp_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  memset (em, 0, sizeof (em[0]));

  vec_validate (em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type = EVP_aes_128_cbc ();
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type = EVP_aes_192_cbc ();
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type = EVP_aes_256_cbc ();

  vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);
  esp_integ_alg_t *i;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
  i->md = EVP_sha1 ();
  i->trunc_size = 12;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
  i->md = EVP_sha256 ();
  i->trunc_size = 12;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
  i->md = EVP_sha256 ();
  i->trunc_size = 16;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
  i->md = EVP_sha384 ();
  i->trunc_size = 24;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
  i->md = EVP_sha512 ();
  i->trunc_size = 32;

  vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  int thread_id;

  /* per_thread_data has tm->n_vlib_mains entries; initialize every one of
   * them, including the last thread */
  for (thread_id = 0; thread_id < tm->n_vlib_mains; thread_id++)
    {
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
      em->per_thread_data[thread_id].encrypt_ctx = EVP_CIPHER_CTX_new ();
      em->per_thread_data[thread_id].decrypt_ctx = EVP_CIPHER_CTX_new ();
      em->per_thread_data[thread_id].hmac_ctx = HMAC_CTX_new ();
#else
      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
      HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
#endif
    }
}

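/*
 * Compute the HMAC of `data` with the given integrity algorithm and key,
 * additionally folding in the high-order ESN word when extended sequence
 * numbers are in use (those bits are authenticated but never transmitted).
 * The full digest is written to `signature`; the return value is the number
 * of bytes of it that form the packet's ICV, or 0 if the algorithm has no
 * digest configured.
 */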
always_inline unsigned int
hmac_calc (ipsec_integ_alg_t alg,
           u8 * key,
           int key_len,
           u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
{
  esp_main_t *em = &esp_main;
  u32 thread_index = vlib_get_thread_index ();
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  HMAC_CTX *ctx = em->per_thread_data[thread_index].hmac_ctx;
#else
  HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx);
#endif
  const EVP_MD *md = NULL;
  unsigned int len;

  ASSERT (alg < IPSEC_INTEG_N_ALG);

  if (PREDICT_FALSE (em->esp_integ_algs[alg].md == 0))
    return 0;

  if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg))
    {
      md = em->esp_integ_algs[alg].md;
      em->per_thread_data[thread_index].last_integ_alg = alg;
    }

  /* a NULL md makes HMAC_Init_ex reuse the digest already set on the ctx */
  HMAC_Init_ex (ctx, key, key_len, md, NULL);

  HMAC_Update (ctx, data, data_len);

  if (PREDICT_TRUE (use_esn))
    HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
  HMAC_Final (ctx, signature, &len);

  return em->esp_integ_algs[alg].trunc_size;
}

#endif /* __ESP_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */