/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ESP_H__
#define __ESP_H__

#include <vnet/ip/ip.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec.api_enum.h>

typedef struct
{
  union
  {
    u32 spi;
    u8 spi_bytes[4];
  };
  u32 seq;
  u8 data[0];
} esp_header_t;

typedef struct
{
  u8 pad_length;
  u8 next_header;
} esp_footer_t;

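/*
 * Illustrative sketch (not part of this header's API): after decryption, the
 * footer sits immediately before the ICV at the tail of the payload. The
 * variable names below (payload, payload_len, icv_sz) are assumptions used
 * only for the example.
 *
 *   esp_footer_t *f = (esp_footer_t *) (payload + payload_len - icv_sz -
 *                                       sizeof (esp_footer_t));
 *   u16 plaintext_len =
 *     payload_len - icv_sz - sizeof (esp_footer_t) - f->pad_length;
 *   u8 inner_proto = f->next_header;
 */
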
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  udp_header_t udp;
  esp_header_t esp;
}) ip4_and_udp_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_header_t ip6;
  esp_header_t esp;
}) ip6_and_esp_header_t;
/* *INDENT-ON* */

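/*
 * Usage sketch (assumed context): these packed overlays are typically cast
 * directly onto packet data so the ESP header can be reached without
 * per-field parsing, e.g. for UDP-encapsulated (NAT-T) ESP:
 *
 *   ip4_and_udp_and_esp_header_t *h = vlib_buffer_get_current (b);
 *   u32 spi = clib_net_to_host_u32 (h->esp.spi);
 */
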
/**
 * AES counter mode nonce
 */
typedef struct
{
  u32 salt;
  u64 iv;
  u32 ctr; /* counter: 1 in big-endian for ctr, unused for gcm */
} __clib_packed esp_ctr_nonce_t;

STATIC_ASSERT_SIZEOF (esp_ctr_nonce_t, 16);

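/*
 * Illustrative fill of the nonce for AES-CTR/AES-GCM; a sketch only - where
 * the salt and the per-packet IV below come from is an assumption for the
 * example, not a statement of what the data path does.
 *
 *   esp_ctr_nonce_t nonce;
 *   nonce.salt = salt_from_key_material;
 *   nonce.iv = per_packet_iv;
 *   nonce.ctr = clib_host_to_net_u32 (1); // CTR mode only; unused for GCM
 */
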
/**
 * AES GCM Additional Authentication data
 */
typedef struct esp_aead_t_
{
  /**
   * for GCM: when using ESN it's:
   *   SPI, seq-hi, seq-low
   * else
   *   SPI, seq-low
   */
  u32 data[3];
} __clib_packed esp_aead_t;

#define ESP_SEQ_MAX (4294967295UL)

u8 *format_esp_header (u8 * s, va_list * args);

/* TODO: the seq increment should be atomic so it can safely be accessed by
   multiple workers */
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
        {
          if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
                             sa->seq_hi == ESP_SEQ_MAX))
            return 1;
          sa->seq_hi++;
        }
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
                         sa->seq == ESP_SEQ_MAX))
        return 1;
      sa->seq++;
    }

  return 0;
}

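/*
 * Typical caller pattern (sketch): a non-zero return means the sequence
 * number space is exhausted, so the packet must be dropped rather than sent
 * with a reused sequence number. The error handling shown is an assumption.
 *
 *   if (PREDICT_FALSE (esp_seq_advance (sa)))
 *     {
 *       // count/drop as a seq-cycled error, e.g. ESP_ENCRYPT_ERROR_SEQ_CYCLED
 *     }
 */
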
always_inline u16
esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
              u32 seq_hi)
{
  esp_aead_t *aad;

  aad = (esp_aead_t *) data;
  aad->data[0] = esp->spi;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      /* SPI, seq-hi, seq-low */
      aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
      aad->data[2] = esp->seq;
      return 12;
    }
  else
    {
      /* SPI, seq-low */
      aad->data[1] = esp->seq;
      return 8;
    }
}

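/*
 * Usage sketch: the returned length (8 bytes, or 12 with ESN) is passed to
 * the crypto layer along with the AAD buffer. The op field names in the
 * comment are illustrative assumptions.
 *
 *   u8 aad[sizeof (esp_aead_t)];
 *   u16 aad_len = esp_aad_fill (aad, esp, sa, sa->seq_hi);
 *   // e.g. op->aad = aad; op->aad_len = aad_len;
 */
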
always_inline u32
esp_encrypt_err_to_sa_err (u32 err)
{
  switch (err)
    {
    case ESP_ENCRYPT_ERROR_HANDOFF:
      return IPSEC_SA_ERROR_HANDOFF;
    case ESP_ENCRYPT_ERROR_SEQ_CYCLED:
      return IPSEC_SA_ERROR_SEQ_CYCLED;
    case ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
    case ESP_ENCRYPT_ERROR_CRYPTO_QUEUE_FULL:
      return IPSEC_SA_ERROR_CRYPTO_QUEUE_FULL;
    case ESP_ENCRYPT_ERROR_NO_BUFFERS:
      return IPSEC_SA_ERROR_NO_BUFFERS;
    case ESP_ENCRYPT_ERROR_NO_ENCRYPTION:
      return IPSEC_SA_ERROR_NO_ENCRYPTION;
    }
  return ~0;
}

always_inline u32
esp_decrypt_err_to_sa_err (u32 err)
{
  switch (err)
    {
    case ESP_DECRYPT_ERROR_HANDOFF:
      return IPSEC_SA_ERROR_HANDOFF;
    case ESP_DECRYPT_ERROR_DECRYPTION_FAILED:
      return IPSEC_SA_ERROR_DECRYPTION_FAILED;
    case ESP_DECRYPT_ERROR_INTEG_ERROR:
      return IPSEC_SA_ERROR_INTEG_ERROR;
    case ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR:
      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
    case ESP_DECRYPT_ERROR_REPLAY:
      return IPSEC_SA_ERROR_REPLAY;
    case ESP_DECRYPT_ERROR_RUNT:
      return IPSEC_SA_ERROR_RUNT;
    case ESP_DECRYPT_ERROR_NO_BUFFERS:
      return IPSEC_SA_ERROR_NO_BUFFERS;
    case ESP_DECRYPT_ERROR_OVERSIZED_HEADER:
      return IPSEC_SA_ERROR_OVERSIZED_HEADER;
    case ESP_DECRYPT_ERROR_NO_TAIL_SPACE:
      return IPSEC_SA_ERROR_NO_TAIL_SPACE;
    case ESP_DECRYPT_ERROR_TUN_NO_PROTO:
      return IPSEC_SA_ERROR_TUN_NO_PROTO;
    case ESP_DECRYPT_ERROR_UNSUP_PAYLOAD:
      return IPSEC_SA_ERROR_UNSUP_PAYLOAD;
    }
  return ~0;
}

always_inline void
esp_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
                            u32 thread_index, u32 err, u16 index, u16 *nexts,
                            u16 drop_next, u32 sa_index)
{
  ipsec_set_next_index (b, node, thread_index, err,
                        esp_encrypt_err_to_sa_err (err), index, nexts,
                        drop_next, sa_index);
}

always_inline void
esp_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
                            u32 thread_index, u32 err, u16 index, u16 *nexts,
                            u16 drop_next, u32 sa_index)
{
  ipsec_set_next_index (b, node, thread_index, err,
                        esp_decrypt_err_to_sa_err (err), index, nexts,
                        drop_next, sa_index);
}

/**
 * The post data structure for esp_encrypt/decrypt_inline to write to the
 * vlib_buffer_t opaque unused field, and for the post nodes to pick up after
 * dequeue.
 **/
typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags;
      u32 sa_index;
    };
    u64 sa_data;
  };

  u32 seq;
  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
  u16 is_chain;
  u32 seq_hi;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
STATIC_ASSERT_OFFSET_OF (esp_decrypt_packet_data_t, seq, sizeof (u64));

/* we are forced to store the decrypt post data in two separate places -
   the vlib_buffer_t opaque and opaque2 fields. */
typedef struct
{
  vlib_buffer_t *lb;
  u32 free_buffer_index;
  u8 icv_removed;
} esp_decrypt_packet_data2_t;

typedef union
{
  u16 next_index;
  esp_decrypt_packet_data_t decrypt_data;
} esp_post_data_t;

STATIC_ASSERT (sizeof (esp_post_data_t) <=
               STRUCT_SIZE_OF (vnet_buffer_opaque_t, unused),
               "Custom meta-data too large for vnet_buffer_opaque_t");

#define esp_post_data(b) \
    ((esp_post_data_t *)((u8 *)((b)->opaque) \
        + STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))

STATIC_ASSERT (sizeof (esp_decrypt_packet_data2_t) <=
               STRUCT_SIZE_OF (vnet_buffer_opaque2_t, unused),
               "Custom meta-data too large for vnet_buffer_opaque2_t");

#define esp_post_data2(b) \
    ((esp_decrypt_packet_data2_t *)((u8 *)((b)->opaque2) \
        + STRUCT_OFFSET_OF (vnet_buffer_opaque2_t, unused)))

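/*
 * Access sketch: the async path stashes per-packet state in the buffer's
 * opaque areas before submission and reads it back in the post node, e.g.:
 *
 *   esp_post_data (b)->next_index = next;        // before async submit
 *   u16 next = esp_post_data (b)->next_index;    // in the post node
 *   vlib_buffer_t *lb = esp_post_data2 (b)->lb;  // chained-buffer decrypt data
 */
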
typedef struct
{
  /* esp post node index for async crypto */
  u32 esp4_post_next;
  u32 esp6_post_next;
  u32 esp4_tun_post_next;
  u32 esp6_tun_post_next;
  u32 esp_mpls_tun_post_next;
} esp_async_post_next_t;

extern esp_async_post_next_t esp_encrypt_async_next;
extern esp_async_post_next_t esp_decrypt_async_next;

/* if submitting a frame fails, drop all buffers in the frame */
always_inline u32
esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
                                 vlib_node_runtime_t *node, u32 err,
                                 u32 ipsec_sa_err, u16 index, u32 *from,
                                 u16 *nexts, u16 drop_next_index,
                                 bool is_encrypt)
{
  vlib_buffer_t *b;
  u32 n_drop = f->n_elts;
  u32 *bi = f->buffer_indices;

  while (n_drop--)
    {
      u32 sa_index;

      from[index] = bi[0];
      b = vlib_get_buffer (vm, bi[0]);

      if (is_encrypt)
        {
          sa_index = vnet_buffer (b)->ipsec.sad_index;
        }
      else
        {
          sa_index = esp_post_data (b)->decrypt_data.sa_index;
        }

      ipsec_set_next_index (b, node, vm->thread_index, err, ipsec_sa_err,
                            index, nexts, drop_next_index, sa_index);
      bi++;
      index++;
    }

  return (f->n_elts);
}

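/*
 * Sketch of an expected call site (the surrounding names are assumptions):
 * when an async frame cannot be submitted, every buffer in it is rerouted to
 * the drop next index.
 *
 *   if (vnet_crypto_async_submit_open_frame (vm, f) < 0)
 *     n_dropped += esp_async_recycle_failed_submit (
 *       vm, f, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
 *       IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_dropped, from, nexts,
 *       drop_next, true);                        // true => encrypt path
 */
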
#endif /* __ESP_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */