/*
 * ipsec_if_in.c : IPSec interface input node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>

/* Statistics (not really errors) */
#define foreach_ipsec_if_input_error                              \
_(RX, "good packets received")                                    \
_(DISABLED, "ipsec packets received on disabled interface")       \
_(NO_TUNNEL, "no matching tunnel")

static char *ipsec_if_input_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_if_input_error
#undef _
};

typedef enum
{
#define _(sym,str) IPSEC_IF_INPUT_ERROR_##sym,
  foreach_ipsec_if_input_error
#undef _
    IPSEC_IF_INPUT_N_ERROR,
} ipsec_if_input_error_t;


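/* Per-packet trace payload: the ESP SPI and sequence number, converted to
   host byte order when the trace is recorded. */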
typedef struct
{
  u32 spi;
  u32 seq;
} ipsec_if_input_trace_t;

static u8 *
format_ipsec_if_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ipsec_if_input_trace_t *t = va_arg (*args, ipsec_if_input_trace_t *);

  s = format (s, "IPSec: spi %u seq %u", t->spi, t->seq);
  return s;
}

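/*
 * For each packet: look up the ipsec tunnel interface keyed on the outer
 * IPv4 source address and ESP SPI, stamp the buffer with the tunnel's input
 * SA index (and, for interface-backed tunnels, the RX sw_if_index), keep
 * per-interface RX/drop counters, and send the packet on to ESP decrypt.
 * Packets that match no tunnel, or whose tunnel interface is not up, are
 * counted and dropped.
 */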
VLIB_NODE_FN (ipsec_if_input_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * from_frame)
{
  ipsec_main_t *im = &ipsec_main;
  vnet_main_t *vnm = im->vnet_main;
  vnet_interface_main_t *vim = &vnm->interface_main;
  u32 *from, *to_next = 0, next_index;
  u32 n_left_from, last_sw_if_index = ~0;
  u32 thread_index = vm->thread_index;
  u64 n_bytes = 0, n_packets = 0;
  const ipsec_tunnel_if_t *last_t = NULL;
  vlib_combined_counter_main_t *rx_counter;
  vlib_combined_counter_main_t *drop_counter;
  u32 n_disabled = 0, n_no_tunnel = 0;

  rx_counter = vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
  drop_counter = vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;

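  /* RX stats are batched per tunnel interface: n_packets/n_bytes accumulate
     while consecutive packets hit the same sw_if_index and are flushed into
     rx_counter on an interface change and once more after the frame. */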
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

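      /* Dual loop: handle two packets per iteration, prefetching the buffer
         headers and IP headers of the following two. */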
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1, next0, next1, sw_if_index0, sw_if_index1;
          const esp_header_t *esp0, *esp1;
          const ip4_header_t *ip0, *ip1;
          vlib_buffer_t *b0, *b1;
          uword *p0, *p1;
          u32 len0, len1;
          u64 key0, key1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;
          next0 = next1 = IPSEC_INPUT_NEXT_DROP;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          ip0 = vlib_buffer_get_current (b0);
          ip1 = vlib_buffer_get_current (b1);
          esp0 = (const esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
          esp1 = (const esp_header_t *) ((u8 *) ip1 + ip4_header_bytes (ip1));

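          /* Tunnel lookup key: outer IPv4 source address in the upper 32
             bits, ESP SPI in the lower 32, both taken as-is in network byte
             order; im->ipsec_if_pool_index_by_key is expected to be keyed
             the same way when tunnels are created. */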
          key0 = (u64) ip0->src_address.as_u32 << 32 | (u64) esp0->spi;
          key1 = (u64) ip1->src_address.as_u32 << 32 | (u64) esp1->spi;

          p0 = hash_get (im->ipsec_if_pool_index_by_key, key0);
          p1 = hash_get (im->ipsec_if_pool_index_by_key, key1);

          /* stats for the tunnel include all the data after the IP header
             just like a normal IP-IP tunnel */
          vlib_buffer_advance (b0, ip4_header_bytes (ip0));
          vlib_buffer_advance (b1, ip4_header_bytes (ip1));
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);

          if (PREDICT_TRUE (NULL != p0))
            {
              const ipsec_tunnel_if_t *t0;

              t0 = pool_elt_at_index (im->tunnel_interfaces, p0[0]);
              vnet_buffer (b0)->ipsec.sad_index = t0->input_sa_index;

              if (PREDICT_TRUE (t0->hw_if_index != ~0))
                {
                  vnet_buffer (b0)->ipsec.flags = 0;
                  sw_if_index0 = t0->sw_if_index;
                  vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;

                  if (PREDICT_FALSE
                      (!(t0->flags & VNET_HW_INTERFACE_FLAG_LINK_UP)))
                    {
                      vlib_increment_combined_counter
                        (drop_counter, thread_index, sw_if_index0, 1, len0);
                      b0->error = node->errors[IPSEC_IF_INPUT_ERROR_DISABLED];
                      n_disabled++;
                      goto pkt1;
                    }

                  if (PREDICT_TRUE (sw_if_index0 == last_sw_if_index))
                    {
                      n_packets++;
                      n_bytes += len0;
                    }
                  else
                    {
                      if (last_t)
                        {
                          vlib_increment_combined_counter
                            (rx_counter, thread_index, last_sw_if_index,
                             n_packets, n_bytes);
                        }

                      last_sw_if_index = sw_if_index0;
                      last_t = t0;
                      n_packets = 1;
                      n_bytes = len0;
                    }
                }
              else
                {
                  vnet_buffer (b0)->ipsec.flags = IPSEC_FLAG_IPSEC_GRE_TUNNEL;
                }

              next0 = im->esp4_decrypt_next_index;
            }
          else
            {
              b0->error = node->errors[IPSEC_IF_INPUT_ERROR_NO_TUNNEL];
              n_no_tunnel++;
            }

        pkt1:
          if (PREDICT_TRUE (NULL != p1))
            {
              const ipsec_tunnel_if_t *t1;

              t1 = pool_elt_at_index (im->tunnel_interfaces, p1[0]);
              vnet_buffer (b1)->ipsec.sad_index = t1->input_sa_index;

              if (PREDICT_TRUE (t1->hw_if_index != ~0))
                {
                  vnet_buffer (b1)->ipsec.flags = 0;
                  sw_if_index1 = t1->sw_if_index;
                  vnet_buffer (b1)->sw_if_index[VLIB_RX] = sw_if_index1;

                  if (PREDICT_FALSE
                      (!(t1->flags & VNET_HW_INTERFACE_FLAG_LINK_UP)))
                    {
                      vlib_increment_combined_counter
                        (drop_counter, thread_index, sw_if_index1, 1, len1);
                      b1->error = node->errors[IPSEC_IF_INPUT_ERROR_DISABLED];
                      n_disabled++;
                      goto trace1;
                    }

                  if (PREDICT_TRUE (sw_if_index1 == last_sw_if_index))
                    {
                      n_packets++;
                      n_bytes += len1;
                    }
                  else
                    {
                      if (last_t)
                        {
                          vlib_increment_combined_counter
236 (rx_counter, thread_index, sw_if_index1,
237 n_packets, n_bytes);
                        }

                      last_sw_if_index = sw_if_index1;
                      last_t = t1;
                      n_packets = 1;
                      n_bytes = len1;
                    }
                }
              else
                {
                  vnet_buffer (b1)->ipsec.flags = IPSEC_FLAG_IPSEC_GRE_TUNNEL;
                }

              next1 = im->esp4_decrypt_next_index;
            }
          else
            {
              b1->error = node->errors[IPSEC_IF_INPUT_ERROR_NO_TUNNEL];
              n_no_tunnel++;
            }

        trace1:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_if_input_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->spi = clib_host_to_net_u32 (esp0->spi);
              tr->seq = clib_host_to_net_u32 (esp0->seq);
            }
          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_if_input_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->spi = clib_host_to_net_u32 (esp1->spi);
              tr->seq = clib_host_to_net_u32 (esp1->seq);
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
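      /* Single loop: same lookup and accounting for any remaining packets. */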
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0, sw_if_index0;
          const esp_header_t *esp0;
          const ip4_header_t *ip0;
          vlib_buffer_t *b0;
          uword *p;
          u32 len0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);
          esp0 = (const esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

          next0 = IPSEC_INPUT_NEXT_DROP;

          u64 key = (u64) ip0->src_address.as_u32 << 32 | (u64) esp0->spi;

          p = hash_get (im->ipsec_if_pool_index_by_key, key);

          /* stats for the tunnel include all the data after the IP header
             just like a normal IP-IP tunnel */
          vlib_buffer_advance (b0, ip4_header_bytes (ip0));
          len0 = vlib_buffer_length_in_chain (vm, b0);

          if (PREDICT_TRUE (NULL != p))
            {
              const ipsec_tunnel_if_t *t0;

              t0 = pool_elt_at_index (im->tunnel_interfaces, p[0]);
              vnet_buffer (b0)->ipsec.sad_index = t0->input_sa_index;

              if (PREDICT_TRUE (t0->hw_if_index != ~0))
                {
                  vnet_buffer (b0)->ipsec.flags = 0;
                  sw_if_index0 = t0->sw_if_index;
                  vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;

                  if (PREDICT_FALSE
                      (!(t0->flags & VNET_HW_INTERFACE_FLAG_LINK_UP)))
                    {
                      vlib_increment_combined_counter
                        (drop_counter, thread_index, sw_if_index0, 1, len0);
                      b0->error = node->errors[IPSEC_IF_INPUT_ERROR_DISABLED];
                      n_disabled++;
                      goto trace;
                    }

                  if (PREDICT_TRUE (sw_if_index0 == last_sw_if_index))
                    {
                      n_packets++;
                      n_bytes += len0;
                    }
                  else
                    {
                      if (last_t)
                        {
                          vlib_increment_combined_counter
                            (rx_counter, thread_index, last_sw_if_index,
                             n_packets, n_bytes);
                        }

                      last_sw_if_index = sw_if_index0;
                      last_t = t0;
                      n_packets = 1;
                      n_bytes = len0;
                    }
                }
              else
                {
                  vnet_buffer (b0)->ipsec.flags = IPSEC_FLAG_IPSEC_GRE_TUNNEL;
                }

              next0 = im->esp4_decrypt_next_index;
            }
          else
            {
              b0->error = node->errors[IPSEC_IF_INPUT_ERROR_NO_TUNNEL];
              n_no_tunnel++;
            }

        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_if_input_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->spi = clib_host_to_net_u32 (esp0->spi);
              tr->seq = clib_host_to_net_u32 (esp0->seq);
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  if (last_t)
    {
      vlib_increment_combined_counter (rx_counter,
                                       thread_index,
                                       last_sw_if_index, n_packets, n_bytes);
    }

  vlib_node_increment_counter (vm, ipsec_if_input_node.index,
                               IPSEC_IF_INPUT_ERROR_RX,
                               from_frame->n_vectors - n_disabled);

  vlib_node_increment_counter (vm, ipsec_if_input_node.index,
                               IPSEC_IF_INPUT_ERROR_DISABLED, n_disabled);
  vlib_node_increment_counter (vm, ipsec_if_input_node.index,
                               IPSEC_IF_INPUT_ERROR_NO_TUNNEL, n_no_tunnel);

  return from_frame->n_vectors;
}

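/*
 * Registered as a sibling of ipsec4-input-feature, so this node shares that
 * node's next-node arcs; this is what makes a next index such as
 * im->esp4_decrypt_next_index usable directly above.
 */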
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ipsec_if_input_node) = {
  .name = "ipsec-if-input",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_if_input_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_if_input_error_strings),
  .error_strings = ipsec_if_input_error_strings,

  .sibling_of = "ipsec4-input-feature",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */