blob: 09166bccf5b25955e04054b8e637149680074376 [file] [log] [blame]
Ed Warnickecb9cada2015-12-08 15:45:58 -07001/*
2 * decap.c : IPSec tunnel decapsulation
3 *
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#include <vnet/vnet.h>
19#include <vnet/api_errno.h>
20#include <vnet/ip/ip.h>
Damjan Marion8b3191e2016-11-09 19:54:20 +010021#include <vnet/feature/feature.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070022
23#include <vnet/ipsec/ipsec.h>
24#include <vnet/ipsec/esp.h>
“mukeshyadav1984”430ac932017-11-23 02:39:33 -080025#include <vnet/ipsec/ah.h>
Neale Ranns918c1612019-02-21 23:34:59 -080026#include <vnet/ipsec/ipsec_io.h>
Ed Warnickecb9cada2015-12-08 15:45:58 -070027
/* Per-node error/counter definitions: (symbol, description) pairs,
   expanded through the _() macro into both the enum and string table below. */
#define foreach_ipsec_input_error                    \
_(RX_PKTS, "IPSec pkts received")                    \
_(RX_POLICY_MATCH, "IPSec policy match")             \
_(RX_POLICY_NO_MATCH, "IPSec policy not matched")    \
_(RX_POLICY_BYPASS, "IPSec policy bypass")           \
_(RX_POLICY_DISCARD, "IPSec policy discard")
Ed Warnickecb9cada2015-12-08 15:45:58 -070034
/* Error codes for this node, one IPSEC_INPUT_ERROR_* per entry in
   foreach_ipsec_input_error. */
typedef enum
{
#define _(sym,str) IPSEC_INPUT_ERROR_##sym,
  foreach_ipsec_input_error
#undef _
    IPSEC_INPUT_N_ERROR,	/* total number of error counters */
} ipsec_input_error_t;
42
/* Human-readable counter names, indexed by ipsec_input_error_t. */
static char *ipsec_input_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_input_error
#undef _
};
48
/* Per-packet trace record captured when node tracing is enabled. */
typedef struct
{
  ip_protocol_t proto;		/* IP payload protocol (ESP/AH/UDP) */
  u32 spd;			/* id of the SPD the packet was classified against */
  u32 policy_index;		/* pool index of the matched policy, ~0 if none */
  u32 sa_id;			/* sa_id of the matched policy, ~0 if none */
  u32 spi;			/* SPI from the ESP/AH header (host order), ~0 if truncated */
  u32 seq;			/* sequence number from the ESP/AH header */
} ipsec_input_trace_t;
58
59/* packet trace format function */
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -070060static u8 *
61format_ipsec_input_trace (u8 * s, va_list * args)
Ed Warnickecb9cada2015-12-08 15:45:58 -070062{
63 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
64 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -070065 ipsec_input_trace_t *t = va_arg (*args, ipsec_input_trace_t *);
Ed Warnickecb9cada2015-12-08 15:45:58 -070066
Guillaume Solignac8f818cc2019-05-15 12:02:33 +020067 s = format (s, "%U: sa_id %u spd %u policy %d spi %u (0x%08x) seq %u",
Neale Rannsa09c1ff2019-02-04 01:10:30 -080068 format_ip_protocol, t->proto, t->sa_id,
Guillaume Solignac8f818cc2019-05-15 12:02:33 +020069 t->spd, t->policy_index, t->spi, t->spi, t->seq);
Matus Fabian5539a072016-09-07 05:57:09 -070070
Ed Warnickecb9cada2015-12-08 15:45:58 -070071 return s;
72}
73
/*
 * Insert (or overwrite) an inbound SPD flow-cache entry mapping the
 * (src, dst, policy_type) tuple to a policy pool index.  The 64-bit
 * bucket value packs the policy index in the upper 32 bits and the
 * current epoch count in the lower 32 bits; lookups discard entries
 * whose epoch is stale.  sa/da are expected in host byte order.
 */
always_inline void
ipsec4_input_spd_add_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
				       ipsec_spd_policy_type_t policy_type,
				       u32 pol_id)
{
  u64 hash;
  u8 is_overwrite = 0, is_stale_overwrite = 0;
  /* Store in network byte order to avoid conversion on lookup */
  ipsec4_inbound_spd_tuple_t ip4_tuple = {
    .ip4_src_addr = (ip4_address_t) clib_host_to_net_u32 (sa),
    .ip4_dest_addr = (ip4_address_t) clib_host_to_net_u32 (da),
    .policy_type = policy_type
  };

  /* value layout: (policy index << 32) | current epoch count */
  ip4_tuple.kv_16_8.value =
    (((u64) pol_id) << 32) | ((u64) im->input_epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
  /* bucket count is a power of two, so mask instead of modulo */
  hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know
     whether to increment the flow cache counter. Since flow
     cache counter is reset on any policy add/remove, but
     hash table values are not, we need to check if the entry
     we are overwriting is stale or not. If it's a stale entry
     overwrite, we still want to increment flow cache counter */
  is_overwrite = (im->ipsec4_in_spd_hash_tbl[hash].value != 0);
  /* Check if we are overwriting a stale entry by comparing
     with current epoch count */
  if (PREDICT_FALSE (is_overwrite))
    is_stale_overwrite =
      (im->input_epoch_count !=
       ((u32) (im->ipsec4_in_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_in_spd_hash_tbl[hash], &ip4_tuple.kv_16_8,
		    sizeof (ip4_tuple.kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!is_overwrite || is_stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_in_spd_flow_cache_entries, 1);

  return;
}
119
Ed Warnickecb9cada2015-12-08 15:45:58 -0700120always_inline ipsec_policy_t *
Zachary Leaf7cd35f52021-06-25 08:11:15 -0500121ipsec4_input_spd_find_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
122 ipsec_spd_policy_type_t policy_type)
123{
124 ipsec_policy_t *p = NULL;
125 ipsec4_hash_kv_16_8_t kv_result;
126 u64 hash;
127 ipsec4_inbound_spd_tuple_t ip4_tuple = { .ip4_src_addr = (ip4_address_t) sa,
128 .ip4_dest_addr = (ip4_address_t) da,
129 .policy_type = policy_type };
130
131 hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
132 hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);
133
134 ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
135 kv_result = im->ipsec4_in_spd_hash_tbl[hash];
136 ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
137
138 if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_tuple.kv_16_8,
139 (u64 *) &kv_result))
140 {
141 if (im->input_epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
142 {
143 /* Get the policy based on the index */
144 p =
145 pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
146 }
147 }
148
149 return p;
150}
151
152always_inline ipsec_policy_t *
153ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da,
ShivaShankarK05464832020-04-14 14:01:03 +0530154 ipsec_spd_policy_type_t policy_type)
155{
156 ipsec_main_t *im = &ipsec_main;
157 ipsec_policy_t *p;
158 u32 *i;
159
160 vec_foreach (i, spd->policies[policy_type])
161 {
162 p = pool_elt_at_index (im->policies, *i);
163
164 if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
165 continue;
166
167 if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
168 continue;
169
170 if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
171 continue;
172
173 if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
174 continue;
175
Zachary Leaf7cd35f52021-06-25 08:11:15 -0500176 if (im->input_flow_cache_flag)
177 {
178 /* Add an Entry in Flow cache */
179 ipsec4_input_spd_add_flow_cache_entry (im, sa, da, policy_type, *i);
180 }
ShivaShankarK05464832020-04-14 14:01:03 +0530181 return p;
182 }
183 return 0;
184}
185
186always_inline ipsec_policy_t *
Zachary Leaf7cd35f52021-06-25 08:11:15 -0500187ipsec_input_protect_policy_match (ipsec_spd_t *spd, u32 sa, u32 da, u32 spi)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700188{
189 ipsec_main_t *im = &ipsec_main;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700190 ipsec_policy_t *p;
191 ipsec_sa_t *s;
192 u32 *i;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700193
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800194 vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT])
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700195 {
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800196 p = pool_elt_at_index (im->policies, *i);
Neale Rannsc5fe57d2021-02-25 16:01:28 +0000197 s = ipsec_sa_get (p->sa_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700198
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700199 if (spi != s->spi)
200 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700201
Damjan Mariond709cbc2019-03-26 13:16:42 +0100202 if (ipsec_sa_is_set_IS_TUNNEL (s))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700203 {
Neale Ranns9ec846c2021-02-09 14:04:02 +0000204 if (da != clib_net_to_host_u32 (s->tunnel.t_dst.ip.ip4.as_u32))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700205 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700206
Neale Ranns9ec846c2021-02-09 14:04:02 +0000207 if (sa != clib_net_to_host_u32 (s->tunnel.t_src.ip.ip4.as_u32))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700208 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700209
Zachary Leaf7cd35f52021-06-25 08:11:15 -0500210 goto return_policy;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700211 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700212
Neale Rannsd2029bc2019-07-11 09:31:19 +0000213 if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700214 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700215
Neale Rannsd2029bc2019-07-11 09:31:19 +0000216 if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700217 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700218
Neale Rannsd2029bc2019-07-11 09:31:19 +0000219 if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700220 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700221
Neale Rannsd2029bc2019-07-11 09:31:19 +0000222 if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700223 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700224
Zachary Leaf7cd35f52021-06-25 08:11:15 -0500225 return_policy:
226 if (im->input_flow_cache_flag)
227 {
228 /* Add an Entry in Flow cache */
229 ipsec4_input_spd_add_flow_cache_entry (
230 im, sa, da, IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT, *i);
231 }
232
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700233 return p;
234 }
235 return 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700236}
237
238always_inline uword
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700239ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
240 ip6_address_t * ua)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700241{
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700242 if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
243 (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
Ed Warnickecb9cada2015-12-08 15:45:58 -0700244 return 1;
245 return 0;
246}
247
248always_inline ipsec_policy_t *
Klement Sekerabe5a5dd2018-10-09 16:05:48 +0200249ipsec6_input_protect_policy_match (ipsec_spd_t * spd,
250 ip6_address_t * sa,
251 ip6_address_t * da, u32 spi)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700252{
253 ipsec_main_t *im = &ipsec_main;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700254 ipsec_policy_t *p;
255 ipsec_sa_t *s;
256 u32 *i;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700257
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800258 vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT])
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700259 {
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800260 p = pool_elt_at_index (im->policies, *i);
Neale Rannsc5fe57d2021-02-25 16:01:28 +0000261 s = ipsec_sa_get (p->sa_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700262
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700263 if (spi != s->spi)
264 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700265
Damjan Mariond709cbc2019-03-26 13:16:42 +0100266 if (ipsec_sa_is_set_IS_TUNNEL (s))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700267 {
Neale Ranns9ec846c2021-02-09 14:04:02 +0000268 if (!ip6_address_is_equal (sa, &s->tunnel.t_src.ip.ip6))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700269 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700270
Neale Ranns9ec846c2021-02-09 14:04:02 +0000271 if (!ip6_address_is_equal (da, &s->tunnel.t_dst.ip.ip6))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700272 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700273
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700274 return p;
275 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700276
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700277 if (!ip6_addr_match_range (sa, &p->raddr.start.ip6, &p->raddr.stop.ip6))
278 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700279
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700280 if (!ip6_addr_match_range (da, &p->laddr.start.ip6, &p->laddr.stop.ip6))
281 continue;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700282
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700283 return p;
284 }
285 return 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700286}
287
Damjan Mariond770cfc2019-09-02 19:00:33 +0200288extern vlib_node_registration_t ipsec4_input_node;
Jean-Mickael Guerin8941ec22016-03-04 14:14:21 +0100289
/*
 * ipsec4-input-feature node: classify each IPv4 packet against the SPD
 * attached to the rx feature config.  For ESP/UDP and AH packets, the
 * policy lists are consulted in PROTECT -> BYPASS -> DISCARD order
 * (first via the flow cache when enabled, then by linear search).
 * PROTECT matches are handed to the esp4/ah4 decrypt nodes; BYPASS
 * continues on the feature arc; DISCARD and any non-match are dropped.
 */
VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * frame)
{
  u32 n_left_from, *from, thread_index;
  ipsec_main_t *im = &ipsec_main;
  /* per-frame tallies, flushed into node counters after the loop */
  u64 ipsec_unprocessed = 0, ipsec_matched = 0;
  u64 ipsec_dropped = 0, ipsec_bypassed = 0;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = nexts;
  vlib_get_buffers (vm, from, bufs, n_left_from);
  thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      u32 next32, pi0;
      ip4_header_t *ip0;
      esp_header_t *esp0 = NULL;
      ah_header_t *ah0;
      ip4_ipsec_config_t *c0;
      ipsec_spd_t *spd0;
      ipsec_policy_t *p0 = NULL;
      u8 has_space0;
      bool search_flow_cache = false;

      if (n_left_from > 2)
	{
	  /* warm the cache for the next buffer's packet data */
	  vlib_prefetch_buffer_data (b[1], LOAD);
	}

      b[0]->flags |= VNET_BUFFER_F_IS_IP4;
      b[0]->flags &= ~VNET_BUFFER_F_IS_IP6;
      /* default next on the feature arc; overridden on PROTECT/DISCARD */
      c0 = vnet_feature_next_with_data (&next32, b[0], sizeof (c0[0]));
      next[0] = (u16) next32;

      spd0 = pool_elt_at_index (im->spds, c0->spd_index);

      ip0 = vlib_buffer_get_current (b[0]);

      if (PREDICT_TRUE
	  (ip0->protocol == IP_PROTOCOL_IPSEC_ESP
	   || ip0->protocol == IP_PROTOCOL_UDP))
	{

	  esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
	  if (PREDICT_FALSE (ip0->protocol == IP_PROTOCOL_UDP))
	    {
	      /* FIXME Skip, if not a UDP encapsulated packet */
	      esp0 = (esp_header_t *) ((u8 *) esp0 + sizeof (udp_header_t));
	    }

	  // if flow cache is enabled, first search through flow cache for a
	  // policy match for either protect, bypass or discard rules, in
	  // that order. if no match is found, search_flow_cache is reset to
	  // false and we fall back to linear search
	  search_flow_cache = im->input_flow_cache_flag;

	esp_or_udp:
	  if (search_flow_cache) // attempt to match policy in flow cache
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
	    }

	  else // linear search if flow cache is not enabled,
	       // or flow cache search just failed
	    {
	      p0 = ipsec_input_protect_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		clib_net_to_host_u32 (esp0->spi));
	    }

	  /* true when the full ESP header fits in the buffer */
	  has_space0 =
	    vlib_buffer_has_space (b[0],
				   (clib_address_t) (esp0 + 1) -
				   (clib_address_t) ip0);

	  if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
	    {
	      /* PROTECT match: hand off to ESP decrypt */
	      ipsec_matched += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter
		(&ipsec_spd_policy_counters,
		 thread_index, pi0, 1, clib_net_to_host_u16 (ip0->length));

	      vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
	      next[0] = im->esp4_decrypt_next_index;
	      vlib_buffer_advance (b[0], ((u8 *) esp0 - (u8 *) ip0));
	      goto trace0;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
	    }

	  else
	    {
	      p0 = ipsec_input_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
	    }

	  if (PREDICT_TRUE ((p0 != NULL)))
	    {
	      /* BYPASS match: keep the feature-arc next index */
	      ipsec_bypassed += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter (
		&ipsec_spd_policy_counters, thread_index, pi0, 1,
		clib_net_to_host_u16 (ip0->length));

	      goto trace0;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
	    }

	  else
	    {
	      p0 = ipsec_input_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
	    }

	  if (PREDICT_TRUE ((p0 != NULL)))
	    {
	      /* DISCARD match: drop the packet */
	      ipsec_dropped += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter (
		&ipsec_spd_policy_counters, thread_index, pi0, 1,
		clib_net_to_host_u16 (ip0->length));

	      next[0] = IPSEC_INPUT_NEXT_DROP;
	      goto trace0;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  // flow cache search failed, try again with linear search
	  if (search_flow_cache && p0 == NULL)
	    {
	      search_flow_cache = false;
	      goto esp_or_udp;
	    }

	  /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
	  ipsec_unprocessed += 1;
	  next[0] = IPSEC_INPUT_NEXT_DROP;

	trace0:
	  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
	      PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ipsec_input_trace_t *tr =
		vlib_add_trace (vm, node, b[0], sizeof (*tr));

	      tr->proto = ip0->protocol;
	      tr->sa_id = p0 ? p0->sa_id : ~0;
	      /* SPI/seq only valid when the ESP header was in the buffer */
	      tr->spi = has_space0 ? clib_net_to_host_u32 (esp0->spi) : ~0;
	      tr->seq = has_space0 ? clib_net_to_host_u32 (esp0->seq) : ~0;
	      tr->spd = spd0->id;
	      tr->policy_index = pi0;
	    }
	}
      else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
	{
	  ah0 = (ah_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

	  // if flow cache is enabled, first search through flow cache for a
	  // policy match and revert back to linear search on failure
	  search_flow_cache = im->input_flow_cache_flag;

	ah:
	  if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
	    }

	  else
	    {
	      p0 = ipsec_input_protect_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		clib_net_to_host_u32 (ah0->spi));
	    }

	  /* true when the full AH header fits in the buffer */
	  has_space0 =
	    vlib_buffer_has_space (b[0],
				   (clib_address_t) (ah0 + 1) -
				   (clib_address_t) ip0);

	  if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
	    {
	      /* PROTECT match: hand off to AH decrypt */
	      ipsec_matched += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter
		(&ipsec_spd_policy_counters,
		 thread_index, pi0, 1, clib_net_to_host_u16 (ip0->length));

	      vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
	      next[0] = im->ah4_decrypt_next_index;
	      goto trace1;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    }

	  if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
	    }

	  else
	    {
	      p0 = ipsec_input_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
	    }

	  if (PREDICT_TRUE ((p0 != NULL)))
	    {
	      /* BYPASS match: keep the feature-arc next index */
	      ipsec_bypassed += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter (
		&ipsec_spd_policy_counters, thread_index, pi0, 1,
		clib_net_to_host_u16 (ip0->length));

	      goto trace1;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
	    }

	  else
	    {
	      p0 = ipsec_input_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
	    }

	  if (PREDICT_TRUE ((p0 != NULL)))
	    {
	      /* DISCARD match: drop the packet */
	      ipsec_dropped += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter (
		&ipsec_spd_policy_counters, thread_index, pi0, 1,
		clib_net_to_host_u16 (ip0->length));

	      next[0] = IPSEC_INPUT_NEXT_DROP;
	      goto trace1;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  // flow cache search failed, retry with linear search
	  if (search_flow_cache && p0 == NULL)
	    {
	      search_flow_cache = false;
	      goto ah;
	    }

	  /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
	  ipsec_unprocessed += 1;
	  next[0] = IPSEC_INPUT_NEXT_DROP;

	trace1:
	  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
	      PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ipsec_input_trace_t *tr =
		vlib_add_trace (vm, node, b[0], sizeof (*tr));

	      tr->proto = ip0->protocol;
	      tr->sa_id = p0 ? p0->sa_id : ~0;
	      /* SPI/seq only valid when the AH header was in the buffer */
	      tr->spi = has_space0 ? clib_net_to_host_u32 (ah0->spi) : ~0;
	      tr->seq = has_space0 ? clib_net_to_host_u32 (ah0->seq_no) : ~0;
	      tr->spd = spd0->id;
	      tr->policy_index = pi0;
	    }
	}
      else
	{
	  /* Not ESP/UDP/AH: not IPSec traffic for this node */
	  ipsec_unprocessed += 1;
	}
      n_left_from -= 1;
      b += 1;
      next += 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  /* flush the per-frame tallies into the node error counters */
  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_POLICY_MATCH,
			       ipsec_matched);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_POLICY_NO_MATCH,
			       ipsec_unprocessed);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_POLICY_DISCARD,
			       ipsec_dropped);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_POLICY_BYPASS,
			       ipsec_bypassed);

  return frame->n_vectors;
}
657
658
/* Node registration: "ipsec4-input-feature" sits on the ip4 feature arc;
   next nodes are generated from foreach_ipsec_input_next. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ipsec4_input_node) = {
  .name = "ipsec4-input-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_input_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_input_error_strings),
  .error_strings = ipsec_input_error_strings,
  .n_next_nodes = IPSEC_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
    foreach_ipsec_input_next
#undef _
  },
};
/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -0700675
Damjan Mariond770cfc2019-09-02 19:00:33 +0200676extern vlib_node_registration_t ipsec6_input_node;
Damjan Marion1c80e832016-05-11 23:07:18 +0200677
Klement Sekerab8f35442018-10-29 13:38:19 +0100678
679VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
680 vlib_node_runtime_t * node,
681 vlib_frame_t * from_frame)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700682{
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800683 u32 n_left_from, *from, next_index, *to_next, thread_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700684 ipsec_main_t *im = &ipsec_main;
Kingwel Xiec69ac312019-02-04 01:49:29 -0800685 u32 ipsec_unprocessed = 0;
686 u32 ipsec_matched = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700687
688 from = vlib_frame_vector_args (from_frame);
689 n_left_from = from_frame->n_vectors;
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800690 thread_index = vm->thread_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700691
692 next_index = node->cached_next_index;
693
694 while (n_left_from > 0)
695 {
696 u32 n_left_to_next;
697
698 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
699
700 while (n_left_from > 0 && n_left_to_next > 0)
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700701 {
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800702 u32 bi0, next0, pi0;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700703 vlib_buffer_t *b0;
704 ip6_header_t *ip0;
705 esp_header_t *esp0;
706 ip4_ipsec_config_t *c0;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700707 ipsec_spd_t *spd0;
Matus Fabian5539a072016-09-07 05:57:09 -0700708 ipsec_policy_t *p0 = 0;
Klement Sekera611864f2018-09-26 11:19:00 +0200709 ah_header_t *ah0;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700710 u32 header_size = sizeof (ip0[0]);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700711
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700712 bi0 = to_next[0] = from[0];
713 from += 1;
714 n_left_from -= 1;
715 to_next += 1;
716 n_left_to_next -= 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700717
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700718 b0 = vlib_get_buffer (vm, bi0);
Szymon Sliwa65a27272018-05-09 14:28:08 +0200719 b0->flags |= VNET_BUFFER_F_IS_IP6;
720 b0->flags &= ~VNET_BUFFER_F_IS_IP4;
Damjan Marion7d98a122018-07-19 20:42:08 +0200721 c0 = vnet_feature_next_with_data (&next0, b0, sizeof (c0[0]));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700722
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700723 spd0 = pool_elt_at_index (im->spds, c0->spd_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700724
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700725 ip0 = vlib_buffer_get_current (b0);
726 esp0 = (esp_header_t *) ((u8 *) ip0 + header_size);
Klement Sekera611864f2018-09-26 11:19:00 +0200727 ah0 = (ah_header_t *) ((u8 *) ip0 + header_size);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700728
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700729 if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
730 {
Ed Warnickecb9cada2015-12-08 15:45:58 -0700731#if 0
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700732 clib_warning
733 ("packet received from %U to %U spi %u size %u spd_id %u",
734 format_ip6_address, &ip0->src_address, format_ip6_address,
735 &ip0->dst_address, clib_net_to_host_u32 (esp0->spi),
736 clib_net_to_host_u16 (ip0->payload_length) + header_size,
737 spd0->id);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700738#endif
Klement Sekerabe5a5dd2018-10-09 16:05:48 +0200739 p0 = ipsec6_input_protect_policy_match (spd0,
740 &ip0->src_address,
741 &ip0->dst_address,
742 clib_net_to_host_u32
743 (esp0->spi));
Ed Warnickecb9cada2015-12-08 15:45:58 -0700744
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700745 if (PREDICT_TRUE (p0 != 0))
746 {
Kingwel Xiec69ac312019-02-04 01:49:29 -0800747 ipsec_matched += 1;
748
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800749 pi0 = p0 - im->policies;
750 vlib_increment_combined_counter
751 (&ipsec_spd_policy_counters,
752 thread_index, pi0, 1,
753 clib_net_to_host_u16 (ip0->payload_length) +
754 header_size);
Kingwel Xiec69ac312019-02-04 01:49:29 -0800755
Damjan Marion9c6ae5f2016-11-15 23:20:01 +0100756 vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
Klement Sekerabe5a5dd2018-10-09 16:05:48 +0200757 next0 = im->esp6_decrypt_next_index;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700758 vlib_buffer_advance (b0, header_size);
759 goto trace0;
760 }
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800761 else
762 {
763 pi0 = ~0;
Zachary Leaf26fec712021-10-26 10:05:58 -0500764 ipsec_unprocessed += 1;
765 next0 = IPSEC_INPUT_NEXT_DROP;
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800766 }
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700767 }
Klement Sekera611864f2018-09-26 11:19:00 +0200768 else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
769 {
Klement Sekerabe5a5dd2018-10-09 16:05:48 +0200770 p0 = ipsec6_input_protect_policy_match (spd0,
771 &ip0->src_address,
772 &ip0->dst_address,
773 clib_net_to_host_u32
774 (ah0->spi));
Klement Sekera611864f2018-09-26 11:19:00 +0200775
776 if (PREDICT_TRUE (p0 != 0))
777 {
Kingwel Xiec69ac312019-02-04 01:49:29 -0800778 ipsec_matched += 1;
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800779 pi0 = p0 - im->policies;
780 vlib_increment_combined_counter
781 (&ipsec_spd_policy_counters,
782 thread_index, pi0, 1,
783 clib_net_to_host_u16 (ip0->payload_length) +
784 header_size);
Kingwel Xiec69ac312019-02-04 01:49:29 -0800785
Klement Sekera611864f2018-09-26 11:19:00 +0200786 vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
Klement Sekerabe5a5dd2018-10-09 16:05:48 +0200787 next0 = im->ah6_decrypt_next_index;
Klement Sekera611864f2018-09-26 11:19:00 +0200788 goto trace0;
789 }
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800790 else
791 {
792 pi0 = ~0;
Zachary Leaf26fec712021-10-26 10:05:58 -0500793 ipsec_unprocessed += 1;
794 next0 = IPSEC_INPUT_NEXT_DROP;
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800795 }
Klement Sekera611864f2018-09-26 11:19:00 +0200796 }
Kingwel Xiec69ac312019-02-04 01:49:29 -0800797 else
798 {
799 ipsec_unprocessed += 1;
800 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700801
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700802 trace0:
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800803 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
804 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700805 {
806 ipsec_input_trace_t *tr =
807 vlib_add_trace (vm, node, b0, sizeof (*tr));
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800808
809 if (p0)
810 tr->sa_id = p0->sa_id;
811 tr->proto = ip0->protocol;
812 tr->spi = clib_net_to_host_u32 (esp0->spi);
813 tr->seq = clib_net_to_host_u32 (esp0->seq);
814 tr->spd = spd0->id;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700815 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700816
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700817 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
818 n_left_to_next, bi0, next0);
819 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700820 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
821 }
Kingwel Xiec69ac312019-02-04 01:49:29 -0800822
Klement Sekerabe5a5dd2018-10-09 16:05:48 +0200823 vlib_node_increment_counter (vm, ipsec6_input_node.index,
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700824 IPSEC_INPUT_ERROR_RX_PKTS,
Kingwel Xiec69ac312019-02-04 01:49:29 -0800825 from_frame->n_vectors - ipsec_unprocessed);
826
827 vlib_node_increment_counter (vm, ipsec6_input_node.index,
ShivaShankarK05464832020-04-14 14:01:03 +0530828 IPSEC_INPUT_ERROR_RX_POLICY_MATCH,
Kingwel Xiec69ac312019-02-04 01:49:29 -0800829 ipsec_matched);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700830
831 return from_frame->n_vectors;
832}
833
834
/* *INDENT-OFF* */
/*
 * Graph-node registration for the IPv6 IPsec inbound feature node.
 *
 * The node is attached to the ip6-unicast feature arc under the name
 * "ipsec6-input-feature".  Its error counters come from the shared
 * ipsec_input_error_strings table (RX_PKTS, policy match/no-match/
 * bypass/discard), and its next-node table is generated from the
 * foreach_ipsec_input_next X-macro so the IPSEC_INPUT_NEXT_* enum and
 * the registered next nodes stay in sync by construction.
 */
VLIB_REGISTER_NODE (ipsec6_input_node) = {
  .name = "ipsec6-input-feature",
  .vector_size = sizeof (u32),	/* one u32 buffer index per vector slot */
  .format_trace = format_ipsec_input_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_input_error_strings),
  .error_strings = ipsec_input_error_strings,
  .n_next_nodes = IPSEC_INPUT_N_NEXT,
  .next_nodes = {
    /* Expand [IPSEC_INPUT_NEXT_x] = "node-name" for every next edge. */
#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
    foreach_ipsec_input_next
#undef _
  },
};
/* *INDENT-ON* */
Damjan Marion1c80e832016-05-11 23:07:18 +0200851
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700852/*
853 * fd.io coding-style-patch-verification: ON
854 *
855 * Local Variables:
856 * eval: (c-set-style "gnu")
857 * End:
858 */