/*
 * ipsec_output.c : IPSec output node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_io.h>

#define foreach_ipsec_output_error \
 _(RX_PKTS, "IPSec pkts received") \
 _(POLICY_DISCARD, "IPSec policy discard") \
 _(POLICY_NO_MATCH, "IPSec policy (no match)") \
 _(POLICY_PROTECT, "IPSec policy protect") \
 _(POLICY_BYPASS, "IPSec policy bypass") \
 _(ENCAPS_FAILED, "IPSec encapsulation failed")

typedef enum
{
#define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
  foreach_ipsec_output_error
#undef _
  IPSEC_OUTPUT_N_ERROR,
} ipsec_output_error_t;

static char *ipsec_output_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_output_error
#undef _
};

typedef struct
{
  u32 spd_id;
  u32 policy_id;
} ipsec_output_trace_t;

/* packet trace format function */
static u8 *
format_ipsec_output_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ipsec_output_trace_t *t = va_arg (*args, ipsec_output_trace_t *);

  s = format (s, "spd %u policy %d", t->spd_id, t->policy_id);

  return s;
}

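/*
 * Flow cache entries are keyed by the IPv4 5-tuple.  The 64-bit bucket
 * value packs the matched policy index into the upper 32 bits and the
 * SPD epoch count into the lower 32 bits:
 *
 *   value = ((u64) pol_id << 32) | (u64) epoch_count;
 *
 * The epoch count changes on every policy add/remove, so a cached entry
 * whose low word no longer matches im->epoch_count is stale; it is
 * ignored by the lookup path rather than flushed eagerly.
 */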
always_inline void
ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
				     u16 lp, u16 rp, u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
						   (ip4_address_t) ra },
				     .port = { lp, rp },
				     .proto = pr };

  ip4_5tuple.kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know
     whether to increment the flow cache counter.  Since the flow
     cache counter is reset on any policy add/remove, but the hash
     table values are not, we also need to check whether the entry
     we are overwriting is stale.  If it is a stale-entry overwrite,
     we still want to increment the flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for a stale entry by comparing with the current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple.kv_16_8,
		    sizeof (ip4_5tuple.kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}

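/*
 * Flow cache lookup: copy the bucket under its spinlock, then do the
 * key compare and epoch check on the private copy.  A hit is valid only
 * if the 5-tuple key matches and the entry's epoch equals the current
 * one; otherwise the caller falls back to the linear SPD walk.
 */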
always_inline ipsec_policy_t *
ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
				      u16 lp, u16 rp)
{
  ipsec_policy_t *p = NULL;
  ipsec4_hash_kv_16_8_t kv_result;
  u64 hash;

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
		     (pr != IP_PROTOCOL_SCTP)))
    {
      lp = 0;
      rp = 0;
    }
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
						   (ip4_address_t) ra },
				     .port = { lp, rp },
				     .proto = pr };

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  kv_result = im->ipsec4_out_spd_hash_tbl[hash];
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_5tuple.kv_16_8,
				    (u64 *) &kv_result))
    {
      if (im->epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
	{
	  /* Get the policy based on the index */
	  p =
	    pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
	}
    }

  return p;
}

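/*
 * Linear SPD walk: the IPSEC_SPD_POLICY_IP4_OUTBOUND vector is scanned
 * in order and the first matching policy wins, so the policy add path
 * is expected to keep the vector in priority order.  Addresses and
 * ports arrive here in host byte order; for protocols other than
 * TCP/UDP/SCTP the port range checks are skipped and the ports are
 * treated as zero, matching what the flow cache stores.
 */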
always_inline ipsec_policy_t *
ipsec_output_policy_match (ipsec_spd_t *spd, u8 pr, u32 la, u32 ra, u16 lp,
			   u16 rp, u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  u32 *i;

  if (!spd)
    return 0;

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE (p->protocol && (p->protocol != pr)))
	continue;

      if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
	continue;

      if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
	continue;

      if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
	continue;

      if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
	continue;

      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
			 (pr != IP_PROTOCOL_SCTP)))
	{
	  lp = 0;
	  rp = 0;
	  goto add_flow_cache;
	}

      if (lp < p->lport.start)
	continue;

      if (lp > p->lport.stop)
	continue;

      if (rp < p->rport.start)
	continue;

      if (rp > p->rport.stop)
	continue;

    add_flow_cache:
      if (flow_cache_enabled)
	{
	  /* Add an entry to the flow cache */
	  ipsec4_out_spd_add_flow_cache_entry (
	    im, pr, clib_host_to_net_u32 (la), clib_host_to_net_u32 (ra),
	    clib_host_to_net_u16 (lp), clib_host_to_net_u16 (rp), *i);
	}

      return p;
    }
  return 0;
}

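/*
 * IPv6 addresses are stored in network byte order, so a memcmp over the
 * 16 bytes behaves as an unsigned 128-bit compare; that is what makes
 * the range check below correct.
 */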
always_inline uword
ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
		      ip6_address_t * ua)
{
  if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
      (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
    return 1;
  return 0;
}

always_inline ipsec_policy_t *
ipsec6_output_policy_match (ipsec_spd_t * spd,
			    ip6_address_t * la,
			    ip6_address_t * ra, u16 lp, u16 rp, u8 pr)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  u32 *i;

  if (!spd)
    return 0;

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE (p->protocol && (p->protocol != pr)))
	continue;

      if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
	continue;

      if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
	continue;

      if (PREDICT_FALSE
	  ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)
	   && (pr != IP_PROTOCOL_SCTP)))
	return p;

      if (lp < p->lport.start)
	continue;

      if (lp > p->lport.stop)
	continue;

      if (rp < p->rport.start)
	continue;

      if (rp > p->rport.stop)
	continue;

      return p;
    }

  return 0;
}

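/*
 * Per-packet policy lookup and dispatch.  Rather than using the usual
 * vlib next-index mechanism, this node allocates a frame aimed at the
 * chosen next node (encrypt, output feature chain, or drop) and hands
 * it over whenever the verdict changes, so runs of packets with the
 * same verdict are batched into the same frame.
 */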
static inline uword
ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_frame_t * from_frame, int is_ipv6)
{
  ipsec_main_t *im = &ipsec_main;

  u32 *from, *to_next = 0, thread_index;
  u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~ 0;
  u32 next_node_index = (u32) ~ 0, last_next_node_index = (u32) ~ 0;
  vlib_frame_t *f = 0;
  u32 spd_index0 = ~0;
  ipsec_spd_t *spd0 = 0;
  int bogus;
  u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
  u8 flow_cache_enabled = im->output_flow_cache_flag;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      u32 bi0, pi0, bi1;
      vlib_buffer_t *b0, *b1;
      ipsec_policy_t *p0 = NULL;
      ip4_header_t *ip0;
      ip6_header_t *ip6_0 = 0;
      udp_header_t *udp0;
      u32 iph_offset = 0;
      tcp_header_t *tcp0;
      u64 bytes0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);
      if (n_left_from > 1)
	{
	  bi1 = from[1];
	  b1 = vlib_get_buffer (vm, bi1);
	  CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES * 2, STORE);
	  vlib_prefetch_buffer_data (b1, LOAD);
	}
      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
      iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
			      + iph_offset);

      /* lookup for SPD only if sw_if_index is changed */
      if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
	{
	  uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
	  ALWAYS_ASSERT (p);
	  spd_index0 = p[0];
	  spd0 = pool_elt_at_index (im->spds, spd_index0);
	  last_sw_if_index = sw_if_index0;
	}

      if (is_ipv6)
	{
	  ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0)
				    + iph_offset);

	  udp0 = ip6_next_header (ip6_0);
#if 0
	  clib_warning
	    ("packet received from %U port %u to %U port %u spd_id %u",
	     format_ip6_address, &ip6_0->src_address,
	     clib_net_to_host_u16 (udp0->src_port), format_ip6_address,
	     &ip6_0->dst_address, clib_net_to_host_u16 (udp0->dst_port),
	     spd0->id);
#endif

	  p0 = ipsec6_output_policy_match (spd0,
					   &ip6_0->src_address,
					   &ip6_0->dst_address,
					   clib_net_to_host_u16
					   (udp0->src_port),
					   clib_net_to_host_u16
					   (udp0->dst_port), ip6_0->protocol);
	}
      else
	{
	  udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

#if 0
	  clib_warning ("packet received from %U to %U port %u",
			format_ip4_address, ip0->src_address.as_u8,
			format_ip4_address, ip0->dst_address.as_u8,
			clib_net_to_host_u16 (udp0->dst_port));
	  clib_warning ("sw_if_index0 %u spd_index0 %u spd_id %u",
			sw_if_index0, spd_index0, spd0->id);
#endif

	  /*
	   * Check whether flow cache is enabled.
	   */
	  if (flow_cache_enabled)
	    {
	      p0 = ipsec4_out_spd_find_flow_cache_entry (
		im, ip0->protocol, ip0->src_address.as_u32,
		ip0->dst_address.as_u32, udp0->src_port, udp0->dst_port);
	    }

	  /* Fall back to linear search if flow cache lookup fails */
	  if (p0 == NULL)
	    {
	      p0 = ipsec_output_policy_match (
		spd0, ip0->protocol,
		clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		clib_net_to_host_u16 (udp0->src_port),
		clib_net_to_host_u16 (udp0->dst_port), flow_cache_enabled);
	    }
	}
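      /* TCP and UDP headers put the src/dst ports at the same offsets,
         so the pointer below doubles as a TCP header pointer for the
         checksum-offload fixups further down. */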
      tcp0 = (void *) udp0;

      if (PREDICT_TRUE (p0 != NULL))
	{
	  pi0 = p0 - im->policies;

	  vlib_prefetch_combined_counter (&ipsec_spd_policy_counters,
					  thread_index, pi0);

	  if (is_ipv6)
	    {
	      bytes0 = clib_net_to_host_u16 (ip6_0->payload_length);
	      bytes0 += sizeof (ip6_header_t);
	    }
	  else
	    {
	      bytes0 = clib_net_to_host_u16 (ip0->length);
	    }

	  if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
	    {
	      ipsec_sa_t *sa = 0;
	      nc_protect++;
	      sa = ipsec_sa_get (p0->sa_index);
	      if (sa->protocol == IPSEC_PROTOCOL_ESP)
		{
		  if (is_ipv6)
		    next_node_index = im->esp6_encrypt_node_index;
		  else
		    next_node_index = im->esp4_encrypt_node_index;
		}
	      else if (is_ipv6)
		next_node_index = im->ah6_encrypt_node_index;
	      else
		next_node_index = im->ah4_encrypt_node_index;
	      vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;

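	      /* If checksum computation was offloaded to the NIC, it
	         has to happen here in software instead: once the
	         payload is encrypted (or an AH ICV is computed over
	         it), the inner headers are no longer visible to the
	         hardware. */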
	      if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_OFFLOAD))
		{
		  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;

		  /*
		   * Clear the offload flags up front, before the
		   * checksums are computed, while the buffer metadata
		   * cache line is still hot.
		   */
		  vnet_buffer_offload_flags_clear (b0, oflags);

		  if (is_ipv6)
		    {
		      if (PREDICT_FALSE (oflags &
					 VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
			{
			  tcp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
			    vm, b0, ip6_0, &bogus);
			}
		      if (PREDICT_FALSE (oflags &
					 VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
			{
			  udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
			    vm, b0, ip6_0, &bogus);
			}
		    }
		  else
		    {
		      if (PREDICT_FALSE (oflags &
					 VNET_BUFFER_OFFLOAD_F_IP_CKSUM))
			{
			  ip0->checksum = ip4_header_checksum (ip0);
			}
		      if (PREDICT_FALSE (oflags &
					 VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
			{
			  tcp0->checksum =
			    ip4_tcp_udp_compute_checksum (vm, b0, ip0);
			}
		      if (PREDICT_FALSE (oflags &
					 VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
			{
			  udp0->checksum =
			    ip4_tcp_udp_compute_checksum (vm, b0, ip0);
			}
		    }
		}
	      vlib_buffer_advance (b0, iph_offset);
	    }
	  else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
	    {
	      nc_bypass++;
	      next_node_index = get_next_output_feature_node_index (b0, node);
	    }
	  else
	    {
	      nc_discard++;
	      next_node_index = im->error_drop_node_index;
	    }
	  vlib_increment_combined_counter
	    (&ipsec_spd_policy_counters, thread_index, pi0, 1, bytes0);
	}
      else
	{
	  pi0 = ~0;
	  nc_nomatch++;
	  next_node_index = im->error_drop_node_index;
	}

      from += 1;
      n_left_from -= 1;

      if (PREDICT_FALSE ((last_next_node_index != next_node_index) || f == 0))
	{
	  /* if this is not 1st frame */
	  if (f)
	    vlib_put_frame_to_node (vm, last_next_node_index, f);

	  last_next_node_index = next_node_index;

	  f = vlib_get_frame_to_node (vm, next_node_index);

	  /* Copy the trace flag from the node runtime to the new frame
	     so that tracing continues in the next node. */
	  f->frame_flags |= node->flags & VLIB_NODE_FLAG_TRACE;

	  to_next = vlib_frame_vector_args (f);
	}

      to_next[0] = bi0;
      to_next += 1;
      f->n_vectors++;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
	  PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ipsec_output_trace_t *tr =
	    vlib_add_trace (vm, node, b0, sizeof (*tr));
	  if (spd0)
	    tr->spd_id = spd0->id;
	  tr->policy_id = pi0;
	}
    }

  vlib_put_frame_to_node (vm, next_node_index, f);
  vlib_node_increment_counter (vm, node->node_index,
			       IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
  vlib_node_increment_counter (vm, node->node_index,
			       IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
  vlib_node_increment_counter (vm, node->node_index,
			       IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
  vlib_node_increment_counter (vm, node->node_index,
			       IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH,
			       nc_nomatch);
  return from_frame->n_vectors;
}

VLIB_NODE_FN (ipsec4_output_node) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ipsec4_output_node) = {
  .name = "ipsec4-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FN (ipsec6_output_node) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 1);
}

VLIB_REGISTER_NODE (ipsec6_output_node) = {
  .name = "ipsec6-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};