/*
 * ipsec_output.c : IPSec output node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_output.h>

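/*
 * Per-node error counters.  The foreach_ipsec_output_error X-macro below is
 * expanded twice: once to build the ipsec_output_error_t enum and once to
 * build the matching counter-name strings registered with the nodes.
 */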
#define foreach_ipsec_output_error                    \
 _(RX_PKTS, "IPSec pkts received")                    \
 _(POLICY_DISCARD, "IPSec policy discard")            \
 _(POLICY_NO_MATCH, "IPSec policy (no match)")        \
 _(POLICY_PROTECT, "IPSec policy protect")            \
 _(POLICY_BYPASS, "IPSec policy bypass")              \
 _(ENCAPS_FAILED, "IPSec encapsulation failed")

typedef enum
{
#define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
  foreach_ipsec_output_error
#undef _
    IPSEC_DECAP_N_ERROR,
} ipsec_output_error_t;

static char *ipsec_output_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_output_error
#undef _
};

typedef struct
{
  u32 spd_id;
  u32 policy_id;
} ipsec_output_trace_t;

/* packet trace format function */
static u8 *
format_ipsec_output_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ipsec_output_trace_t *t = va_arg (*args, ipsec_output_trace_t *);

  s = format (s, "spd %u policy %d", t->spd_id, t->policy_id);

  return s;
}

always_inline uword
ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
                      ip6_address_t * ua)
{
  if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
      (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
    return 1;
  return 0;
}

always_inline void
ipsec_fp_5tuple_from_ip6_range (ipsec_fp_5tuple_t *tuple, ip6_address_t *la,
                                ip6_address_t *ra, u16 lp, u16 rp, u8 pr)
{
  clib_memcpy_fast (&tuple->ip6_laddr, la, sizeof (ip6_address_t));
  clib_memcpy_fast (&tuple->ip6_raddr, ra, sizeof (ip6_address_t));

  tuple->lport = lp;
  tuple->rport = rp;
  tuple->protocol = pr;
  tuple->is_ipv6 = 1;
}

always_inline ipsec_policy_t *
ipsec6_output_policy_match (ipsec_spd_t * spd,
                            ip6_address_t * la,
                            ip6_address_t * ra, u16 lp, u16 rp, u8 pr)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  u32 fp_policy_ids[1];

  u32 *i;

  if (!spd)
    return 0;

  ipsec_fp_5tuple_from_ip6_range (&tuples[0], la, ra, lp, rp, pr);
  if (im->fp_spd_is_enabled &&
      (0 == ipsec_fp_out_policy_match_n (&spd->fp_spd, 1, tuples, policies,
                                         fp_policy_ids, 1)))
    {
      p = policies[0];
      i = fp_policy_ids;
      return p;
    }

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                         (p->protocol != pr)))
        continue;

      if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
        continue;

      if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
        continue;

      if (PREDICT_FALSE
          ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)
           && (pr != IP_PROTOCOL_SCTP)))
        return p;

      if (lp < p->lport.start)
        continue;

      if (lp > p->lport.stop)
        continue;

      if (rp < p->rport.start)
        continue;

      if (rp > p->rport.stop)
        continue;

      return p;
    }

  return 0;
}

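/*
 * Shared worker for the ipsec4/ipsec6 output feature nodes.  For each
 * packet the SPD of the TX interface is looked up (cached while the
 * sw_if_index does not change), the outbound policy is resolved, and the
 * packet is handed to the matching next node: an ESP/AH encrypt node for
 * PROTECT, the next output feature for BYPASS, or error-drop for DISCARD
 * and for packets with no matching policy.
 */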
static inline uword
ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, int is_ipv6)
{
  ipsec_main_t *im = &ipsec_main;

  u32 *from, *to_next = 0, thread_index;
  u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~ 0;
  u32 next_node_index = (u32) ~ 0, last_next_node_index = (u32) ~ 0;
  vlib_frame_t *f = 0;
  u32 spd_index0 = ~0;
  ipsec_spd_t *spd0 = 0;
  int bogus;
  u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
  u8 flow_cache_enabled = im->output_flow_cache_flag;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;

172 while (n_left_from > 0)
173 {
Zhiyong Yangf9221162019-04-22 00:18:38 -0400174 u32 bi0, pi0, bi1;
175 vlib_buffer_t *b0, *b1;
Govindarajan Mohandoss6d7dfcb2021-03-19 19:20:49 +0000176 ipsec_policy_t *p0 = NULL;
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700177 ip4_header_t *ip0;
178 ip6_header_t *ip6_0 = 0;
179 udp_header_t *udp0;
Florin Corasfb28e9a2016-09-06 15:18:21 +0200180 u32 iph_offset = 0;
Klement Sekera31da2e32018-06-24 22:49:55 +0200181 tcp_header_t *tcp0;
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800182 u64 bytes0;
Ed Warnickecb9cada2015-12-08 15:45:58 -0700183
184 bi0 = from[0];
185 b0 = vlib_get_buffer (vm, bi0);
Vratko Polakea6a34c2019-04-23 15:52:28 +0200186 if (n_left_from > 1)
187 {
188 bi1 = from[1];
189 b1 = vlib_get_buffer (vm, bi1);
190 CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES * 2, STORE);
191 vlib_prefetch_buffer_data (b1, LOAD);
192 }
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700193 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
Florin Corasfb28e9a2016-09-06 15:18:21 +0200194 iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
195 ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
196 + iph_offset);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700197
Ed Warnickecb9cada2015-12-08 15:45:58 -0700198 /* lookup for SPD only if sw_if_index is changed */
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700199 if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
200 {
201 uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
Dave Barach47d41ad2020-02-17 09:13:26 -0500202 ALWAYS_ASSERT (p);
Keith Burns (alagalah)166a9d42016-08-06 11:00:56 -0700203 spd_index0 = p[0];
204 spd0 = pool_elt_at_index (im->spds, spd_index0);
205 last_sw_if_index = sw_if_index0;
206 }
Ed Warnickecb9cada2015-12-08 15:45:58 -0700207
      if (is_ipv6)
        {
          ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0)
                                    + iph_offset);

          udp0 = ip6_next_header (ip6_0);
#if 0
          clib_warning
            ("packet received from %U port %u to %U port %u spd_id %u",
             format_ip6_address, &ip6_0->src_address,
             clib_net_to_host_u16 (udp0->src_port), format_ip6_address,
             &ip6_0->dst_address, clib_net_to_host_u16 (udp0->dst_port),
             spd0->id);
#endif

          p0 = ipsec6_output_policy_match (spd0,
                                           &ip6_0->src_address,
                                           &ip6_0->dst_address,
                                           clib_net_to_host_u16
                                           (udp0->src_port),
                                           clib_net_to_host_u16
                                           (udp0->dst_port), ip6_0->protocol);
        }
      else
        {
          udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

#if 0
          clib_warning ("packet received from %U to %U port %u",
                        format_ip4_address, ip0->src_address.as_u8,
                        format_ip4_address, ip0->dst_address.as_u8,
                        clib_net_to_host_u16 (udp0->dst_port));
          clib_warning ("sw_if_index0 %u spd_index0 %u spd_id %u",
                        sw_if_index0, spd_index0, spd0->id);
#endif

          /* Try the IPv4 outbound flow cache first, if it is enabled */
          if (flow_cache_enabled)
            {
              p0 = ipsec4_out_spd_find_flow_cache_entry (
                im, ip0->protocol, ip0->src_address.as_u32,
                ip0->dst_address.as_u32, udp0->src_port, udp0->dst_port);
            }

          /* Fall back to a linear SPD search if the flow cache misses */
          if (p0 == NULL)
            {
              p0 = ipsec_output_policy_match (
                spd0, ip0->protocol,
                clib_net_to_host_u32 (ip0->src_address.as_u32),
                clib_net_to_host_u32 (ip0->dst_address.as_u32),
                clib_net_to_host_u16 (udp0->src_port),
                clib_net_to_host_u16 (udp0->dst_port), flow_cache_enabled);
            }
        }
      tcp0 = (void *) udp0;

      if (PREDICT_TRUE (p0 != NULL))
        {
          pi0 = p0 - im->policies;

          vlib_prefetch_combined_counter (&ipsec_spd_policy_counters,
                                          thread_index, pi0);

          if (is_ipv6)
            {
              bytes0 = clib_net_to_host_u16 (ip6_0->payload_length);
              bytes0 += sizeof (ip6_header_t);
            }
          else
            {
              bytes0 = clib_net_to_host_u16 (ip0->length);
            }

          if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
            {
              ipsec_sa_t *sa = 0;
              nc_protect++;
              sa = ipsec_sa_get (p0->sa_index);
              if (sa->protocol == IPSEC_PROTOCOL_ESP)
                {
                  if (is_ipv6)
                    next_node_index = im->esp6_encrypt_node_index;
                  else
                    next_node_index = im->esp4_encrypt_node_index;
                }
              else if (is_ipv6)
                next_node_index = im->ah6_encrypt_node_index;
              else
                next_node_index = im->ah4_encrypt_node_index;
              vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;

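              /*
               * Any checksums still pending as hardware offloads must be
               * completed in software before encryption: once the packet is
               * ESP/AH encapsulated the inner headers are no longer visible
               * to the NIC.
               */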
              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_OFFLOAD))
                {
                  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;

                  /*
                   * Clear the offload flags before the checksums are
                   * computed; this guarantees the metadata cache line is
                   * already hot when the checksums are written.
                   */
                  vnet_buffer_offload_flags_clear (b0, oflags);

                  if (is_ipv6)
                    {
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
                        {
                          tcp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
                            vm, b0, ip6_0, &bogus);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
                        {
                          udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
                            vm, b0, ip6_0, &bogus);
                        }
                    }
                  else
                    {
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_IP_CKSUM))
                        {
                          ip0->checksum = ip4_header_checksum (ip0);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
                        {
                          tcp0->checksum =
                            ip4_tcp_udp_compute_checksum (vm, b0, ip0);
                        }
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
                        {
                          udp0->checksum =
                            ip4_tcp_udp_compute_checksum (vm, b0, ip0);
                        }
                    }
                }
              vlib_buffer_advance (b0, iph_offset);
            }
          else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
            {
              nc_bypass++;
              next_node_index = get_next_output_feature_node_index (b0, node);
            }
          else
            {
              nc_discard++;
              next_node_index = im->error_drop_node_index;
            }
          vlib_increment_combined_counter
            (&ipsec_spd_policy_counters, thread_index, pi0, 1, bytes0);
        }
      else
        {
          pi0 = ~0;
          nc_nomatch++;
          next_node_index = im->error_drop_node_index;
        }

      from += 1;
      n_left_from -= 1;

      if (PREDICT_FALSE ((last_next_node_index != next_node_index) || f == 0))
        {
          /* Flush the previous frame, if any */
          if (f)
            vlib_put_frame_to_node (vm, last_next_node_index, f);

          last_next_node_index = next_node_index;

          f = vlib_get_frame_to_node (vm, next_node_index);

          /* Copy the trace flag from the node runtime into the new frame */
          f->frame_flags |= node->flags & VLIB_NODE_FLAG_TRACE;

          to_next = vlib_frame_vector_args (f);
        }

      to_next[0] = bi0;
      to_next += 1;
      f->n_vectors++;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
          PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          ipsec_output_trace_t *tr =
            vlib_add_trace (vm, node, b0, sizeof (*tr));
          if (spd0)
            tr->spd_id = spd0->id;
          tr->policy_id = pi0;
        }
    }

  vlib_put_frame_to_node (vm, next_node_index, f);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH,
                               nc_nomatch);
  return from_frame->n_vectors;
}

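/*
 * The IPv4 and IPv6 output feature nodes are thin wrappers that specialise
 * ipsec_output_inline () at compile time via the is_ipv6 argument.
 */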
VLIB_NODE_FN (ipsec4_output_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ipsec4_output_node) = {
  .name = "ipsec4-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FN (ipsec6_output_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 1);
}

VLIB_REGISTER_NODE (ipsec6_output_node) = {
  .name = "ipsec6-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};