/*
 *------------------------------------------------------------------
 * Copyright (c) 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#ifndef IPSEC_OUTPUT_H
#define IPSEC_OUTPUT_H

#include <vppinfra/types.h>
#include <vnet/ipsec/ipsec_spd.h>
#include <vnet/ipsec/ipsec_spd_fp_lookup.h>

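/*
 * Insert a 5-tuple -> policy mapping into the IPv4 outbound flow cache.
 * Each bucket value packs the policy index into the upper 32 bits and
 * the epoch count at insertion time into the lower 32 bits, so lookups
 * can detect entries made stale by a later policy add/remove.
 */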
always_inline void
ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
                                     u16 lp, u16 rp, u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
                                                   (ip4_address_t) ra },
                                     .port = { lp, rp },
                                     .proto = pr };

  ip4_5tuple.kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know
     whether to increment the flow cache counter. Since the flow
     cache counter is reset on any policy add/remove, but the hash
     table values are not, we also need to check whether the entry
     being overwritten is stale. If it is a stale-entry overwrite,
     we still want to increment the flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for a stale entry by comparing with the current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple.kv_16_8,
                    sizeof (ip4_5tuple.kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}

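/*
 * Same flow cache insert, taking a pre-built ipsec4_spd_5tuple_t; used
 * by the batched policy match below. The stale/fresh accounting is
 * identical to ipsec4_out_spd_add_flow_cache_entry above.
 */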
always_inline void
ipsec4_out_spd_add_flow_cache_entry_n (ipsec_main_t *im,
                                       ipsec4_spd_5tuple_t *ip4_5tuple,
                                       u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;

  ip4_5tuple->kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple->kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know
     whether to increment the flow cache counter. Since the flow
     cache counter is reset on any policy add/remove, but the hash
     table values are not, we also need to check whether the entry
     being overwritten is stale. If it is a stale-entry overwrite,
     we still want to increment the flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for a stale entry by comparing with the current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple->kv_16_8,
                    sizeof (ip4_5tuple->kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}

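/*
 * Build a fast-path 5-tuple from host-order IPv4 addresses and ports.
 * Ports are only meaningful for TCP/UDP/SCTP; for other protocols they
 * are zeroed.
 */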
always_inline void
ipsec_fp_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
                                u16 lp, u16 rp, u8 pr)
{
  clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
  tuple->laddr.as_u32 = clib_host_to_net_u32 (la);
  tuple->raddr.as_u32 = clib_host_to_net_u32 (ra);

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                     (pr != IP_PROTOCOL_SCTP)))
    {
      tuple->lport = 0;
      tuple->rport = 0;
    }
  else
    {
      tuple->lport = lp;
      tuple->rport = rp;
    }

  tuple->protocol = pr;
  tuple->is_ipv6 = 0;
}

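/*
 * Convert n ipsec4_spd_5tuple_t entries into fast-path 5-tuples,
 * applying the same TCP/UDP/SCTP-only port rule to each entry.
 */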
always_inline void
ipsec_fp_5tuple_from_ip4_range_n (ipsec_fp_5tuple_t *tuples,
                                  ipsec4_spd_5tuple_t *ip4_5tuple, u32 n)
{
  u32 n_left = n;
  ipsec_fp_5tuple_t *tuple = tuples;

  while (n_left)
    {
      clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
      tuple->laddr.as_u32 =
        clib_host_to_net_u32 (ip4_5tuple->ip4_addr[0].as_u32);
      tuple->raddr.as_u32 =
        clib_host_to_net_u32 (ip4_5tuple->ip4_addr[1].as_u32);
      if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
                         (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
                         (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
        {
          tuple->lport = 0;
          tuple->rport = 0;
        }
      else
        {
          tuple->lport = ip4_5tuple->port[0];
          tuple->rport = ip4_5tuple->port[1];
        }
      tuple->protocol = ip4_5tuple->proto;
      tuple->is_ipv6 = 0;
      n_left--;
      tuple++;
      ip4_5tuple++; /* advance the source tuple alongside the output */
    }
}

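/*
 * Match n IPv4 5-tuples against the outbound SPD. When the fast-path
 * SPD is enabled it is consulted first; any tuples still unmatched are
 * then resolved by a linear scan of IPSEC_SPD_POLICY_IP4_OUTBOUND.
 * Matched policies are written to the policies array (NULL for a miss)
 * and, when the flow cache is enabled, each match is inserted into it.
 * Returns the number of tuples that matched a policy.
 */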
always_inline int
ipsec_output_policy_match_n (ipsec_spd_t *spd,
                             ipsec4_spd_5tuple_t *ip4_5tuples,
                             ipsec_policy_t **policies, u32 n,
                             u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t **pp = policies;
  u32 n_left = n;
  ipsec4_spd_5tuple_t *ip4_5tuple = ip4_5tuples;
  u32 policy_ids[n], *policy_id = policy_ids;
  ipsec_fp_5tuple_t tuples[n];
  u32 *i;
  u32 counter = 0;

  if (!spd)
    return 0;

  clib_memset (policies, 0, n * sizeof (ipsec_policy_t *));

  if (im->ipv4_fp_spd_is_enabled)
    {
      ipsec_fp_5tuple_from_ip4_range_n (tuples, ip4_5tuples, n);
      counter += ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples,
                                              policies, policy_ids, n);
    }

  while (n_left)
    {
      if (*pp != 0)
        goto next;

      vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
        {
          p = pool_elt_at_index (im->policies, *i);
          if (PREDICT_FALSE (p->protocol &&
                             (p->protocol != ip4_5tuple->proto)))
            continue;

          if (ip4_5tuple->ip4_addr[0].as_u32 <
              clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[1].as_u32 >
              clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[0].as_u32 <
              clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[1].as_u32 >
              clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
            continue;

          if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
                             (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
                             (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
            {
              ip4_5tuple->port[0] = 0;
              ip4_5tuple->port[1] = 0;
              goto add_policy;
            }

          if (ip4_5tuple->port[0] < p->lport.start)
            continue;

          if (ip4_5tuple->port[0] > p->lport.stop)
            continue;

          if (ip4_5tuple->port[1] < p->rport.start)
            continue;

          if (ip4_5tuple->port[1] > p->rport.stop)
            continue;

        add_policy:
          *pp = p;
          *policy_id = *i;
          counter++;
          break;
        }

    next:
      n_left--;
      pp++;
      ip4_5tuple++;
      policy_id++;
    }

  if (flow_cache_enabled)
    {
      n_left = n;
      policy_id = policy_ids;
      ip4_5tuple = ip4_5tuples;
      pp = policies;

      while (n_left)
        {
          if (*pp != NULL)
            {
              /* Add an entry to the flow cache */
              ipsec4_out_spd_add_flow_cache_entry_n (im, ip4_5tuple,
                                                     *policy_id);
            }

          n_left--;
          policy_id++;
          ip4_5tuple++;
          pp++;
        }
    }

  return counter;
}

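/*
 * Look up a 5-tuple in the IPv4 outbound flow cache. The cached policy
 * is returned only when the key matches exactly and the entry's epoch
 * count is current (no policy add/remove since insertion); otherwise
 * NULL is returned.
 */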
always_inline ipsec_policy_t *
ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
                                      u16 lp, u16 rp)
{
  ipsec_policy_t *p = NULL;
  ipsec4_hash_kv_16_8_t kv_result;
  u64 hash;

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                     (pr != IP_PROTOCOL_SCTP)))
    {
      lp = 0;
      rp = 0;
    }
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
                                                   (ip4_address_t) ra },
                                     .port = { lp, rp },
                                     .proto = pr };

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  kv_result = im->ipsec4_out_spd_hash_tbl[hash];
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_5tuple.kv_16_8,
                                    (u64 *) &kv_result))
    {
      if (im->epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
        {
          /* Get the policy based on the index */
          p =
            pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
        }
    }

  return p;
}

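/*
 * Match a single IPv4 5-tuple (host byte order) against the outbound
 * SPD: fast-path lookup when enabled, otherwise a linear scan of
 * IPSEC_SPD_POLICY_IP4_OUTBOUND. On a match the result may also be
 * recorded in the flow cache. Returns the matched policy or 0.
 */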
always_inline ipsec_policy_t *
ipsec_output_policy_match (ipsec_spd_t *spd, u8 pr, u32 la, u32 ra, u16 lp,
                           u16 rp, u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  u32 fp_policy_ids[1];

  u32 *i;

  if (!spd)
    return 0;

  if (im->ipv4_fp_spd_is_enabled)
    {
      ipsec_fp_5tuple_from_ip4_range (&tuples[0], la, ra, lp, rp, pr);
      ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples, policies,
                                   fp_policy_ids, 1);
      p = policies[0];
      i = fp_policy_ids;
      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        {
          lp = 0;
          rp = 0;
        }
      goto add_flow_cache;
    }

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                         (p->protocol != pr)))
        continue;

      if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
        continue;

      if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
        continue;

      if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
        continue;

      if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
        continue;

      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        {
          lp = 0;
          rp = 0;
          goto add_flow_cache;
        }

      if (lp < p->lport.start)
        continue;

      if (lp > p->lport.stop)
        continue;

      if (rp < p->rport.start)
        continue;

      if (rp > p->rport.stop)
        continue;

    add_flow_cache:
      if (flow_cache_enabled)
        {
          /* Add an entry to the flow cache */
          ipsec4_out_spd_add_flow_cache_entry (
            im, pr, clib_host_to_net_u32 (la), clib_host_to_net_u32 (ra),
            clib_host_to_net_u16 (lp), clib_host_to_net_u16 (rp), *i);
        }

      return p;
    }
  return 0;
}

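/*
 * Inclusive range check for an IPv6 address: returns 1 when
 * la <= a <= ua under big-endian (memcmp) ordering, else 0.
 */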
always_inline uword
ip6_addr_match_range (ip6_address_t *a, ip6_address_t *la, ip6_address_t *ua)
{
  if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
      (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
    return 1;
  return 0;
}

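/*
 * Build a fast-path 5-tuple from IPv6 addresses and ports. Unlike the
 * IPv4 variant, ports are copied unconditionally for all protocols.
 */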
always_inline void
ipsec_fp_5tuple_from_ip6_range (ipsec_fp_5tuple_t *tuple, ip6_address_t *la,
                                ip6_address_t *ra, u16 lp, u16 rp, u8 pr)
{
  clib_memcpy (&tuple->ip6_laddr, la, sizeof (ip6_address_t));
  clib_memcpy (&tuple->ip6_raddr, ra, sizeof (ip6_address_t));

  tuple->lport = lp;
  tuple->rport = rp;
  tuple->protocol = pr;
  tuple->is_ipv6 = 1;
}

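/*
 * Match an IPv6 5-tuple against the outbound SPD: fast-path lookup when
 * enabled, otherwise a linear scan of IPSEC_SPD_POLICY_IP6_OUTBOUND.
 * Port checks are skipped for non-TCP/UDP/SCTP traffic. Unlike the IPv4
 * path, no flow cache entry is added. Returns the matched policy or 0.
 */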
always_inline ipsec_policy_t *
ipsec6_output_policy_match (ipsec_spd_t *spd, ip6_address_t *la,
                            ip6_address_t *ra, u16 lp, u16 rp, u8 pr)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  u32 fp_policy_ids[1];

  u32 *i;

  if (!spd)
    return 0;

  if (im->ipv6_fp_spd_is_enabled)
    {
      ipsec_fp_5tuple_from_ip6_range (&tuples[0], la, ra, lp, rp, pr);
      ipsec_fp_out_policy_match_n (&spd->fp_spd, 1, tuples, policies,
                                   fp_policy_ids, 1);
      p = policies[0];
      i = fp_policy_ids;
      return p;
    }

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                         (p->protocol != pr)))
        continue;

      if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
        continue;

      if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
        continue;

      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        return p;

      if (lp < p->lport.start)
        continue;

      if (lp > p->lport.stop)
        continue;

      if (rp < p->rport.start)
        continue;

      if (rp > p->rport.stop)
        continue;

      return p;
    }

  return 0;
}

#endif /* !IPSEC_OUTPUT_H */