/*
 *------------------------------------------------------------------
 * Copyright (c) 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#ifndef IPSEC_OUTPUT_H
#define IPSEC_OUTPUT_H

#include <vppinfra/types.h>
#include <vnet/ipsec/ipsec_spd.h>
#include <vnet/ipsec/ipsec_spd_fp_lookup.h>

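/* Insert one entry into the IPv4 outbound SPD flow cache. The key is the
   packet 5-tuple (addresses, ports, protocol); the 8-byte value packs the
   matched policy index in the upper 32 bits and the current epoch count in
   the lower 32 bits, which is later used to detect stale entries. Addresses
   and ports are expected in network byte order, as can be seen from the
   clib_host_to_net_* conversions at the call site in
   ipsec_output_policy_match () below. */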
always_inline void
ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
                                     u16 lp, u16 rp, u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
                                                   (ip4_address_t) ra },
                                     .port = { lp, rp },
                                     .proto = pr };

  ip4_5tuple.kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know
     whether to increment the flow cache counter. Since the flow
     cache counter is reset on any policy add/remove, but the
     hash table values are not, we also need to check whether the
     entry we are overwriting is stale. If it is a stale-entry
     overwrite, we still want to increment the flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for a stale entry by comparing with the current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple.kv_16_8,
                    sizeof (ip4_5tuple.kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}

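/* Flow cache insert used on the batch match path: takes an already
   populated ipsec4_spd_5tuple_t instead of individual 5-tuple fields, but
   otherwise performs the same hash, overwrite/staleness check and counter
   update as ipsec4_out_spd_add_flow_cache_entry () above. */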
always_inline void
ipsec4_out_spd_add_flow_cache_entry_n (ipsec_main_t *im,
                                       ipsec4_spd_5tuple_t *ip4_5tuple,
                                       u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;

  ip4_5tuple->kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple->kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know
     whether to increment the flow cache counter. Since the flow
     cache counter is reset on any policy add/remove, but the
     hash table values are not, we also need to check whether the
     entry we are overwriting is stale. If it is a stale-entry
     overwrite, we still want to increment the flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for a stale entry by comparing with the current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple->kv_16_8,
                    sizeof (ip4_5tuple->kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}

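/* Convert a single IPv4 address/port/protocol set (host byte order) into
   the generic ipsec_fp_5tuple_t used by the fast path SPD lookup. Ports are
   only meaningful for TCP/UDP/SCTP; for any other protocol they are
   zeroed. */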
always_inline void
ipsec_fp_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
                                u16 lp, u16 rp, u8 pr)
{
  clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
  tuple->laddr.as_u32 = clib_host_to_net_u32 (la);
  tuple->raddr.as_u32 = clib_host_to_net_u32 (ra);

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                     (pr != IP_PROTOCOL_SCTP)))
    {
      tuple->lport = 0;
      tuple->rport = 0;
    }
  else
    {
      tuple->lport = lp;
      tuple->rport = rp;
    }

  tuple->protocol = pr;
  tuple->is_ipv6 = 0;
}

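/* Convert n ipsec4_spd_5tuple_t entries into n ipsec_fp_5tuple_t entries
   for the fast path lookup, applying the same TCP/UDP/SCTP port rule as the
   single-tuple conversion above. */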
always_inline void
ipsec_fp_5tuple_from_ip4_range_n (ipsec_fp_5tuple_t *tuples,
                                  ipsec4_spd_5tuple_t *ip4_5tuple, u32 n)
{
  u32 n_left = n;
  ipsec_fp_5tuple_t *tuple = tuples;

  while (n_left)
    {
      clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
      tuple->laddr.as_u32 =
        clib_host_to_net_u32 (ip4_5tuple->ip4_addr[0].as_u32);
      tuple->raddr.as_u32 =
        clib_host_to_net_u32 (ip4_5tuple->ip4_addr[1].as_u32);
      if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
                         (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
                         (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
        {
          tuple->lport = 0;
          tuple->rport = 0;
        }
      else
        {
          tuple->lport = ip4_5tuple->port[0];
          tuple->rport = ip4_5tuple->port[1];
        }
      tuple->protocol = ip4_5tuple->proto;
      tuple->is_ipv6 = 0;
      n_left--;
      /* advance both the output and the input tuple */
      tuple++;
      ip4_5tuple++;
    }
}

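/* Match n outbound IPv4 5-tuples against the SPD. If the fast path SPD is
   enabled, it is consulted first for the whole batch; any tuples still
   without a policy are then matched by a linear walk of the IP4 outbound
   policy vector. Returns the number of tuples for which a policy was found
   and, when the flow cache is enabled, caches every successful match. */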
always_inline int
ipsec_output_policy_match_n (ipsec_spd_t *spd,
                             ipsec4_spd_5tuple_t *ip4_5tuples,
                             ipsec_policy_t **policies, u32 n,
                             u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t **pp = policies;
  u32 n_left = n;
  ipsec4_spd_5tuple_t *ip4_5tuple = ip4_5tuples;
  u32 policy_ids[n], *policy_id = policy_ids;
  ipsec_fp_5tuple_t tuples[n];
  u32 *i;
  u32 counter = 0;

  if (!spd)
    return 0;

  clib_memset (policies, 0, n * sizeof (ipsec_policy_t *));

  if (im->fp_spd_is_enabled)
    {
      ipsec_fp_5tuple_from_ip4_range_n (tuples, ip4_5tuples, n);
      counter += ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples,
                                              policies, policy_ids, n);
    }

  while (n_left)
    {
      if (*pp != 0)
        goto next;

      vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
        {
          p = pool_elt_at_index (im->policies, *i);
          if (PREDICT_FALSE (p->protocol &&
                             (p->protocol != ip4_5tuple->proto)))
            continue;

          if (ip4_5tuple->ip4_addr[0].as_u32 <
              clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[1].as_u32 >
              clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[0].as_u32 <
              clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[1].as_u32 >
              clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
            continue;

          if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
                             (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
                             (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
            {
              ip4_5tuple->port[0] = 0;
              ip4_5tuple->port[1] = 0;
              goto add_policy;
            }

          if (ip4_5tuple->port[0] < p->lport.start)
            continue;

          if (ip4_5tuple->port[0] > p->lport.stop)
            continue;

          if (ip4_5tuple->port[1] < p->rport.start)
            continue;

          if (ip4_5tuple->port[1] > p->rport.stop)
            continue;

        add_policy:
          *pp = p;
          *policy_id = *i;
          counter++;
          break;
        }

    next:
      n_left--;
      pp++;
      ip4_5tuple++;
      policy_id++;
    }

  if (flow_cache_enabled)
    {
      n_left = n;
      policy_id = policy_ids;
      ip4_5tuple = ip4_5tuples;
      pp = policies;

      while (n_left)
        {
          if (*pp != NULL)
            {
              /* Add an entry to the flow cache */
              ipsec4_out_spd_add_flow_cache_entry_n (im, ip4_5tuple,
                                                     *policy_id);
            }

          n_left--;
          policy_id++;
          ip4_5tuple++;
          pp++;
        }
    }

  return counter;
}

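/* Look up an outbound policy in the IPv4 flow cache. Returns the cached
   policy only when the stored key matches the 5-tuple and the entry's epoch
   count equals the current one, i.e. the entry is not stale; otherwise
   returns NULL so the caller can fall back to a full SPD match. */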
always_inline ipsec_policy_t *
ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
                                      u16 lp, u16 rp)
{
  ipsec_policy_t *p = NULL;
  ipsec4_hash_kv_16_8_t kv_result;
  u64 hash;

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                     (pr != IP_PROTOCOL_SCTP)))
    {
      lp = 0;
      rp = 0;
    }
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
                                                   (ip4_address_t) ra },
                                     .port = { lp, rp },
                                     .proto = pr };

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  kv_result = im->ipsec4_out_spd_hash_tbl[hash];
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_5tuple.kv_16_8,
                                    (u64 *) &kv_result))
    {
      if (im->epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
        {
          /* Get the policy based on the index */
          p =
            pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
        }
    }

  return p;
}

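/* Match a single outbound IPv4 packet against the SPD. Addresses and ports
   are expected in host byte order (note the clib_host_to_net_* conversions
   before the flow cache insert below). The fast path SPD is tried first
   when enabled; otherwise the IP4 outbound policy vector is walked linearly
   and the first matching policy wins. When flow_cache_enabled is set, the
   match result is also inserted into the flow cache. A minimal usage
   sketch, with illustrative names only (spd0, proto, src, dst, sport,
   dport and flow_cache_on are assumptions, not taken from this file):

     ipsec_policy_t *p0 =
       ipsec_output_policy_match (spd0, proto, src, dst, sport, dport,
                                  flow_cache_on);
*/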
always_inline ipsec_policy_t *
ipsec_output_policy_match (ipsec_spd_t *spd, u8 pr, u32 la, u32 ra, u16 lp,
                           u16 rp, u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  u32 fp_policy_ids[1];

  u32 *i;

  if (!spd)
    return 0;

  ipsec_fp_5tuple_from_ip4_range (&tuples[0], la, ra, lp, rp, pr);

  if (im->fp_spd_is_enabled &&
      (1 == ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples, policies,
                                         fp_policy_ids, 1)))
    {
      p = policies[0];
      i = fp_policy_ids;
      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        {
          lp = 0;
          rp = 0;
        }
      goto add_flow_cache;
    }

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                         (p->protocol != pr)))
        continue;

      if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
        continue;

      if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
        continue;

      if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
        continue;

      if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
        continue;

      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        {
          lp = 0;
          rp = 0;
          goto add_flow_cache;
        }

      if (lp < p->lport.start)
        continue;

      if (lp > p->lport.stop)
        continue;

      if (rp < p->rport.start)
        continue;

      if (rp > p->rport.stop)
        continue;

    add_flow_cache:
      if (flow_cache_enabled)
        {
          /* Add an entry to the flow cache */
          ipsec4_out_spd_add_flow_cache_entry (
            im, pr, clib_host_to_net_u32 (la), clib_host_to_net_u32 (ra),
            clib_host_to_net_u16 (lp), clib_host_to_net_u16 (rp), *i);
        }

      return p;
    }
  return 0;
}

#endif /* !IPSEC_OUTPUT_H */