/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ipsec/ipsec.h>

/**
 * @brief
 * Policy packet & bytes counters
 */
vlib_combined_counter_main_t ipsec_spd_policy_counters = {
  .name = "policy",
  .stat_segment_name = "/net/ipsec/policy",
};

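/*
 * Comparator for vec_sort_with_function (): sorts an SPD bucket in
 * descending priority order so that higher-priority policies are
 * matched first during lookup.
 */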
static int
ipsec_spd_entry_sort (void *a1, void *a2)
{
  ipsec_main_t *im = &ipsec_main;
  u32 *id1 = a1;
  u32 *id2 = a2;
  ipsec_policy_t *p1, *p2;

  p1 = pool_elt_at_index (im->policies, *id1);
  p2 = pool_elt_at_index (im->policies, *id2);
  if (p1 && p2)
    return p2->priority - p1->priority;

  return 0;
}

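/*
 * Map (direction, address family, action) to an SPD bucket type.
 * Outbound policies share one bucket per address family; inbound
 * policies get a bucket per action. RESOLVE has no bucket and is
 * reported as unsupported.
 */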
int
ipsec_policy_mk_type (bool is_outbound,
                      bool is_ipv6,
                      ipsec_policy_action_t action,
                      ipsec_spd_policy_type_t * type)
{
  if (is_outbound)
    {
      *type = (is_ipv6 ?
               IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
      return (0);
    }
  else
    {
      switch (action)
        {
        case IPSEC_POLICY_ACTION_PROTECT:
          *type = (is_ipv6 ?
                   IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
                   IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
          return (0);
        case IPSEC_POLICY_ACTION_BYPASS:
          *type = (is_ipv6 ?
                   IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
                   IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
          return (0);
        case IPSEC_POLICY_ACTION_DISCARD:
          *type = (is_ipv6 ?
                   IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
                   IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
          return (0);
        case IPSEC_POLICY_ACTION_RESOLVE:
          break;
        }
    }

  /* Unsupported type */
  return (-1);
}

int
ipsec_add_del_policy (vlib_main_t * vm,
                      ipsec_policy_t * policy, int is_add, u32 * stat_index)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_spd_t *spd = 0;
  ipsec_policy_t *vp;
  u32 spd_index;
  uword *p;

  p = hash_get (im->spd_index_by_spd_id, policy->id);

  if (!p)
    return VNET_API_ERROR_SYSCALL_ERROR_1;

  spd_index = p[0];
  spd = pool_elt_at_index (im->spds, spd_index);
  if (!spd)
    return VNET_API_ERROR_SYSCALL_ERROR_1;

  if (im->output_flow_cache_flag && !policy->is_ipv6 &&
      policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
    {
      /*
       * A flow cache entry is valid only when the epoch_count values in
       * the control plane and data plane match. Otherwise, the entry is
       * considered stale. To avoid the race condition of the data plane
       * using an old epoch_count value after epoch_count rolls over in
       * the control plane, the entire flow cache is reset.
       */
      if (im->epoch_count == 0xFFFFFFFF)
        {
          /* Reset all the entries in flow cache */
          clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
                          im->ipsec4_out_spd_hash_num_buckets *
                            (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
        }
      /* Increment epoch counter by 1 */
      clib_atomic_fetch_add_relax (&im->epoch_count, 1);
      /* Reset spd flow cache counter since all old entries are stale */
      clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
    }

  if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
      im->input_flow_cache_flag && !policy->is_ipv6)
    {
      /*
       * A flow cache entry is valid only when the input_epoch_count
       * values in the control plane and data plane match. Otherwise, the
       * entry is considered stale. To avoid the race condition of the
       * data plane using an old input_epoch_count value after
       * input_epoch_count rolls over in the control plane, the entire
       * flow cache is reset.
       */
      if (im->input_epoch_count == 0xFFFFFFFF)
        {
          /* Reset all the entries in flow cache */
          clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
                          im->ipsec4_in_spd_hash_num_buckets *
                            (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
        }
      /* Increment epoch counter by 1 */
      clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
      /* Reset spd flow cache counter since all old entries are stale */
      im->ipsec4_in_spd_flow_cache_entries = 0;
    }

  if (is_add)
    {
      u32 policy_index;

      if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
        {
          index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);

          if (INDEX_INVALID == sa_index)
            return VNET_API_ERROR_SYSCALL_ERROR_1;
          policy->sa_index = sa_index;
        }
      else
        policy->sa_index = INDEX_INVALID;

      /**
       * Policies of the fast path SPD types are added there when the
       * fast path is enabled; all other policies fall through to the
       * traditional SPD below.
       **/
      if ((im->ipv4_fp_spd_is_enabled &&
           policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
          (im->ipv6_fp_spd_is_enabled &&
           policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
        return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
                                        stat_index);

      pool_get (im->policies, vp);
      clib_memcpy (vp, policy, sizeof (*vp));
      policy_index = vp - im->policies;

      vlib_validate_combined_counter (&ipsec_spd_policy_counters,
                                      policy_index);
      vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
      vec_add1 (spd->policies[policy->type], policy_index);
      vec_sort_with_function (spd->policies[policy->type],
                              ipsec_spd_entry_sort);
      *stat_index = policy_index;
    }
  else
    {
      u32 ii;

      /**
       * Deletion mirrors addition: policies of the fast path SPD types
       * are deleted there when the fast path is enabled; all others are
       * deleted from the traditional SPD below.
       **/

      if ((im->ipv4_fp_spd_is_enabled &&
           policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
          (im->ipv6_fp_spd_is_enabled &&
           policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
        return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
                                        stat_index);

      vec_foreach_index (ii, (spd->policies[policy->type]))
        {
          vp = pool_elt_at_index (im->policies,
                                  spd->policies[policy->type][ii]);
          if (ipsec_policy_is_equal (vp, policy))
            {
              vec_delete (spd->policies[policy->type], 1, ii);
              ipsec_sa_unlock (vp->sa_index);
              pool_put (im->policies, vp);
              break;
            }
        }
    }

  return 0;
}

static_always_inline void
release_mask_type_index (ipsec_main_t *im, u32 mask_type_index)
{
  ipsec_fp_mask_type_entry_t *mte =
    pool_elt_at_index (im->fp_mask_types, mask_type_index);
  mte->refcount--;
  if (mte->refcount == 0)
    {
      /* this entry is not in use anymore */
      ASSERT (clib_memset (mte, 0xae, sizeof (*mte)) == EOK);
      pool_put (im->fp_mask_types, mte);
    }
}

static_always_inline u32
find_mask_type_index (ipsec_main_t *im, ipsec_fp_5tuple_t *mask)
{
  ipsec_fp_mask_type_entry_t *mte;

  pool_foreach (mte, im->fp_mask_types)
    {
      if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
        return (mte - im->fp_mask_types);
    }

  return ~0;
}

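/*
 * Build a bihash key by ANDing the 5-tuple with its wildcard mask so
 * that only the significant bits take part in the lookup. The value is
 * zeroed; it later carries the vector of matching policy indices.
 */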
static_always_inline void
fill_ip6_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
                         clib_bihash_kv_40_8_t *kv)
{
  ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
  u64 *pmatch = (u64 *) match->kv_40_8.key;
  u64 *pmask = (u64 *) mask->kv_40_8.key;
  u64 *pkey = (u64 *) kv->key;

  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey = *pmatch & *pmask;

  kv_val->as_u64 = 0;
}

static_always_inline void
fill_ip4_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
                         clib_bihash_kv_16_8_t *kv)
{
  ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
  u64 *pmatch = (u64 *) match->kv_16_8.key;
  u64 *pmask = (u64 *) mask->kv_16_8.key;
  u64 *pkey = (u64 *) kv->key;

  *pkey++ = *pmatch++ & *pmask++;
  *pkey = *pmatch & *pmask;

  kv_val->as_u64 = 0;
}

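/*
 * Round a value down to its highest set bit by smearing that bit into
 * every lower position and xoring away the lower half, e.g.
 * 0x1234 -> 0x1fff -> 0x1000. Used to locate the first bit in which a
 * range's start and stop differ.
 */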
static_always_inline u16
get_highest_set_bit_u16 (u16 x)
{
  x |= x >> 8;
  x |= x >> 4;
  x |= x >> 2;
  x |= x >> 1;
  return x ^ (x >> 1);
}

static_always_inline u32
get_highest_set_bit_u32 (u32 x)
{
  x |= x >> 16;
  x |= x >> 8;
  x |= x >> 4;
  x |= x >> 2;
  x |= x >> 1;
  return x ^ (x >> 1);
}

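/*
 * Smear the highest set bit downwards and invert: the result keeps
 * only the bits above the first position where start and stop differ,
 * e.g. 0x1234 -> ~0x1fff = 0xffffffffffffe000. For x == 0 the result
 * is all ones.
 */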
static_always_inline u64
mask_out_highest_set_bit_u64 (u64 x)
{
  x |= x >> 32;
  x |= x >> 16;
  x |= x >> 8;
  x |= x >> 4;
  x |= x >> 2;
  x |= x >> 1;
  return ~x;
}

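/*
 * Derive prefix-style wildcard masks for the L4 ports. Worked example:
 * an lport range of 1024-1279 gives start ^ stop = 0x00ff, highest set
 * bit 0x0080 and mask ~(0x0080 - 1) & ~0x0080 = 0xff00; ranges that
 * are not aligned to a power-of-two block are widened to the enclosing
 * prefix.
 */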
static_always_inline void
ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
                                ipsec_fp_5tuple_t *mask)
{
  if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
                    (policy->protocol == IP_PROTOCOL_UDP) ||
                    (policy->protocol == IP_PROTOCOL_SCTP)))
    {
      mask->lport = policy->lport.start ^ policy->lport.stop;
      mask->rport = policy->rport.start ^ policy->rport.stop;

      mask->lport = get_highest_set_bit_u16 (mask->lport);
      mask->lport = ~(mask->lport - 1) & (~mask->lport);

      mask->rport = get_highest_set_bit_u16 (mask->rport);
      mask->rport = ~(mask->rport - 1) & (~mask->rport);
    }
  else
    {
      mask->lport = 0;
      mask->rport = 0;
    }

  mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
}

static_always_inline void
ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
{
  u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
  u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
  u32 *plmask = (u32 *) &mask->laddr;
  u32 *praddr_start = (u32 *) &policy->raddr.start.ip4;
  u32 *praddr_stop = (u32 *) &policy->raddr.stop.ip4;
  u32 *prmask = (u32 *) &mask->raddr;

  clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
  clib_memset_u8 (&mask->l3_zero_pad, 0, sizeof (mask->l3_zero_pad));

  /* find bits where start != stop */
  *plmask = *pladdr_start ^ *pladdr_stop;
  *prmask = *praddr_start ^ *praddr_stop;
  /* Find most significant bit set (that is the first position
   * start differs from stop). Mask out everything after that bit and
   * the bit itself. Remember that policy stores start and stop in the net
   * order.
   */
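  /*
   * Worked example: an laddr range of 10.0.0.0 - 10.0.0.255 gives
   * start ^ stop = 0x000000ff in host order, highest set bit
   * 0x00000080, so the stored mask becomes 0xffffff00, i.e. a /24
   * prefix.
   */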
  *plmask = get_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask));
  *plmask = clib_host_to_net_u32 (~(*plmask - 1) & (~*plmask));

  *prmask = get_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask));
  *prmask = clib_host_to_net_u32 (~(*prmask - 1) & (~*prmask));

  ipsec_fp_get_policy_ports_mask (policy, mask);
}

static_always_inline void
ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
{
  u64 *pladdr_start = (u64 *) &policy->laddr.start;
  u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
  u64 *plmask = (u64 *) &mask->ip6_laddr;
  u64 *praddr_start = (u64 *) &policy->raddr.start;
  u64 *praddr_stop = (u64 *) &policy->raddr.stop;
  u64 *prmask = (u64 *) &mask->ip6_raddr;

  clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));

  *plmask = (*pladdr_start++ ^ *pladdr_stop++);

  *prmask = (*praddr_start++ ^ *praddr_stop++);

  /* Find most significant bit set (that is the first position
   * start differs from stop). Mask out everything after that bit and
   * the bit itself. Remember that policy stores start and stop in the net
   * order.
   */
  *plmask = clib_host_to_net_u64 (
    mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));

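  /*
   * The low 64 bits of an address contribute to the mask only when the
   * high 64 bits are fully significant, i.e. their mask extends down to
   * the last bit; otherwise everything below the first differing bit is
   * wildcarded.
   */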
  if (*plmask++ & clib_host_to_net_u64 (0x1))
    {
      *plmask = (*pladdr_start ^ *pladdr_stop);
      *plmask = clib_host_to_net_u64 (
        mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
    }
  else
    *plmask = 0;

  *prmask = clib_host_to_net_u64 (
    mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));

  if (*prmask++ & clib_host_to_net_u64 (0x1))
    {
      *prmask = (*praddr_start ^ *praddr_stop);
      *prmask = clib_host_to_net_u64 (
        mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
    }
  else
    *prmask = 0;

  ipsec_fp_get_policy_ports_mask (policy, mask);
}

static_always_inline void
ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple)
{
  memset (tuple, 0, sizeof (*tuple));
  tuple->is_ipv6 = policy->is_ipv6;
  if (tuple->is_ipv6)
    {
      tuple->ip6_laddr = policy->laddr.start.ip6;
      tuple->ip6_raddr = policy->raddr.start.ip6;
    }
  else
    {
      tuple->laddr = policy->laddr.start.ip4;
      tuple->raddr = policy->raddr.start.ip4;
    }

  tuple->protocol = policy->protocol;

  tuple->lport = policy->lport.start;
  tuple->rport = policy->rport.start;
}

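/*
 * Fast path add: derive the wildcard mask from the policy, find or
 * allocate the matching mask-type entry, then append the policy index
 * to the hash bucket keyed by the masked 5-tuple.
 */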
int
ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
                         ipsec_policy_t *policy, u32 *stat_index)
{
  u32 mask_index;
  ipsec_policy_t *vp;
  ipsec_fp_mask_type_entry_t *mte;
  u32 policy_index;
  clib_bihash_kv_16_8_t kv;
  clib_bihash_kv_16_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;

  ipsec_fp_5tuple_t mask, policy_5tuple;
  int res;

  ipsec_fp_ip4_get_policy_mask (policy, &mask);
  pool_get (im->policies, vp);
  policy_index = vp - im->policies;
  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  *stat_index = policy_index;
  mask_index = find_mask_type_index (im, &mask);

  if (mask_index == ~0)
    {
      /* mask type not found, we need to create a new entry */
      pool_get (im->fp_mask_types, mte);
      mask_index = mte - im->fp_mask_types;
      mte->refcount = 0;
    }
  else
    mte = im->fp_mask_types + mask_index;

  policy->fp_mask_type_id = mask_index;
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);

  fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);

  res = clib_bihash_search_inline_2_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv,
                                          &result);
  if (res != 0)
    {
      /* key was not found, create a new entry */
      vec_add1 (key_val->fp_policies_ids, policy_index);
      res = clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv, 1);
      if (res != 0)
        goto error;
    }
  else
    {
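      /*
       * The bihash value holds the bucket's policy-id vector. While the
       * vector has spare capacity, vec_add1 will not move it and the
       * stored pointer stays valid; once it is full, append and re-add
       * the entry so the hash points at the reallocated vector.
       */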
      if (vec_max_len (result_val->fp_policies_ids) !=
          vec_len (result_val->fp_policies_ids))
        {
          /* no need to resize */
          vec_add1 (result_val->fp_policies_ids, policy_index);
        }
      else
        {
          vec_add1 (result_val->fp_policies_ids, policy_index);

          res =
            clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash, &result, 1);

          if (res != 0)
            goto error;
        }
    }

  if (mte->refcount == 0)
    {
      clib_memcpy (&mte->mask, &mask, sizeof (mask));
      mte->refcount = 0;
      vec_add1 (fp_spd->fp_mask_types[policy->type], mask_index);
    }

  mte->refcount++;
  vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
  clib_memcpy (vp, policy, sizeof (*vp));

  return 0;

error:
  pool_put (im->policies, vp);
  release_mask_type_index (im, mask_index);
  return -1;
}

int
ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
                         ipsec_policy_t *policy, u32 *stat_index)
{
  u32 mask_index;
  ipsec_policy_t *vp;
  ipsec_fp_mask_type_entry_t *mte;
  u32 policy_index;
  clib_bihash_kv_40_8_t kv;
  clib_bihash_kv_40_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;

  ipsec_fp_5tuple_t mask, policy_5tuple;
  int res;

  ipsec_fp_ip6_get_policy_mask (policy, &mask);

  pool_get (im->policies, vp);
  policy_index = vp - im->policies;
  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  *stat_index = policy_index;
  mask_index = find_mask_type_index (im, &mask);

  if (mask_index == ~0)
    {
      /* mask type not found, we need to create a new entry */
      pool_get (im->fp_mask_types, mte);
      mask_index = mte - im->fp_mask_types;
      mte->refcount = 0;
    }
  else
    mte = im->fp_mask_types + mask_index;

  policy->fp_mask_type_id = mask_index;
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);

  fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);

  res = clib_bihash_search_inline_2_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv,
                                          &result);
  if (res != 0)
    {
      /* key was not found, create a new entry */
      vec_add1 (key_val->fp_policies_ids, policy_index);
      res = clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv, 1);
      if (res != 0)
        goto error;
    }
  else
    {
      if (vec_max_len (result_val->fp_policies_ids) !=
          vec_len (result_val->fp_policies_ids))
        {
          /* no need to resize */
          vec_add1 (result_val->fp_policies_ids, policy_index);
        }
      else
        {
          vec_add1 (result_val->fp_policies_ids, policy_index);

          res =
            clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash, &result, 1);

          if (res != 0)
            goto error;
        }
    }

  if (mte->refcount == 0)
    {
      clib_memcpy (&mte->mask, &mask, sizeof (mask));
      mte->refcount = 0;
      vec_add1 (fp_spd->fp_mask_types[policy->type], mask_index);
    }

  mte->refcount++;
  vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
  clib_memcpy (vp, policy, sizeof (*vp));

  return 0;

error:
  pool_put (im->policies, vp);
  release_mask_type_index (im, mask_index);
  return -1;
}

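/*
 * Fast path delete: look up the bucket for the masked 5-tuple, find
 * the policy that matches exactly, unlink it from the bucket and the
 * per-type vectors, then drop its mask-type reference.
 */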
int
ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
                         ipsec_policy_t *policy)
{
  int res;
  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
  clib_bihash_kv_40_8_t kv;
  clib_bihash_kv_40_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;

  ipsec_policy_t *vp;
  u32 ii, iii, imt;

  ipsec_fp_ip6_get_policy_mask (policy, &mask);
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
  fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
  res = clib_bihash_search_inline_2_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv,
                                          &result);
  if (res != 0)
    return -1;

  res = -1;
  vec_foreach_index (ii, result_val->fp_policies_ids)
    {
      vp =
        pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
      if (ipsec_policy_is_equal (vp, policy))
        {
          vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
            {
              if (*(fp_spd->fp_policies[policy->type] + iii) ==
                  *(result_val->fp_policies_ids + ii))
                {
                  if (vec_len (result_val->fp_policies_ids) == 1)
                    {
                      vec_free (result_val->fp_policies_ids);
                      clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash,
                                                &result, 0);
                    }
                  else
                    {
                      vec_del1 (result_val->fp_policies_ids, ii);
                    }
                  vec_del1 (fp_spd->fp_policies[policy->type], iii);

                  vec_foreach_index (imt, fp_spd->fp_mask_types[policy->type])
                    {
                      if (*(fp_spd->fp_mask_types[policy->type] + imt) ==
                          vp->fp_mask_type_id)
                        {
                          ipsec_fp_mask_type_entry_t *mte = pool_elt_at_index (
                            im->fp_mask_types, vp->fp_mask_type_id);

                          if (mte->refcount == 1)
                            vec_del1 (fp_spd->fp_mask_types[policy->type],
                                      imt);
                          break;
                        }
                    }

                  res = 0;
                  break;
                }
            }

          if (res != 0)
            continue;
          else
            {
              release_mask_type_index (im, vp->fp_mask_type_id);
              ipsec_sa_unlock (vp->sa_index);
              pool_put (im->policies, vp);
              return 0;
            }
        }
    }
  return -1;
}

int
ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
                         ipsec_policy_t *policy)
{
  int res;
  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
  clib_bihash_kv_16_8_t kv;
  clib_bihash_kv_16_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;

  ipsec_policy_t *vp;
  u32 ii, iii, imt;

  ipsec_fp_ip4_get_policy_mask (policy, &mask);
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
  fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
  res = clib_bihash_search_inline_2_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv,
                                          &result);
  if (res != 0)
    return -1;

  res = -1;
  vec_foreach_index (ii, result_val->fp_policies_ids)
    {
      vp =
        pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
      if (ipsec_policy_is_equal (vp, policy))
        {
          vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
            {
              if (*(fp_spd->fp_policies[policy->type] + iii) ==
                  *(result_val->fp_policies_ids + ii))
                {
                  if (vec_len (result_val->fp_policies_ids) == 1)
                    {
                      vec_free (result_val->fp_policies_ids);
                      clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash,
                                                &result, 0);
                    }
                  else
                    {
                      vec_del1 (result_val->fp_policies_ids, ii);
                    }
                  vec_del1 (fp_spd->fp_policies[policy->type], iii);

                  vec_foreach_index (imt, fp_spd->fp_mask_types[policy->type])
                    {
                      if (*(fp_spd->fp_mask_types[policy->type] + imt) ==
                          vp->fp_mask_type_id)
                        {
                          ipsec_fp_mask_type_entry_t *mte = pool_elt_at_index (
                            im->fp_mask_types, vp->fp_mask_type_id);

                          if (mte->refcount == 1)
                            vec_del1 (fp_spd->fp_mask_types[policy->type],
                                      imt);
                          break;
                        }
                    }

                  res = 0;
                  break;
                }
            }

          if (res != 0)
            continue;
          else
            {
              release_mask_type_index (im, vp->fp_mask_type_id);
              ipsec_sa_unlock (vp->sa_index);
              pool_put (im->policies, vp);
              return 0;
            }
        }
    }
  return -1;
}

int
ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
                         u32 *stat_index)
{
  ipsec_main_t *im = &ipsec_main;

  if (is_add)
    {
      if (policy->is_ipv6)
        return ipsec_fp_ip6_add_policy (im, (ipsec_spd_fp_t *) fp_spd,
                                        policy, stat_index);
      else
        return ipsec_fp_ip4_add_policy (im, (ipsec_spd_fp_t *) fp_spd,
                                        policy, stat_index);
    }
  else
    {
      if (policy->is_ipv6)
        return ipsec_fp_ip6_del_policy (im, (ipsec_spd_fp_t *) fp_spd,
                                        policy);
      else
        return ipsec_fp_ip4_del_policy (im, (ipsec_spd_fp_t *) fp_spd,
                                        policy);
    }
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */