blob: 6a66a2de2691c6cb3ac31b354585dc7203efeeca [file] [log] [blame]
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16#include <vnet/ipsec/ipsec.h>
17
/**
 * @brief
 * Policy packet & bytes counters.
 *
 * One combined (packets, bytes) counter per SPD policy, indexed by the
 * policy's pool index and exported to the stats segment under
 * "/net/ipsec/policy".
 */
vlib_combined_counter_main_t ipsec_spd_policy_counters = {
  .name = "policy",
  .stat_segment_name = "/net/ipsec/policy",
};
26
Neale Ranns999c8ee2019-02-01 03:31:24 -080027int
Neale Ranns9f231d42019-03-19 10:06:00 +000028ipsec_policy_mk_type (bool is_outbound,
29 bool is_ipv6,
30 ipsec_policy_action_t action,
31 ipsec_spd_policy_type_t * type)
32{
33 if (is_outbound)
34 {
35 *type = (is_ipv6 ?
36 IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
37 return (0);
38 }
39 else
40 {
41 switch (action)
42 {
43 case IPSEC_POLICY_ACTION_PROTECT:
44 *type = (is_ipv6 ?
45 IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
46 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
47 return (0);
48 case IPSEC_POLICY_ACTION_BYPASS:
49 *type = (is_ipv6 ?
50 IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
51 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
52 return (0);
53 case IPSEC_POLICY_ACTION_DISCARD:
ShivaShankarK05464832020-04-14 14:01:03 +053054 *type = (is_ipv6 ?
55 IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
56 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
57 return (0);
Neale Ranns9f231d42019-03-19 10:06:00 +000058 case IPSEC_POLICY_ACTION_RESOLVE:
59 break;
60 }
61 }
62
63 /* Unsupported type */
64 return (-1);
65}
66
Piotr Bronowski993b6be2022-08-31 13:48:14 +000067static_always_inline int
68ipsec_is_policy_inbound (ipsec_policy_t *policy)
69{
70 if (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
71 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
Piotr Bronowski06abf232022-09-20 14:44:36 +000072 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD ||
73 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
74 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
75 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)
Piotr Bronowski993b6be2022-08-31 13:48:14 +000076 return 1;
77
78 return 0;
79}
80
Piotr Bronowski06abf232022-09-20 14:44:36 +000081static_always_inline int
82ipsec_is_fp_enabled (ipsec_main_t *im, ipsec_spd_t *spd,
83 ipsec_policy_t *policy)
84{
85 if ((im->fp_spd_ipv4_out_is_enabled &&
86 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx) &&
87 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
88 (im->fp_spd_ipv4_in_is_enabled &&
89 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_in_lookup_hash_idx) &&
90 (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
91 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
92 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
93 (im->fp_spd_ipv6_in_is_enabled &&
94 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_in_lookup_hash_idx) &&
95 (policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
96 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
97 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)) ||
98 (im->fp_spd_ipv6_out_is_enabled &&
99 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_out_lookup_hash_idx) &&
100 policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
101 return 1;
102 return 0;
103}
104
/**
 * @brief Add or delete an SPD policy.
 *
 * Looks up the SPD by the policy's id, invalidates the ip4 flow caches
 * when they could hold stale matches, then either inserts the policy
 * (priority-ordered) or removes it. If the fast path SPD is enabled for
 * this policy's type, the operation is delegated to
 * ipsec_fp_add_del_policy() instead of the classic vector-based SPD.
 *
 * @param vm          vlib main (unused here, kept for API symmetry)
 * @param policy      policy to add/delete; sa_index is resolved in place
 *                    for PROTECT policies
 * @param is_add      1 to add, 0 to delete
 * @param stat_index  [out] policy pool index used for stats counters
 * @return 0 on success, VNET_API_ERROR_SYSCALL_ERROR_1 on lookup failure
 */
int
ipsec_add_del_policy (vlib_main_t * vm,
		      ipsec_policy_t * policy, int is_add, u32 * stat_index)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_spd_t *spd = 0;
  ipsec_policy_t *vp;
  u32 spd_index;
  uword *p;

  p = hash_get (im->spd_index_by_spd_id, policy->id);

  if (!p)
    return VNET_API_ERROR_SYSCALL_ERROR_1;

  spd_index = p[0];
  spd = pool_elt_at_index (im->spds, spd_index);
  if (!spd)
    return VNET_API_ERROR_SYSCALL_ERROR_1;

  /* ip4 outbound flow cache: any SPD change can invalidate cached hits */
  if (im->output_flow_cache_flag && !policy->is_ipv6 &&
      policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
    {
      /*
       * Flow cache entry is valid only when epoch_count value in control
       * plane and data plane match. Otherwise, flow cache entry is considered
       * stale. To avoid the race condition of using old epoch_count value
       * in data plane after the roll over of epoch_count in control plane,
       * entire flow cache is reset.
       */
      if (im->epoch_count == 0xFFFFFFFF)
	{
	  /* Reset all the entries in flow cache */
	  clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
			  im->ipsec4_out_spd_hash_num_buckets *
			  (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
	}
      /* Increment epoch counter by 1 */
      clib_atomic_fetch_add_relax (&im->epoch_count, 1);
      /* Reset spd flow cache counter since all old entries are stale */
      clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
    }

  /* ip4 inbound flow cache: same epoch scheme as the outbound cache */
  if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
      im->input_flow_cache_flag && !policy->is_ipv6)
    {
      /*
       * Flow cache entry is valid only when input_epoch_count value in control
       * plane and data plane match. Otherwise, flow cache entry is considered
       * stale. To avoid the race condition of using old input_epoch_count
       * value in data plane after the roll over of input_epoch_count in
       * control plane, entire flow cache is reset.
       */
      if (im->input_epoch_count == 0xFFFFFFFF)
	{
	  /* Reset all the entries in flow cache */
	  clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
			  im->ipsec4_in_spd_hash_num_buckets *
			  (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
	}
      /* Increment epoch counter by 1 */
      clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
      /* Reset spd flow cache counter since all old entries are stale */
      im->ipsec4_in_spd_flow_cache_entries = 0;
    }

  if (is_add)
    {
      u32 policy_index;
      u32 i;

      /* PROTECT policies hold a lock on their SA for their lifetime */
      if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
	{
	  index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);

	  if (INDEX_INVALID == sa_index)
	    return VNET_API_ERROR_SYSCALL_ERROR_1;
	  policy->sa_index = sa_index;
	}
      else
	policy->sa_index = INDEX_INVALID;

      /**
       * Try adding the policy into fast path SPD first. Only adding to
       * traditional SPD when failed.
       **/
      if (ipsec_is_fp_enabled (im, spd, policy))
	return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
					stat_index);

      pool_get (im->policies, vp);
      clib_memcpy (vp, policy, sizeof (*vp));
      policy_index = vp - im->policies;

      vlib_validate_combined_counter (&ipsec_spd_policy_counters,
				      policy_index);
      vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);

      /* keep the per-type vector sorted by descending priority: insert
       * before the first existing policy with priority <= ours */
      vec_foreach_index (i, spd->policies[policy->type])
	{
	  ipsec_policy_t *p =
	    pool_elt_at_index (im->policies, spd->policies[policy->type][i]);

	  if (p->priority <= vp->priority)
	    {
	      break;
	    }
	}

      vec_insert_elts (spd->policies[policy->type], &policy_index, 1, i);

      *stat_index = policy_index;
    }
  else
    {
      u32 ii;

      /**
       * Try to delete the policy from the fast path SPD first. Delete from
       * traditional SPD when fp delete fails.
       **/

      if (ipsec_is_fp_enabled (im, spd, policy))

	{
	  /* resolve the SA index for matching; the lock taken here is
	   * immediately released since delete does not retain the SA */
	  if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
	    {
	      index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);

	      if (INDEX_INVALID == sa_index)
		return VNET_API_ERROR_SYSCALL_ERROR_1;
	      policy->sa_index = sa_index;
	      ipsec_sa_unlock_id (policy->sa_id);
	    }
	  else
	    policy->sa_index = INDEX_INVALID;

	  return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
					  stat_index);
	}

      /* linear scan of the per-type vector for an exact policy match */
      vec_foreach_index (ii, (spd->policies[policy->type]))
	{
	  vp = pool_elt_at_index (im->policies,
				  spd->policies[policy->type][ii]);
	  if (ipsec_policy_is_equal (vp, policy))
	    {
	      vec_delete (spd->policies[policy->type], 1, ii);
	      ipsec_sa_unlock (vp->sa_index);
	      pool_put (im->policies, vp);
	      break;
	    }
	}
    }

  return 0;
}
264
Piotr Bronowski04643102022-05-10 13:18:22 +0000265static_always_inline void
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000266ipsec_fp_release_mask_type (ipsec_main_t *im, u32 mask_type_index)
Piotr Bronowski04643102022-05-10 13:18:22 +0000267{
268 ipsec_fp_mask_type_entry_t *mte =
269 pool_elt_at_index (im->fp_mask_types, mask_type_index);
270 mte->refcount--;
271 if (mte->refcount == 0)
272 {
273 /* this entry is not in use anymore */
274 ASSERT (clib_memset (mte, 0xae, sizeof (*mte)) == EOK);
275 pool_put (im->fp_mask_types, mte);
276 }
277}
278
279static_always_inline u32
280find_mask_type_index (ipsec_main_t *im, ipsec_fp_5tuple_t *mask)
281{
282 ipsec_fp_mask_type_entry_t *mte;
283
284 pool_foreach (mte, im->fp_mask_types)
285 {
286 if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
287 return (mte - im->fp_mask_types);
288 }
289
290 return ~0;
291}
292
293static_always_inline void
Piotr Bronowski86f82082022-07-08 12:45:05 +0000294fill_ip6_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
295 clib_bihash_kv_40_8_t *kv)
Piotr Bronowski04643102022-05-10 13:18:22 +0000296{
297 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
Piotr Bronowski81880602022-07-18 16:45:22 +0000298 u64 *pmatch = (u64 *) match->kv_40_8.key;
299 u64 *pmask = (u64 *) mask->kv_40_8.key;
300 u64 *pkey = (u64 *) kv->key;
Piotr Bronowski04643102022-05-10 13:18:22 +0000301
302 *pkey++ = *pmatch++ & *pmask++;
303 *pkey++ = *pmatch++ & *pmask++;
304 *pkey++ = *pmatch++ & *pmask++;
305 *pkey++ = *pmatch++ & *pmask++;
Piotr Bronowski86f82082022-07-08 12:45:05 +0000306 *pkey = *pmatch & *pmask;
Piotr Bronowski04643102022-05-10 13:18:22 +0000307
308 kv_val->as_u64 = 0;
309}
310
311static_always_inline void
Piotr Bronowski86f82082022-07-08 12:45:05 +0000312fill_ip4_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
313 clib_bihash_kv_16_8_t *kv)
Piotr Bronowski04643102022-05-10 13:18:22 +0000314{
315 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
Piotr Bronowski81880602022-07-18 16:45:22 +0000316 u64 *pmatch = (u64 *) match->kv_16_8.key;
317 u64 *pmask = (u64 *) mask->kv_16_8.key;
Piotr Bronowski04643102022-05-10 13:18:22 +0000318 u64 *pkey = (u64 *) kv->key;
319
320 *pkey++ = *pmatch++ & *pmask++;
Piotr Bronowski81880602022-07-18 16:45:22 +0000321 *pkey = *pmatch & *pmask;
Piotr Bronowski04643102022-05-10 13:18:22 +0000322
323 kv_val->as_u64 = 0;
324}
325
326static_always_inline u16
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000327mask_out_highest_set_bit_u16 (u16 x)
Piotr Bronowski04643102022-05-10 13:18:22 +0000328{
329 x |= x >> 8;
330 x |= x >> 4;
331 x |= x >> 2;
332 x |= x >> 1;
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000333 return ~x;
Piotr Bronowski04643102022-05-10 13:18:22 +0000334}
335
336static_always_inline u32
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000337mask_out_highest_set_bit_u32 (u32 x)
Piotr Bronowski04643102022-05-10 13:18:22 +0000338{
339 x |= x >> 16;
340 x |= x >> 8;
341 x |= x >> 4;
342 x |= x >> 2;
343 x |= x >> 1;
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000344 return ~x;
Piotr Bronowski04643102022-05-10 13:18:22 +0000345}
346
Piotr Bronowski86f82082022-07-08 12:45:05 +0000347static_always_inline u64
348mask_out_highest_set_bit_u64 (u64 x)
349{
350 x |= x >> 32;
351 x |= x >> 16;
352 x |= x >> 8;
353 x |= x >> 4;
354 x |= x >> 2;
355 x |= x >> 1;
356 return ~x;
357}
358
359static_always_inline void
360ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
361 ipsec_fp_5tuple_t *mask)
362{
363 if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
364 (policy->protocol == IP_PROTOCOL_UDP) ||
365 (policy->protocol == IP_PROTOCOL_SCTP)))
366 {
367 mask->lport = policy->lport.start ^ policy->lport.stop;
368 mask->rport = policy->rport.start ^ policy->rport.stop;
369
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000370 mask->lport = mask_out_highest_set_bit_u16 (mask->lport);
Piotr Bronowski86f82082022-07-08 12:45:05 +0000371
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000372 mask->rport = mask_out_highest_set_bit_u16 (mask->rport);
Piotr Bronowski86f82082022-07-08 12:45:05 +0000373 }
374 else
375 {
376 mask->lport = 0;
377 mask->rport = 0;
378 }
379
380 mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
381}
382
/**
 * @brief Compute the ip4 5-tuple lookup mask for a fast path policy.
 *
 * Starts from an all-ones mask, then wildcards the address bits in which
 * the policy's start/stop range endpoints differ. For inbound PROTECT
 * policies whose SA is a tunnel, the address-range masking is skipped
 * (addresses are matched exactly against the tunnel endpoints) and only
 * the SPI/protocol masking applies. Inbound policies wildcard the
 * protocol (and the SPI for non-PROTECT types); outbound policies
 * wildcard the action and take port/protocol masks from the policy.
 *
 * @param policy   policy whose selectors define the mask
 * @param mask     [out] resulting 5-tuple mask
 * @param inbound  true when the policy is inbound
 */
static_always_inline void
ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
			      bool inbound)
{
  u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
  u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
  u32 *plmask = (u32 *) &mask->laddr;
  u32 *praddr_start = (u32 *) &policy->raddr.start.ip4;
  u32 *praddr_stop = (u32 *) &policy->raddr.stop.ip4;
  u32 *prmask = (u32 *) &mask->raddr;

  /* default: match everything exactly, except the zero padding */
  clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
  clib_memset_u8 (&mask->l3_zero_pad, 0, sizeof (mask->l3_zero_pad));

  /* tunnel-mode inbound protect: addresses are the tunnel endpoints,
   * so keep the exact-match address mask and only fix up spi/protocol */
  if (inbound && (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT &&
		  policy->sa_index != INDEX_INVALID))
    {
      ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);

      if (ipsec_sa_is_set_IS_TUNNEL (s))
	goto set_spi_mask;
    }

  /* find bits where start != stop */
  *plmask = *pladdr_start ^ *pladdr_stop;
  *prmask = *praddr_start ^ *praddr_stop;
  /* Find most significant bit set (that is the first position
   * start differs from stop). Mask out everything after that bit and
   * the bit itself. Remember that policy stores start and stop in the net
   * order.
   */
  *plmask = clib_host_to_net_u32 (
    mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask)));

  *prmask = clib_host_to_net_u32 (
    mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask)));

set_spi_mask:
  if (inbound)
    {
      /* only PROTECT policies key on the SPI */
      if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)
	mask->spi = 0;

      mask->protocol = 0;
    }
  else
    {
      mask->action = 0;
      ipsec_fp_get_policy_ports_mask (policy, mask);
    }
}
434
Piotr Bronowski86f82082022-07-08 12:45:05 +0000435static_always_inline void
Piotr Bronowski06abf232022-09-20 14:44:36 +0000436ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
437 bool inbound)
Piotr Bronowski04643102022-05-10 13:18:22 +0000438{
439 u64 *pladdr_start = (u64 *) &policy->laddr.start;
440 u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
Piotr Bronowski86f82082022-07-08 12:45:05 +0000441 u64 *plmask = (u64 *) &mask->ip6_laddr;
Piotr Bronowski04643102022-05-10 13:18:22 +0000442 u64 *praddr_start = (u64 *) &policy->raddr.start;
443 u64 *praddr_stop = (u64 *) &policy->raddr.stop;
444 u64 *prmask = (u64 *) &mask->ip6_raddr;
Piotr Bronowski04643102022-05-10 13:18:22 +0000445
Piotr Bronowski81880602022-07-18 16:45:22 +0000446 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
Piotr Bronowski04643102022-05-10 13:18:22 +0000447
Piotr Bronowski645a5882023-02-13 18:18:59 +0000448 if (inbound && (policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT &&
449 policy->sa_index != INDEX_INVALID))
450 {
451 ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
452
453 if (ipsec_sa_is_set_IS_TUNNEL (s))
454 goto set_spi_mask;
455 }
456
Piotr Bronowski86f82082022-07-08 12:45:05 +0000457 *plmask = (*pladdr_start++ ^ *pladdr_stop++);
Piotr Bronowski04643102022-05-10 13:18:22 +0000458
Piotr Bronowski86f82082022-07-08 12:45:05 +0000459 *prmask = (*praddr_start++ ^ *praddr_stop++);
Piotr Bronowski04643102022-05-10 13:18:22 +0000460
Piotr Bronowski86f82082022-07-08 12:45:05 +0000461 /* Find most significant bit set (that is the first position
462 * start differs from stop). Mask out everything after that bit and
463 * the bit itself. Remember that policy stores start and stop in the net
464 * order.
465 */
466 *plmask = clib_host_to_net_u64 (
467 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
Piotr Bronowski04643102022-05-10 13:18:22 +0000468
Piotr Bronowski86f82082022-07-08 12:45:05 +0000469 if (*plmask++ & clib_host_to_net_u64 (0x1))
470 {
471 *plmask = (*pladdr_start ^ *pladdr_stop);
472 *plmask = clib_host_to_net_u64 (
473 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
474 }
475 else
476 *plmask = 0;
Piotr Bronowski04643102022-05-10 13:18:22 +0000477
Piotr Bronowski86f82082022-07-08 12:45:05 +0000478 *prmask = clib_host_to_net_u64 (
479 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
Piotr Bronowski04643102022-05-10 13:18:22 +0000480
Piotr Bronowski86f82082022-07-08 12:45:05 +0000481 if (*prmask++ & clib_host_to_net_u64 (0x1))
482 {
483 *prmask = (*pladdr_start ^ *pladdr_stop);
484 *prmask = clib_host_to_net_u64 (
485 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
486 }
487 else
488 *prmask = 0;
Piotr Bronowski645a5882023-02-13 18:18:59 +0000489set_spi_mask:
Piotr Bronowski06abf232022-09-20 14:44:36 +0000490 if (inbound)
491 {
Piotr Bronowski645a5882023-02-13 18:18:59 +0000492 if (policy->type != IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT)
Piotr Bronowski06abf232022-09-20 14:44:36 +0000493 mask->spi = 0;
494
495 mask->protocol = 0;
496 }
497 else
498 {
499 mask->action = 0;
500 ipsec_fp_get_policy_ports_mask (policy, mask);
501 }
Piotr Bronowski04643102022-05-10 13:18:22 +0000502}
503
/**
 * @brief Build the representative 5-tuple for a policy, used (together
 * with the policy's mask) as the fast path bihash key.
 *
 * Addresses are taken from the range start. For inbound policies the
 * tuple additionally carries the policy type in 'action' and, for
 * PROTECT policies with a resolved SA, the SA's SPI; when that SA is a
 * tunnel, the tunnel endpoints replace the policy addresses (dst as
 * local, src as remote). Outbound tuples carry protocol and the port
 * range starts instead.
 *
 * @param policy   source policy
 * @param tuple    [out] resulting 5-tuple
 * @param inbound  true when the policy is inbound
 */
static_always_inline void
ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple,
			    bool inbound)
{
  memset (tuple, 0, sizeof (*tuple));
  tuple->is_ipv6 = policy->is_ipv6;
  if (tuple->is_ipv6)
    {
      tuple->ip6_laddr = policy->laddr.start.ip6;
      tuple->ip6_raddr = policy->raddr.start.ip6;
    }
  else
    {
      tuple->laddr = policy->laddr.start.ip4;
      tuple->raddr = policy->raddr.start.ip4;
    }

  if (inbound)
    {

      if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
	   policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT) &&
	  policy->sa_index != INDEX_INVALID)
	{
	  ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);

	  tuple->spi = s->spi;
	  /* tunnel SA: match on the tunnel endpoints, not the policy's
	   * inner selectors (dst is local, src is remote on rx) */
	  if (ipsec_sa_is_set_IS_TUNNEL (s))
	    {
	      if (tuple->is_ipv6)
		{
		  tuple->ip6_laddr = s->tunnel.t_dst.ip.ip6;
		  tuple->ip6_raddr = s->tunnel.t_src.ip.ip6;
		}
	      else
		{
		  tuple->laddr = s->tunnel.t_dst.ip.ip4;
		  tuple->raddr = s->tunnel.t_src.ip.ip4;
		}
	    }
	}
      else
	tuple->spi = INDEX_INVALID;
      tuple->action = policy->type;
      return;
    }

  /* outbound: classify on protocol and the start of each port range */
  tuple->protocol = policy->protocol;
  tuple->lport = policy->lport.start;
  tuple->rport = policy->rport.start;
}
555
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000556static_always_inline int
557ipsec_fp_mask_type_idx_cmp (ipsec_fp_mask_id_t *mask_id, u32 *idx)
558{
559 return mask_id->mask_type_idx == *idx;
560}
561
/**
 * @brief Add an ip4 policy to the fast path SPD.
 *
 * Allocates the policy in the shared policy pool, derives its lookup
 * mask and 5-tuple, registers (or references) the mask type, and inserts
 * the policy index into the in/out 16_8 bihash bucket for its masked key.
 * The per-SPD fp_mask_ids[type] vector tracks, per policy type, how many
 * policies use each mask type.
 *
 * @param im          ipsec main
 * @param fp_spd      fast path SPD to insert into
 * @param policy      policy to add (fp_mask_type_id is set in place)
 * @param stat_index  [out] policy pool index for stats
 * @return 0 on success, -1 when the bihash insert fails
 */
int
ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy, u32 *stat_index)
{
  u32 mask_index, searched_idx;
  ipsec_policy_t *vp;
  ipsec_fp_mask_type_entry_t *mte;
  u32 policy_index;
  clib_bihash_kv_16_8_t kv;
  clib_bihash_kv_16_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;

  ipsec_fp_5tuple_t mask, policy_5tuple;
  int res;
  /* direction selects which of the two per-SPD hash tables to use */
  bool inbound = ipsec_is_policy_inbound (policy);
  clib_bihash_16_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_out_lookup_hash_idx);

  ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
  pool_get (im->policies, vp);
  policy_index = vp - im->policies;
  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  *stat_index = policy_index;
  mask_index = find_mask_type_index (im, &mask);

  if (mask_index == ~0)
    {
      /* mask type not found, we need to create a new entry */
      pool_get (im->fp_mask_types, mte);
      mask_index = mte - im->fp_mask_types;
      mte->refcount = 0;
      /* NOTE(review): on the error path below, release_mask_type
       * decrements this zero refcount — confirm underflow is benign */
    }
  else
    mte = im->fp_mask_types + mask_index;

  policy->fp_mask_type_id = mask_index;
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);

  fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);

  res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
  if (res != 0)
    {
      /* key was not found crate a new entry */
      vec_add1 (key_val->fp_policies_ids, policy_index);
      res = clib_bihash_add_del_16_8 (bihash_table, &kv, 1);

      if (res != 0)
	goto error;
    }
  else
    {
      /* key exists: append to its policy-id vector; only re-insert the
       * kv pair when the vector was reallocated (pointer may change) */
      if (vec_max_len (result_val->fp_policies_ids) !=
	  vec_len (result_val->fp_policies_ids))
	{
	  /* no need to resize */
	  vec_add1 (result_val->fp_policies_ids, policy_index);
	}
      else
	{
	  vec_add1 (result_val->fp_policies_ids, policy_index);

	  res = clib_bihash_add_del_16_8 (bihash_table, &result, 1);

	  if (res != 0)
	    goto error;
	}
    }

  /* first user of a fresh mask-type entry: record the mask itself */
  if (mte->refcount == 0)
    {
      clib_memcpy (&mte->mask, &mask, sizeof (mask));
      mte->refcount = 0;
    }

  /* track per-policy-type usage of this mask type */
  searched_idx =
    vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
			      ipsec_fp_mask_type_idx_cmp);
  if (~0 == searched_idx)
    {
      ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
      vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
    }
  else
    (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;

  mte->refcount++;
  clib_memcpy (vp, policy, sizeof (*vp));

  return 0;

error:
  pool_put (im->policies, vp);
  ipsec_fp_release_mask_type (im, mask_index);
  return -1;
}
665
/**
 * @brief Add an ip6 policy to the fast path SPD.
 *
 * Mirror of ipsec_fp_ip4_add_policy() using the 40_8 bihash tables:
 * allocates the policy, derives mask and 5-tuple, registers (or
 * references) the mask type and inserts the policy index into the
 * in/out hash bucket for its masked key.
 *
 * @param im          ipsec main
 * @param fp_spd      fast path SPD to insert into
 * @param policy      policy to add (fp_mask_type_id is set in place)
 * @param stat_index  [out] policy pool index for stats
 * @return 0 on success, -1 when the bihash insert fails
 */
int
ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy, u32 *stat_index)
{

  u32 mask_index, searched_idx;
  ipsec_policy_t *vp;
  ipsec_fp_mask_type_entry_t *mte;
  u32 policy_index;
  clib_bihash_kv_40_8_t kv;
  clib_bihash_kv_40_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;

  ipsec_fp_5tuple_t mask, policy_5tuple;
  int res;
  bool inbound = ipsec_is_policy_inbound (policy);

  ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
  pool_get (im->policies, vp);
  policy_index = vp - im->policies;
  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  *stat_index = policy_index;
  mask_index = find_mask_type_index (im, &mask);
  /* direction selects which of the two per-SPD hash tables to use */
  clib_bihash_40_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_out_lookup_hash_idx);

  if (mask_index == ~0)
    {
      /* mask type not found, we need to create a new entry */
      pool_get (im->fp_mask_types, mte);
      mask_index = mte - im->fp_mask_types;
      mte->refcount = 0;
      /* NOTE(review): on the error path below, release_mask_type
       * decrements this zero refcount — confirm underflow is benign */
    }
  else
    mte = im->fp_mask_types + mask_index;

  policy->fp_mask_type_id = mask_index;
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);

  fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);

  res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
  if (res != 0)
    {
      /* key was not found crate a new entry */
      vec_add1 (key_val->fp_policies_ids, policy_index);
      res = clib_bihash_add_del_40_8 (bihash_table, &kv, 1);
      if (res != 0)
	goto error;
    }
  else
    {
      /* key exists: append to its policy-id vector; only re-insert the
       * kv pair when the vector was reallocated (pointer may change) */
      if (vec_max_len (result_val->fp_policies_ids) !=
	  vec_len (result_val->fp_policies_ids))
	{
	  /* no need to resize */
	  vec_add1 (result_val->fp_policies_ids, policy_index);
	}
      else
	{
	  vec_add1 (result_val->fp_policies_ids, policy_index);

	  res = clib_bihash_add_del_40_8 (bihash_table, &result, 1);

	  if (res != 0)
	    goto error;
	}
    }

  /* first user of a fresh mask-type entry: record the mask itself */
  if (mte->refcount == 0)
    {
      clib_memcpy (&mte->mask, &mask, sizeof (mask));
      mte->refcount = 0;
    }

  /* track per-policy-type usage of this mask type */
  searched_idx =
    vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
			      ipsec_fp_mask_type_idx_cmp);
  if (~0 == searched_idx)
    {
      ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
      vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
    }
  else
    (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;

  mte->refcount++;
  clib_memcpy (vp, policy, sizeof (*vp));

  return 0;

error:
  pool_put (im->policies, vp);
  ipsec_fp_release_mask_type (im, mask_index);
  return -1;
}
769
/**
 * @brief Delete an ip6 policy from the fast path SPD.
 *
 * Recomputes the policy's mask and masked key, looks up the 40_8 bihash
 * bucket, and removes the first stored policy that compares equal. The
 * bucket's policy-id vector entry is deleted (the whole kv pair when it
 * was the last one), the per-type mask-id refcount and the global mask
 * type entry are released, the SA lock is dropped, and the policy is
 * returned to the pool.
 *
 * @return 0 when a matching policy was found and removed, -1 otherwise.
 */
int
ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy)
{
  int res;
  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
  clib_bihash_kv_40_8_t kv;
  clib_bihash_kv_40_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  /* direction selects which of the two per-SPD hash tables to use */
  bool inbound = ipsec_is_policy_inbound (policy);
  clib_bihash_40_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_out_lookup_hash_idx);

  ipsec_policy_t *vp;
  u32 ii, imt;

  ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
  fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
  res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
  if (res != 0)
    return -1;

  /* scan the bucket's policies for an exact match */
  vec_foreach_index (ii, result_val->fp_policies_ids)
    {
      vp =
	pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
      if (ipsec_policy_is_equal (vp, policy))
	{
	  if (vec_len (result_val->fp_policies_ids) == 1)
	    {
	      /* last policy under this key: drop the kv pair entirely */
	      vec_free (result_val->fp_policies_ids);
	      clib_bihash_add_del_40_8 (bihash_table, &result, 0);
	    }
	  else
	    vec_del1 (result_val->fp_policies_ids, ii);

	  /* drop the per-policy-type reference on the mask type */
	  vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
	    {
	      if ((fp_spd->fp_mask_ids[policy->type] + imt)->mask_type_idx ==
		  vp->fp_mask_type_id)
		{

		  if ((fp_spd->fp_mask_ids[policy->type] + imt)->refcount-- ==
		      1)
		    vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);

		  break;
		}
	    }

	  ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
	  ipsec_sa_unlock (vp->sa_index);
	  pool_put (im->policies, vp);
	  return 0;
	}
    }
  return -1;
}
833
/**
 * @brief Remove an IPv4 policy from a flow-path (fast-path) SPD.
 *
 * IPv4 twin of ipsec_fp_ip6_del_policy: finds the policy in the
 * direction-specific IPv4 bihash, removes its id from the bucket,
 * releases per-SPD and global mask-type references, unlocks the SA
 * and returns the policy to the pool.
 *
 * @param im      IPsec main context (owns the policy pool and hash pools)
 * @param fp_spd  flow-path SPD the policy belongs to
 * @param policy  policy to delete (matched by value via
 *                ipsec_policy_is_equal, not by index)
 * @return 0 on success, -1 if no matching policy is found
 */
int
ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy)
{
  int res;
  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
  clib_bihash_kv_16_8_t kv;
  clib_bihash_kv_16_8_t result;
  /* The bihash value is a vector of policy pool indices sharing this key. */
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  bool inbound = ipsec_is_policy_inbound (policy);
  ipsec_policy_t *vp;
  u32 ii, imt;
  /* Inbound and outbound policies live in separate lookup tables. */
  clib_bihash_16_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_out_lookup_hash_idx);

  /* Rebuild the masked 5-tuple key exactly as the add path did, so the
   * search lands on the same bucket. */
  ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
  fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
  res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);

  if (res != 0)
    return -1;

  vec_foreach_index (ii, result_val->fp_policies_ids)
    {
      vp =
	pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
      if (ipsec_policy_is_equal (vp, policy))
	{
	  if (vec_len (result_val->fp_policies_ids) == 1)
	    {
	      /* Last policy for this key: free the id vector and delete
	       * the bihash entry itself (is_add = 0). */
	      vec_free (result_val->fp_policies_ids);
	      clib_bihash_add_del_16_8 (bihash_table, &result, 0);
	    }
	  else
	    vec_del1 (result_val->fp_policies_ids, ii);

	  /* Drop the per-SPD mask-type reference; remove the entry when
	   * the post-decremented refcount was 1 (i.e. reached zero). */
	  vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
	    {
	      if ((fp_spd->fp_mask_ids[policy->type] + imt)->mask_type_idx ==
		  vp->fp_mask_type_id)
		{

		  if ((fp_spd->fp_mask_ids[policy->type] + imt)->refcount-- ==
		      1)
		    vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);

		  break;
		}
	    }
	  /* Release the global mask-type refcount taken at add time. */
	  ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
	  ipsec_sa_unlock (vp->sa_index);
	  pool_put (im->policies, vp);
	  return 0;
	}
    }
  /* Key matched but no policy in the bucket compared equal. */
  return -1;
}
896
897int
898ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
899 u32 *stat_index)
900{
901 ipsec_main_t *im = &ipsec_main;
902
903 if (is_add)
904 if (policy->is_ipv6)
905 return ipsec_fp_ip6_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
906 stat_index);
907 else
908 return ipsec_fp_ip4_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
909 stat_index);
910
911 else if (policy->is_ipv6)
912
913 return ipsec_fp_ip6_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
914 else
915 return ipsec_fp_ip4_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
916}
917
Neale Ranns999c8ee2019-02-01 03:31:24 -0800918/*
919 * fd.io coding-style-patch-verification: ON
920 *
921 * Local Variables:
922 * eval: (c-set-style "gnu")
923 * End:
924 */