/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16#include <vnet/ipsec/ipsec.h>
17
/**
 * @brief
 * Policy packet & bytes counters
 *
 * Combined (packets + bytes) counters, one entry per SPD policy,
 * indexed by the policy's pool index in im->policies and exported in
 * the stats segment under "/net/ipsec/policy".
 */
vlib_combined_counter_main_t ipsec_spd_policy_counters = {
  .name = "policy",
  .stat_segment_name = "/net/ipsec/policy",
};
26
27static int
Neale Ranns999c8ee2019-02-01 03:31:24 -080028ipsec_spd_entry_sort (void *a1, void *a2)
29{
Neale Rannsa09c1ff2019-02-04 01:10:30 -080030 ipsec_main_t *im = &ipsec_main;
Neale Ranns999c8ee2019-02-01 03:31:24 -080031 u32 *id1 = a1;
32 u32 *id2 = a2;
Neale Ranns999c8ee2019-02-01 03:31:24 -080033 ipsec_policy_t *p1, *p2;
34
Neale Rannsa09c1ff2019-02-04 01:10:30 -080035 p1 = pool_elt_at_index (im->policies, *id1);
36 p2 = pool_elt_at_index (im->policies, *id2);
Neale Ranns999c8ee2019-02-01 03:31:24 -080037 if (p1 && p2)
38 return p2->priority - p1->priority;
39
40 return 0;
41}
42
43int
Neale Ranns9f231d42019-03-19 10:06:00 +000044ipsec_policy_mk_type (bool is_outbound,
45 bool is_ipv6,
46 ipsec_policy_action_t action,
47 ipsec_spd_policy_type_t * type)
48{
49 if (is_outbound)
50 {
51 *type = (is_ipv6 ?
52 IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
53 return (0);
54 }
55 else
56 {
57 switch (action)
58 {
59 case IPSEC_POLICY_ACTION_PROTECT:
60 *type = (is_ipv6 ?
61 IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
62 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
63 return (0);
64 case IPSEC_POLICY_ACTION_BYPASS:
65 *type = (is_ipv6 ?
66 IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
67 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
68 return (0);
69 case IPSEC_POLICY_ACTION_DISCARD:
ShivaShankarK05464832020-04-14 14:01:03 +053070 *type = (is_ipv6 ?
71 IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
72 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
73 return (0);
Neale Ranns9f231d42019-03-19 10:06:00 +000074 case IPSEC_POLICY_ACTION_RESOLVE:
75 break;
76 }
77 }
78
79 /* Unsupported type */
80 return (-1);
81}
82
Piotr Bronowski993b6be2022-08-31 13:48:14 +000083static_always_inline int
84ipsec_is_policy_inbound (ipsec_policy_t *policy)
85{
86 if (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
87 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
Piotr Bronowski06abf232022-09-20 14:44:36 +000088 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD ||
89 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
90 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
91 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)
Piotr Bronowski993b6be2022-08-31 13:48:14 +000092 return 1;
93
94 return 0;
95}
96
Piotr Bronowski06abf232022-09-20 14:44:36 +000097static_always_inline int
98ipsec_is_fp_enabled (ipsec_main_t *im, ipsec_spd_t *spd,
99 ipsec_policy_t *policy)
100{
101 if ((im->fp_spd_ipv4_out_is_enabled &&
102 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx) &&
103 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
104 (im->fp_spd_ipv4_in_is_enabled &&
105 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_in_lookup_hash_idx) &&
106 (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
107 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
108 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
109 (im->fp_spd_ipv6_in_is_enabled &&
110 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_in_lookup_hash_idx) &&
111 (policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
112 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
113 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)) ||
114 (im->fp_spd_ipv6_out_is_enabled &&
115 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_out_lookup_hash_idx) &&
116 policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
117 return 1;
118 return 0;
119}
120
/**
 * @brief
 * Add or delete an SPD policy.
 *
 * Looks up the SPD by the policy's id, invalidates the relevant
 * ip4 flow cache (by bumping the epoch counter) when flow caching is
 * on, then either installs the policy (fast-path SPD when enabled,
 * otherwise the classic priority-sorted per-type vector) or removes
 * it.
 *
 * @param vm         vlib main (unused here but part of the API)
 * @param policy     policy to add/delete; sa_index is resolved here
 * @param is_add     1 to add, 0 to delete
 * @param stat_index [out] policy pool index, used to address
 *                   ipsec_spd_policy_counters
 * @return 0 on success, VNET_API_ERROR_SYSCALL_ERROR_1 on lookup
 *         failure, or the fast-path add/del result
 */
int
ipsec_add_del_policy (vlib_main_t * vm,
		      ipsec_policy_t * policy, int is_add, u32 * stat_index)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_spd_t *spd = 0;
  ipsec_policy_t *vp;
  u32 spd_index;
  uword *p;

  /* policy->id names the SPD this policy belongs to */
  p = hash_get (im->spd_index_by_spd_id, policy->id);

  if (!p)
    return VNET_API_ERROR_SYSCALL_ERROR_1;

  spd_index = p[0];
  spd = pool_elt_at_index (im->spds, spd_index);
  if (!spd)
    return VNET_API_ERROR_SYSCALL_ERROR_1;

  /* Any change to ip4-outbound policies invalidates the outbound
   * flow cache: stale entries are detected via the epoch counter. */
  if (im->output_flow_cache_flag && !policy->is_ipv6 &&
      policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
    {
      /*
       * Flow cache entry is valid only when epoch_count value in control
       * plane and data plane match. Otherwise, flow cache entry is considered
       * stale. To avoid the race condition of using old epoch_count value
       * in data plane after the roll over of epoch_count in control plane,
       * entire flow cache is reset.
       */
      if (im->epoch_count == 0xFFFFFFFF)
	{
	  /* Reset all the entries in flow cache */
	  clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
			  im->ipsec4_out_spd_hash_num_buckets *
			  (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
	}
      /* Increment epoch counter by 1 */
      clib_atomic_fetch_add_relax (&im->epoch_count, 1);
      /* Reset spd flow cache counter since all old entries are stale */
      clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
    }

  /* Same invalidation scheme for the ip4-inbound flow cache */
  if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
      im->input_flow_cache_flag && !policy->is_ipv6)
    {
      /*
       * Flow cache entry is valid only when input_epoch_count value in control
       * plane and data plane match. Otherwise, flow cache entry is considered
       * stale. To avoid the race condition of using old input_epoch_count
       * value in data plane after the roll over of input_epoch_count in
       * control plane, entire flow cache is reset.
       */
      if (im->input_epoch_count == 0xFFFFFFFF)
	{
	  /* Reset all the entries in flow cache */
	  clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
			  im->ipsec4_in_spd_hash_num_buckets *
			  (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
	}
      /* Increment epoch counter by 1 */
      clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
      /* Reset spd flow cache counter since all old entries are stale */
      im->ipsec4_in_spd_flow_cache_entries = 0;
    }

  if (is_add)
    {
      u32 policy_index;

      /* PROTECT policies reference an SA; take a lock on it so it
       * cannot be deleted while the policy exists. */
      if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
	{
	  index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);

	  if (INDEX_INVALID == sa_index)
	    return VNET_API_ERROR_SYSCALL_ERROR_1;
	  policy->sa_index = sa_index;
	}
      else
	policy->sa_index = INDEX_INVALID;

      /**
       * Try adding the policy into fast path SPD first. Only adding to
       * traditional SPD when failed.
       **/
      if (ipsec_is_fp_enabled (im, spd, policy))
	return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
					stat_index);

      /* Classic SPD: pool-allocate a copy, zero its counters, then
       * insert the index into the per-type vector kept sorted by
       * descending priority. */
      pool_get (im->policies, vp);
      clib_memcpy (vp, policy, sizeof (*vp));
      policy_index = vp - im->policies;

      vlib_validate_combined_counter (&ipsec_spd_policy_counters,
				      policy_index);
      vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
      vec_add1 (spd->policies[policy->type], policy_index);
      vec_sort_with_function (spd->policies[policy->type],
			      ipsec_spd_entry_sort);
      *stat_index = policy_index;
    }
  else
    {
      u32 ii;

      /**
       * Try to delete the policy from the fast path SPD first. Delete from
       * traditional SPD when fp delete fails.
       **/

      if (ipsec_is_fp_enabled (im, spd, policy))

	{
	  /* Resolve sa_index so the fast-path lookup key matches the
	   * one built at add time; the lock taken by find_and_lock is
	   * immediately dropped again — we only needed the index. */
	  if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
	    {
	      index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);

	      if (INDEX_INVALID == sa_index)
		return VNET_API_ERROR_SYSCALL_ERROR_1;
	      policy->sa_index = sa_index;
	      ipsec_sa_unlock_id (policy->sa_id);
	    }
	  else
	    policy->sa_index = INDEX_INVALID;

	  return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
					  stat_index);
	}

      /* Classic SPD: find the first equal policy, unlink it, release
       * its SA reference and free the pool element. */
      vec_foreach_index (ii, (spd->policies[policy->type]))
	{
	  vp = pool_elt_at_index (im->policies,
				  spd->policies[policy->type][ii]);
	  if (ipsec_policy_is_equal (vp, policy))
	    {
	      vec_delete (spd->policies[policy->type], 1, ii);
	      ipsec_sa_unlock (vp->sa_index);
	      pool_put (im->policies, vp);
	      break;
	    }
	}
    }

  return 0;
}
268
/**
 * @brief
 * Drop one reference on a fast-path mask-type entry; when the last
 * reference is gone the entry is returned to the pool.
 *
 * @param im              ipsec main
 * @param mask_type_index pool index into im->fp_mask_types
 *
 * NOTE(review): the caller must hold a reference; releasing an entry
 * whose refcount is already 0 would wrap the u32 — verify call sites.
 */
static_always_inline void
ipsec_fp_release_mask_type (ipsec_main_t *im, u32 mask_type_index)
{
  ipsec_fp_mask_type_entry_t *mte =
    pool_elt_at_index (im->fp_mask_types, mask_type_index);
  mte->refcount--;
  if (mte->refcount == 0)
    {
      /* this entry is not in use anymore */
      /* Poison the freed entry to catch use-after-free. The memset
       * lives inside ASSERT, so it only runs in builds where ASSERT
       * expands to code; release builds skip the poisoning.
       * NOTE(review): presumably intentional debug-only poisoning —
       * confirm. */
      ASSERT (clib_memset (mte, 0xae, sizeof (*mte)) == EOK);
      pool_put (im->fp_mask_types, mte);
    }
}
282
283static_always_inline u32
284find_mask_type_index (ipsec_main_t *im, ipsec_fp_5tuple_t *mask)
285{
286 ipsec_fp_mask_type_entry_t *mte;
287
288 pool_foreach (mte, im->fp_mask_types)
289 {
290 if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
291 return (mte - im->fp_mask_types);
292 }
293
294 return ~0;
295}
296
297static_always_inline void
Piotr Bronowski86f82082022-07-08 12:45:05 +0000298fill_ip6_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
299 clib_bihash_kv_40_8_t *kv)
Piotr Bronowski04643102022-05-10 13:18:22 +0000300{
301 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
Piotr Bronowski81880602022-07-18 16:45:22 +0000302 u64 *pmatch = (u64 *) match->kv_40_8.key;
303 u64 *pmask = (u64 *) mask->kv_40_8.key;
304 u64 *pkey = (u64 *) kv->key;
Piotr Bronowski04643102022-05-10 13:18:22 +0000305
306 *pkey++ = *pmatch++ & *pmask++;
307 *pkey++ = *pmatch++ & *pmask++;
308 *pkey++ = *pmatch++ & *pmask++;
309 *pkey++ = *pmatch++ & *pmask++;
Piotr Bronowski86f82082022-07-08 12:45:05 +0000310 *pkey = *pmatch & *pmask;
Piotr Bronowski04643102022-05-10 13:18:22 +0000311
312 kv_val->as_u64 = 0;
313}
314
315static_always_inline void
Piotr Bronowski86f82082022-07-08 12:45:05 +0000316fill_ip4_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
317 clib_bihash_kv_16_8_t *kv)
Piotr Bronowski04643102022-05-10 13:18:22 +0000318{
319 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
Piotr Bronowski81880602022-07-18 16:45:22 +0000320 u64 *pmatch = (u64 *) match->kv_16_8.key;
321 u64 *pmask = (u64 *) mask->kv_16_8.key;
Piotr Bronowski04643102022-05-10 13:18:22 +0000322 u64 *pkey = (u64 *) kv->key;
323
324 *pkey++ = *pmatch++ & *pmask++;
Piotr Bronowski81880602022-07-18 16:45:22 +0000325 *pkey = *pmatch & *pmask;
Piotr Bronowski04643102022-05-10 13:18:22 +0000326
327 kv_val->as_u64 = 0;
328}
329
330static_always_inline u16
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000331mask_out_highest_set_bit_u16 (u16 x)
Piotr Bronowski04643102022-05-10 13:18:22 +0000332{
333 x |= x >> 8;
334 x |= x >> 4;
335 x |= x >> 2;
336 x |= x >> 1;
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000337 return ~x;
Piotr Bronowski04643102022-05-10 13:18:22 +0000338}
339
340static_always_inline u32
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000341mask_out_highest_set_bit_u32 (u32 x)
Piotr Bronowski04643102022-05-10 13:18:22 +0000342{
343 x |= x >> 16;
344 x |= x >> 8;
345 x |= x >> 4;
346 x |= x >> 2;
347 x |= x >> 1;
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000348 return ~x;
Piotr Bronowski04643102022-05-10 13:18:22 +0000349}
350
Piotr Bronowski86f82082022-07-08 12:45:05 +0000351static_always_inline u64
352mask_out_highest_set_bit_u64 (u64 x)
353{
354 x |= x >> 32;
355 x |= x >> 16;
356 x |= x >> 8;
357 x |= x >> 4;
358 x |= x >> 2;
359 x |= x >> 1;
360 return ~x;
361}
362
363static_always_inline void
364ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
365 ipsec_fp_5tuple_t *mask)
366{
367 if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
368 (policy->protocol == IP_PROTOCOL_UDP) ||
369 (policy->protocol == IP_PROTOCOL_SCTP)))
370 {
371 mask->lport = policy->lport.start ^ policy->lport.stop;
372 mask->rport = policy->rport.start ^ policy->rport.stop;
373
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000374 mask->lport = mask_out_highest_set_bit_u16 (mask->lport);
Piotr Bronowski86f82082022-07-08 12:45:05 +0000375
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000376 mask->rport = mask_out_highest_set_bit_u16 (mask->rport);
Piotr Bronowski86f82082022-07-08 12:45:05 +0000377 }
378 else
379 {
380 mask->lport = 0;
381 mask->rport = 0;
382 }
383
384 mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000385 mask->action = 0;
Piotr Bronowski86f82082022-07-08 12:45:05 +0000386}
387
/**
 * @brief
 * Build the fast-path 5-tuple mask for an ip4 policy.
 *
 * The mask starts as all-ones; the local/remote address ranges are
 * converted to prefix masks (wildcarding from the first bit where
 * range start and stop differ), inbound policies match on SPI (for
 * PROTECT) but not protocol, and outbound policies match on
 * protocol/ports but not action.
 *
 * @param policy  source policy
 * @param mask    [out] computed mask
 * @param inbound true when the policy is inbound
 */
static_always_inline void
ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
			      bool inbound)
{
  u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
  u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
  u32 *plmask = (u32 *) &mask->laddr;
  u32 *praddr_start = (u32 *) &policy->raddr.start.ip4;
  u32 *praddr_stop = (u32 *) &policy->raddr.stop.ip4;
  u32 *prmask = (u32 *) &mask->raddr;

  /* default to exact-match everywhere, then selectively wildcard;
   * the ip4 zero padding must never participate in the match */
  clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
  clib_memset_u8 (&mask->l3_zero_pad, 0, sizeof (mask->l3_zero_pad));

  /* find bits where start != stop */
  *plmask = *pladdr_start ^ *pladdr_stop;
  *prmask = *praddr_start ^ *praddr_stop;
  /* Find most significant bit set (that is the first position
   * start differs from stop). Mask out everything after that bit and
   * the bit itself. Remember that policy stores start and stop in the net
   * order.
   */
  *plmask = clib_host_to_net_u32 (
    mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask)));

  *prmask = clib_host_to_net_u32 (
    mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask)));

  if (inbound)
    {
      /* only PROTECT policies carry an SPI worth matching on */
      if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)
	mask->spi = 0;

      mask->protocol = 0;
    }
  else
    {
      mask->action = 0;
      ipsec_fp_get_policy_ports_mask (policy, mask);
    }
}
429
Piotr Bronowski86f82082022-07-08 12:45:05 +0000430static_always_inline void
Piotr Bronowski06abf232022-09-20 14:44:36 +0000431ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
432 bool inbound)
Piotr Bronowski04643102022-05-10 13:18:22 +0000433{
434 u64 *pladdr_start = (u64 *) &policy->laddr.start;
435 u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
Piotr Bronowski86f82082022-07-08 12:45:05 +0000436 u64 *plmask = (u64 *) &mask->ip6_laddr;
Piotr Bronowski04643102022-05-10 13:18:22 +0000437 u64 *praddr_start = (u64 *) &policy->raddr.start;
438 u64 *praddr_stop = (u64 *) &policy->raddr.stop;
439 u64 *prmask = (u64 *) &mask->ip6_raddr;
Piotr Bronowski04643102022-05-10 13:18:22 +0000440
Piotr Bronowski81880602022-07-18 16:45:22 +0000441 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
Piotr Bronowski04643102022-05-10 13:18:22 +0000442
Piotr Bronowski86f82082022-07-08 12:45:05 +0000443 *plmask = (*pladdr_start++ ^ *pladdr_stop++);
Piotr Bronowski04643102022-05-10 13:18:22 +0000444
Piotr Bronowski86f82082022-07-08 12:45:05 +0000445 *prmask = (*praddr_start++ ^ *praddr_stop++);
Piotr Bronowski04643102022-05-10 13:18:22 +0000446
Piotr Bronowski86f82082022-07-08 12:45:05 +0000447 /* Find most significant bit set (that is the first position
448 * start differs from stop). Mask out everything after that bit and
449 * the bit itself. Remember that policy stores start and stop in the net
450 * order.
451 */
452 *plmask = clib_host_to_net_u64 (
453 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
Piotr Bronowski04643102022-05-10 13:18:22 +0000454
Piotr Bronowski86f82082022-07-08 12:45:05 +0000455 if (*plmask++ & clib_host_to_net_u64 (0x1))
456 {
457 *plmask = (*pladdr_start ^ *pladdr_stop);
458 *plmask = clib_host_to_net_u64 (
459 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
460 }
461 else
462 *plmask = 0;
Piotr Bronowski04643102022-05-10 13:18:22 +0000463
Piotr Bronowski86f82082022-07-08 12:45:05 +0000464 *prmask = clib_host_to_net_u64 (
465 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
Piotr Bronowski04643102022-05-10 13:18:22 +0000466
Piotr Bronowski86f82082022-07-08 12:45:05 +0000467 if (*prmask++ & clib_host_to_net_u64 (0x1))
468 {
469 *prmask = (*pladdr_start ^ *pladdr_stop);
470 *prmask = clib_host_to_net_u64 (
471 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
472 }
473 else
474 *prmask = 0;
Piotr Bronowski04643102022-05-10 13:18:22 +0000475
Piotr Bronowski06abf232022-09-20 14:44:36 +0000476 if (inbound)
477 {
478 if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)
479 mask->spi = 0;
480
481 mask->protocol = 0;
482 }
483 else
484 {
485 mask->action = 0;
486 ipsec_fp_get_policy_ports_mask (policy, mask);
487 }
Piotr Bronowski04643102022-05-10 13:18:22 +0000488}
489
490static_always_inline void
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000491ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple,
492 bool inbound)
Piotr Bronowski04643102022-05-10 13:18:22 +0000493{
494 memset (tuple, 0, sizeof (*tuple));
495 tuple->is_ipv6 = policy->is_ipv6;
496 if (tuple->is_ipv6)
497 {
498 tuple->ip6_laddr = policy->laddr.start.ip6;
499 tuple->ip6_raddr = policy->raddr.start.ip6;
500 }
501 else
502 {
503 tuple->laddr = policy->laddr.start.ip4;
504 tuple->raddr = policy->raddr.start.ip4;
505 }
506
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000507 if (inbound)
508 {
509
510 if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
511 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT) &&
512 policy->sa_index != INDEX_INVALID)
513 {
514 ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
515 tuple->spi = s->spi;
516 }
517 else
518 tuple->spi = INDEX_INVALID;
519 tuple->action = policy->type;
520 return;
521 }
522
Piotr Bronowski04643102022-05-10 13:18:22 +0000523 tuple->protocol = policy->protocol;
524
525 tuple->lport = policy->lport.start;
526 tuple->rport = policy->rport.start;
527}
528
Piotr Bronowski993b6be2022-08-31 13:48:14 +0000529static_always_inline int
530ipsec_fp_mask_type_idx_cmp (ipsec_fp_mask_id_t *mask_id, u32 *idx)
531{
532 return mask_id->mask_type_idx == *idx;
533}
534
/**
 * @brief
 * Install an ip4 policy into the fast-path SPD.
 *
 * Computes the policy's mask, ref-counts the matching mask-type
 * entry (creating one if needed), and appends the new policy's pool
 * index to the bihash bucket keyed by (5tuple & mask).
 *
 * @param im         ipsec main
 * @param fp_spd     fast-path SPD to add into
 * @param policy     policy to add; fp_mask_type_id is set here
 * @param stat_index [out] policy pool index for counters
 * @return 0 on success, -1 when the bihash insert fails
 */
int
ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy, u32 *stat_index)
{
  u32 mask_index, searched_idx;
  ipsec_policy_t *vp;
  ipsec_fp_mask_type_entry_t *mte;
  u32 policy_index;
  clib_bihash_kv_16_8_t kv;
  clib_bihash_kv_16_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;

  ipsec_fp_5tuple_t mask, policy_5tuple;
  int res;
  /* direction selects which of the two ip4 lookup hashes to use */
  bool inbound = ipsec_is_policy_inbound (policy);
  clib_bihash_16_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_out_lookup_hash_idx);

  ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
  pool_get (im->policies, vp);
  policy_index = vp - im->policies;
  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  *stat_index = policy_index;
  mask_index = find_mask_type_index (im, &mask);

  if (mask_index == ~0)
    {
      /* mask type not found, we need to create a new entry */
      pool_get (im->fp_mask_types, mte);
      mask_index = mte - im->fp_mask_types;
      mte->refcount = 0;
    }
  else
    mte = im->fp_mask_types + mask_index;

  policy->fp_mask_type_id = mask_index;
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);

  fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);

  res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
  if (res != 0)
    {
      /* key was not found, create a new entry */
      vec_add1 (key_val->fp_policies_ids, policy_index);
      res = clib_bihash_add_del_16_8 (bihash_table, &kv, 1);

      if (res != 0)
	goto error;
    }
  else
    {

      if (vec_max_len (result_val->fp_policies_ids) !=
	  vec_len (result_val->fp_policies_ids))
	{
	  /* no need to resize */
	  vec_add1 (result_val->fp_policies_ids, policy_index);
	}
      else
	{
	  /* vec_add1 may reallocate the vector, so re-write the
	   * bucket value with the (possibly new) vector pointer */
	  vec_add1 (result_val->fp_policies_ids, policy_index);

	  res = clib_bihash_add_del_16_8 (bihash_table, &result, 1);

	  if (res != 0)
	    goto error;
	}
    }

  if (mte->refcount == 0)
    {
      /* first user of this mask type: record the mask itself.
       * NOTE(review): the refcount re-assignment is redundant — it is
       * already 0 inside this branch. */
      clib_memcpy (&mte->mask, &mask, sizeof (mask));
      mte->refcount = 0;
    }

  /* track per-policy-type mask-id refcounts in the SPD itself */
  searched_idx =
    vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
			      ipsec_fp_mask_type_idx_cmp);
  if (~0 == searched_idx)
    {
      ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
      vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
    }
  else
    (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;

  mte->refcount++;
  vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
  clib_memcpy (vp, policy, sizeof (*vp));

  return 0;

error:
  /* NOTE(review): on this path mte->refcount was never incremented;
   * for a freshly created mask type (refcount 0) the decrement in
   * ipsec_fp_release_mask_type wraps the u32 — verify. */
  pool_put (im->policies, vp);
  ipsec_fp_release_mask_type (im, mask_index);
  return -1;
}
639
/**
 * @brief
 * Install an ip6 policy into the fast-path SPD.
 *
 * Mirror of ipsec_fp_ip4_add_policy using the 40-byte bihash:
 * computes the policy's mask, ref-counts the matching mask-type
 * entry (creating one if needed), and appends the new policy's pool
 * index to the bihash bucket keyed by (5tuple & mask).
 *
 * @param im         ipsec main
 * @param fp_spd     fast-path SPD to add into
 * @param policy     policy to add; fp_mask_type_id is set here
 * @param stat_index [out] policy pool index for counters
 * @return 0 on success, -1 when the bihash insert fails
 */
int
ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy, u32 *stat_index)
{

  u32 mask_index, searched_idx;
  ipsec_policy_t *vp;
  ipsec_fp_mask_type_entry_t *mte;
  u32 policy_index;
  clib_bihash_kv_40_8_t kv;
  clib_bihash_kv_40_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;

  ipsec_fp_5tuple_t mask, policy_5tuple;
  int res;
  /* direction selects which of the two ip6 lookup hashes to use */
  bool inbound = ipsec_is_policy_inbound (policy);

  ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
  pool_get (im->policies, vp);
  policy_index = vp - im->policies;
  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  *stat_index = policy_index;
  mask_index = find_mask_type_index (im, &mask);
  clib_bihash_40_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_out_lookup_hash_idx);

  if (mask_index == ~0)
    {
      /* mask type not found, we need to create a new entry */
      pool_get (im->fp_mask_types, mte);
      mask_index = mte - im->fp_mask_types;
      mte->refcount = 0;
    }
  else
    mte = im->fp_mask_types + mask_index;

  policy->fp_mask_type_id = mask_index;
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);

  fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);

  res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
  if (res != 0)
    {
      /* key was not found, create a new entry */
      vec_add1 (key_val->fp_policies_ids, policy_index);
      res = clib_bihash_add_del_40_8 (bihash_table, &kv, 1);
      if (res != 0)
	goto error;
    }
  else
    {

      if (vec_max_len (result_val->fp_policies_ids) !=
	  vec_len (result_val->fp_policies_ids))
	{
	  /* no need to resize */
	  vec_add1 (result_val->fp_policies_ids, policy_index);
	}
      else
	{
	  /* vec_add1 may reallocate the vector, so re-write the
	   * bucket value with the (possibly new) vector pointer */
	  vec_add1 (result_val->fp_policies_ids, policy_index);

	  res = clib_bihash_add_del_40_8 (bihash_table, &result, 1);

	  if (res != 0)
	    goto error;
	}
    }

  if (mte->refcount == 0)
    {
      /* first user of this mask type: record the mask itself.
       * NOTE(review): the refcount re-assignment is redundant — it is
       * already 0 inside this branch. */
      clib_memcpy (&mte->mask, &mask, sizeof (mask));
      mte->refcount = 0;
    }

  /* track per-policy-type mask-id refcounts in the SPD itself */
  searched_idx =
    vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
			      ipsec_fp_mask_type_idx_cmp);
  if (~0 == searched_idx)
    {
      ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
      vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
    }
  else
    (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;

  mte->refcount++;
  vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
  clib_memcpy (vp, policy, sizeof (*vp));

  return 0;

error:
  /* NOTE(review): on this path mte->refcount was never incremented;
   * for a freshly created mask type (refcount 0) the decrement in
   * ipsec_fp_release_mask_type wraps the u32 — verify. */
  pool_put (im->policies, vp);
  ipsec_fp_release_mask_type (im, mask_index);
  return -1;
}
744
/**
 * @brief
 * Remove an ip6 policy from the fast-path SPD.
 *
 * Rebuilds the policy's bihash key, finds the matching bucket, and —
 * for the first stored policy equal to *policy — unlinks it from the
 * bucket vector, the SPD's per-type policy vector and the per-type
 * mask-id refcounts, releases the mask type and the SA lock, and
 * frees the pool element.
 *
 * @return 0 on success, -1 when no matching policy is found
 */
int
ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy)
{
  int res;
  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
  clib_bihash_kv_40_8_t kv;
  clib_bihash_kv_40_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  /* direction selects which of the two ip6 lookup hashes to use */
  bool inbound = ipsec_is_policy_inbound (policy);
  clib_bihash_40_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_out_lookup_hash_idx);

  ipsec_policy_t *vp;
  u32 ii, iii, imt;

  /* rebuild the exact key the policy was inserted under */
  ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
  fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
  res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
  if (res != 0)
    return -1;

  res = -1;
  vec_foreach_index (ii, result_val->fp_policies_ids)
    {
      vp =
	pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
      if (ipsec_policy_is_equal (vp, policy))
	{
	  /* locate the same policy index in the SPD's per-type vector */
	  vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
	    {
	      if (*(fp_spd->fp_policies[policy->type] + iii) ==
		  *(result_val->fp_policies_ids + ii))
		{
		  /* last policy in the bucket: drop the whole bucket,
		   * otherwise just remove this id from its vector */
		  if (vec_len (result_val->fp_policies_ids) == 1)
		    {
		      vec_free (result_val->fp_policies_ids);
		      clib_bihash_add_del_40_8 (bihash_table, &result, 0);
		    }
		  else
		    {
		      vec_del1 (result_val->fp_policies_ids, ii);
		    }
		  vec_del1 (fp_spd->fp_policies[policy->type], iii);

		  /* drop the per-type mask-id reference; remove the
		   * entry when this was the last reference */
		  vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
		    {
		      if ((fp_spd->fp_mask_ids[policy->type] + imt)
			    ->mask_type_idx == vp->fp_mask_type_id)
			{

			  if ((fp_spd->fp_mask_ids[policy->type] + imt)
				->refcount-- == 1)
			    vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);

			  break;
			}
		    }

		  res = 0;
		  break;
		}
	    }

	  if (res != 0)
	    continue;
	  else
	    {
	      /* release the global mask-type refcount, the SA lock
	       * and the policy pool element */
	      ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
	      ipsec_sa_unlock (vp->sa_index);
	      pool_put (im->policies, vp);
	      return 0;
	    }
	}
    }
  return -1;
}
827
/**
 * @brief Remove an IPv4 policy from a fast-path SPD.
 *
 * Looks the policy up in the per-direction IPv4 bihash (keyed by the
 * policy's masked 5-tuple), removes its id from the matching hash bucket
 * and from the SPD's per-type policy vector, drops the mask-type
 * refcounts, unlocks the SA and returns the policy object to the pool.
 *
 * @param im      ipsec main instance (owns the policy pool and hash pools)
 * @param fp_spd  fast-path SPD the policy was added to
 * @param policy  policy to delete (matched with ipsec_policy_is_equal)
 * @return 0 on success, -1 if no equal policy was found.
 */
int
ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy)
{
  int res;
  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
  clib_bihash_kv_16_8_t kv;
  clib_bihash_kv_16_8_t result;
  /* The bihash value is interpreted as a vector of policy indices plus
     metadata; result_val aliases the value part of the search result. */
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  bool inbound = ipsec_is_policy_inbound (policy);
  ipsec_policy_t *vp;
  u32 ii, iii, imt;
  /* Pick the inbound or outbound IPv4 lookup table for this SPD. */
  clib_bihash_16_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_out_lookup_hash_idx);

  /* Rebuild the key the policy was inserted under: its mask and its
     masked 5-tuple. */
  ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
  fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
  res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);

  /* No bucket for this key: the policy cannot be present. */
  if (res != 0)
    return -1;

  res = -1;
  /* Scan the bucket for a policy equal to the one being deleted. */
  vec_foreach_index (ii, result_val->fp_policies_ids)
    {
      vp =
	pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
      if (ipsec_policy_is_equal (vp, policy))
	{
	  /* Find the same policy id in the SPD's per-type vector. */
	  vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
	    {
	      if (*(fp_spd->fp_policies[policy->type] + iii) ==
		  *(result_val->fp_policies_ids + ii))
		{
		  if (vec_len (result_val->fp_policies_ids) == 1)
		    {
		      /* Last policy in this bucket: free the id vector and
			 remove the bucket from the bihash entirely. */
		      vec_free (result_val->fp_policies_ids);
		      clib_bihash_add_del_16_8 (bihash_table, &result, 0);
		    }
		  else
		    {
		      /* Otherwise just drop this id; safe while iterating
			 because we break out right after. */
		      vec_del1 (result_val->fp_policies_ids, ii);
		    }
		  vec_del1 (fp_spd->fp_policies[policy->type], iii);

		  /* Drop the per-SPD refcount on this policy's mask type;
		     delete the entry when the count reaches zero. */
		  vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
		    {
		      if ((fp_spd->fp_mask_ids[policy->type] + imt)
			    ->mask_type_idx == vp->fp_mask_type_id)
			{

			  /* Post-decrement: compares the OLD value, so the
			     entry is removed when refcount goes 1 -> 0. */
			  if ((fp_spd->fp_mask_ids[policy->type] + imt)
				->refcount-- == 1)
			    vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);

			  break;
			}
		    }

		  res = 0;
		  break;
		}
	    }

	  /* Equal policy seen but not in the per-type vector: keep
	     scanning the bucket for another candidate. */
	  if (res != 0)
	    continue;
	  else
	    {
	      /* Fully unlinked: release the global mask-type reference,
		 drop the SA lock and free the policy object. */
	      ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
	      ipsec_sa_unlock (vp->sa_index);
	      pool_put (im->policies, vp);
	      return 0;
	    }
	}
    }
  return -1;
}
910
911int
912ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
913 u32 *stat_index)
914{
915 ipsec_main_t *im = &ipsec_main;
916
917 if (is_add)
918 if (policy->is_ipv6)
919 return ipsec_fp_ip6_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
920 stat_index);
921 else
922 return ipsec_fp_ip4_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
923 stat_index);
924
925 else if (policy->is_ipv6)
926
927 return ipsec_fp_ip6_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
928 else
929 return ipsec_fp_ip4_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
930}
931
Neale Ranns999c8ee2019-02-01 03:31:24 -0800932/*
933 * fd.io coding-style-patch-verification: ON
934 *
935 * Local Variables:
936 * eval: (c-set-style "gnu")
937 * End:
938 */