/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ipsec/ipsec.h>

/**
 * @brief
 * Policy packet & bytes counters, exported in the stats segment
 * as /net/ipsec/policy.
 */
vlib_combined_counter_main_t ipsec_spd_policy_counters = {
  .name = "policy",
  .stat_segment_name = "/net/ipsec/policy",
};

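/**
 * @brief Field-by-field equality test for two SPD policy entries.
 * Used on the delete path to locate the entry to remove. Returns 1
 * if the policies match, 0 otherwise.
 */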
static int
ipsec_policy_is_equal (ipsec_policy_t * p1, ipsec_policy_t * p2)
{
  if (p1->priority != p2->priority)
    return (0);
  if (p1->type != p2->type)
    return (0);
  if (p1->policy != p2->policy)
    return (0);
  if (p1->sa_id != p2->sa_id)
    return (0);
  if (p1->protocol != p2->protocol)
    return (0);
  if (p1->lport.start != p2->lport.start)
    return (0);
  if (p1->lport.stop != p2->lport.stop)
    return (0);
  if (p1->rport.start != p2->rport.start)
    return (0);
  if (p1->rport.stop != p2->rport.stop)
    return (0);
  if (p1->is_ipv6 != p2->is_ipv6)
    return (0);
  if (p2->is_ipv6)
    {
      /* Compare both 64-bit halves of each 128-bit IPv6 range endpoint. */
      if (p1->laddr.start.ip6.as_u64[0] != p2->laddr.start.ip6.as_u64[0])
        return (0);
      if (p1->laddr.start.ip6.as_u64[1] != p2->laddr.start.ip6.as_u64[1])
        return (0);
      if (p1->laddr.stop.ip6.as_u64[0] != p2->laddr.stop.ip6.as_u64[0])
        return (0);
      if (p1->laddr.stop.ip6.as_u64[1] != p2->laddr.stop.ip6.as_u64[1])
        return (0);
      if (p1->raddr.start.ip6.as_u64[0] != p2->raddr.start.ip6.as_u64[0])
        return (0);
      if (p1->raddr.start.ip6.as_u64[1] != p2->raddr.start.ip6.as_u64[1])
        return (0);
      if (p1->raddr.stop.ip6.as_u64[0] != p2->raddr.stop.ip6.as_u64[0])
        return (0);
      if (p1->raddr.stop.ip6.as_u64[1] != p2->raddr.stop.ip6.as_u64[1])
        return (0);
    }
  else
    {
      if (p1->laddr.start.ip4.as_u32 != p2->laddr.start.ip4.as_u32)
        return (0);
      if (p1->laddr.stop.ip4.as_u32 != p2->laddr.stop.ip4.as_u32)
        return (0);
      if (p1->raddr.start.ip4.as_u32 != p2->raddr.start.ip4.as_u32)
        return (0);
      if (p1->raddr.stop.ip4.as_u32 != p2->raddr.stop.ip4.as_u32)
        return (0);
    }
  return (1);
}

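/**
 * @brief Comparison callback for vec_sort_with_function (): orders a
 * vector of policy pool indices by descending priority, so that
 * higher-priority policies are matched first.
 */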
static int
ipsec_spd_entry_sort (void *a1, void *a2)
{
  ipsec_main_t *im = &ipsec_main;
  u32 *id1 = a1;
  u32 *id2 = a2;
  ipsec_policy_t *p1, *p2;

  p1 = pool_elt_at_index (im->policies, *id1);
  p2 = pool_elt_at_index (im->policies, *id2);
  if (p1 && p2)
    return p2->priority - p1->priority;

  return 0;
}

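/**
 * @brief Map (direction, address-family, action) to the SPD policy
 * type under which a policy is bucketed. All outbound policies share
 * one bucket per address family; inbound policies are bucketed per
 * action. Returns 0 on success, -1 for unsupported combinations
 * (currently the RESOLVE action).
 */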
int
ipsec_policy_mk_type (bool is_outbound,
                      bool is_ipv6,
                      ipsec_policy_action_t action,
                      ipsec_spd_policy_type_t * type)
{
  if (is_outbound)
    {
      *type = (is_ipv6 ?
               IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
      return (0);
    }
  else
    {
      switch (action)
        {
        case IPSEC_POLICY_ACTION_PROTECT:
          *type = (is_ipv6 ?
                   IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
                   IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
          return (0);
        case IPSEC_POLICY_ACTION_BYPASS:
          *type = (is_ipv6 ?
                   IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
                   IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
          return (0);
        case IPSEC_POLICY_ACTION_DISCARD:
          *type = (is_ipv6 ?
                   IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
                   IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
          return (0);
        case IPSEC_POLICY_ACTION_RESOLVE:
          break;
        }
    }

  /* Unsupported type */
  return (-1);
}

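/**
 * @brief Add or delete an SPD policy. On add, the referenced SA is
 * looked up and locked (PROTECT policies only), the policy is copied
 * into the global pool, its counters are validated and zeroed, and its
 * pool index is inserted, priority-sorted, into the owning SPD's
 * per-type vector. On delete, the matching entry is located with
 * ipsec_policy_is_equal () and removed.
 *
 * A hypothetical caller might look like the sketch below (the locals
 * are illustrative only, not part of this file):
 *
 * @code
 *   ipsec_policy_t p = { 0 };
 *   u32 stat_index;
 *   int rv;
 *   // ... fill in p.id (owning SPD), p.priority, p.policy (action),
 *   // p.sa_id, p.is_ipv6 and the address/port ranges ...
 *   if (ipsec_policy_mk_type (1, p.is_ipv6, p.policy, &p.type))
 *     return -1;		// unsupported combination
 *   rv = ipsec_add_del_policy (vm, &p, 1, &stat_index);
 * @endcode
 */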
139int
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800140ipsec_add_del_policy (vlib_main_t * vm,
141 ipsec_policy_t * policy, int is_add, u32 * stat_index)
Neale Ranns999c8ee2019-02-01 03:31:24 -0800142{
143 ipsec_main_t *im = &ipsec_main;
144 ipsec_spd_t *spd = 0;
145 ipsec_policy_t *vp;
Neale Ranns999c8ee2019-02-01 03:31:24 -0800146 u32 spd_index;
Neale Rannsa09c1ff2019-02-04 01:10:30 -0800147 uword *p;
Neale Ranns999c8ee2019-02-01 03:31:24 -0800148
Neale Ranns999c8ee2019-02-01 03:31:24 -0800149 p = hash_get (im->spd_index_by_spd_id, policy->id);
150
151 if (!p)
152 return VNET_API_ERROR_SYSCALL_ERROR_1;
153
154 spd_index = p[0];
155 spd = pool_elt_at_index (im->spds, spd_index);
156 if (!spd)
157 return VNET_API_ERROR_SYSCALL_ERROR_1;
158
Zachary Leaf7cd35f52021-06-25 08:11:15 -0500159 if (im->output_flow_cache_flag && !policy->is_ipv6 &&
Govindarajan Mohandoss6d7dfcb2021-03-19 19:20:49 +0000160 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
161 {
162 /*
163 * Flow cache entry is valid only when epoch_count value in control
164 * plane and data plane match. Otherwise, flow cache entry is considered
165 * stale. To avoid the race condition of using old epoch_count value
166 * in data plane after the roll over of epoch_count in control plane,
167 * entire flow cache is reset.
168 */
169 if (im->epoch_count == 0xFFFFFFFF)
170 {
171 /* Reset all the entries in flow cache */
172 clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
173 im->ipsec4_out_spd_hash_num_buckets *
174 (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
175 }
176 /* Increment epoch counter by 1 */
177 clib_atomic_fetch_add_relax (&im->epoch_count, 1);
178 /* Reset spd flow cache counter since all old entries are stale */
179 clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
180 }
181
Zachary Leaf7cd35f52021-06-25 08:11:15 -0500182 if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
183 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
184 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
185 im->input_flow_cache_flag && !policy->is_ipv6)
186 {
187 /*
188 * Flow cache entry is valid only when input_epoch_count value in control
189 * plane and data plane match. Otherwise, flow cache entry is considered
190 * stale. To avoid the race condition of using old input_epoch_count
191 * value in data plane after the roll over of input_epoch_count in
192 * control plane, entire flow cache is reset.
193 */
194 if (im->input_epoch_count == 0xFFFFFFFF)
195 {
196 /* Reset all the entries in flow cache */
197 clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
198 im->ipsec4_in_spd_hash_num_buckets *
199 (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
200 }
201 /* Increment epoch counter by 1 */
202 clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
203 /* Reset spd flow cache counter since all old entries are stale */
204 im->ipsec4_in_spd_flow_cache_entries = 0;
205 }
206
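  /*
   * Add: lock the SA for PROTECT policies, copy the policy into the
   * global pool, prepare its counters, and insert its pool index into
   * the owning SPD's per-type vector, kept sorted by priority.
   */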
  if (is_add)
    {
      u32 policy_index;

      if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
        {
          index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);

          if (INDEX_INVALID == sa_index)
            return VNET_API_ERROR_SYSCALL_ERROR_1;
          policy->sa_index = sa_index;
        }
      else
        policy->sa_index = INDEX_INVALID;

      pool_get (im->policies, vp);
      clib_memcpy (vp, policy, sizeof (*vp));
      policy_index = vp - im->policies;

      vlib_validate_combined_counter (&ipsec_spd_policy_counters,
                                      policy_index);
      vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);

      vec_add1 (spd->policies[policy->type], policy_index);
      vec_sort_with_function (spd->policies[policy->type],
                              ipsec_spd_entry_sort);
      *stat_index = policy_index;
    }
  else
    {
      u32 ii;

      /*
       * Delete: locate the matching entry in the SPD's per-type vector,
       * remove it, release the SA lock taken on add, and return the
       * policy to the pool.
       */
      vec_foreach_index (ii, (spd->policies[policy->type]))
        {
          vp = pool_elt_at_index (im->policies,
                                  spd->policies[policy->type][ii]);
          if (ipsec_policy_is_equal (vp, policy))
            {
              vec_delete (spd->policies[policy->type], 1, ii);
              ipsec_sa_unlock (vp->sa_index);
              pool_put (im->policies, vp);
              break;
            }
        }
    }

  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */