/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief L2-GRE over IPSec packet processing.
 *
 * Removes the GRE header from the packet and sends it to the l2-input node.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/ipsec-gre/ipsec_gre.h>
#include <vppinfra/sparse_vec.h>

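/* X-macro listing this node's next nodes (punt, drop, l2-input).  It is
 * expanded once below to build the ipsec_gre_input_next_t enum and again in
 * VLIB_REGISTER_NODE to fill in .next_nodes. */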
#define foreach_ipsec_gre_input_next		\
_(PUNT, "error-punt")				\
_(DROP, "error-drop")				\
_(L2_INPUT, "l2-input")

typedef enum {
#define _(s,n) IPSEC_GRE_INPUT_NEXT_##s,
  foreach_ipsec_gre_input_next
#undef _
  IPSEC_GRE_INPUT_N_NEXT,
} ipsec_gre_input_next_t;

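/* Per-packet trace data captured at decap time and rendered by
 * format_ipsec_gre_rx_trace below. */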
typedef struct {
  u32 tunnel_id;
  u32 length;
  ip4_address_t src;
  ip4_address_t dst;
} ipsec_gre_rx_trace_t;

u8 * format_ipsec_gre_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ipsec_gre_rx_trace_t * t = va_arg (*args, ipsec_gre_rx_trace_t *);

  s = format (s, "GRE: tunnel %d len %d src %U dst %U",
              t->tunnel_id, clib_net_to_host_u16(t->length),
              format_ip4_address, &t->src.as_u8,
              format_ip4_address, &t->dst.as_u8);
  return s;
}

/**
 * @brief L2-GRE over IPSec input node.
 * @node ipsec-gre-input
 *
 * This node removes the GRE header.
 *
 * @param vm vlib_main_t corresponding to the current thread.
 * @param node vlib_node_runtime_t data for this node.
 * @param from_frame vlib_frame_t whose contents should be dispatched.
 *
 * @par Graph mechanics: buffer metadata, next index usage
 *
 * <em>Uses:</em>
 * - <code>ip->src_address</code> and <code>ip->dst_address</code>
 *     - Match tunnel by source and destination addresses in GRE IP header.
 *
 * <em>Sets:</em>
 * - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
 *     - Set input sw_if_index to IPSec-GRE tunnel for learning.
 *
 * <em>Next Index:</em>
 * - Dispatches the packet to the l2-input node.
 */
static uword
ipsec_gre_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  ipsec_gre_main_t * igm = &ipsec_gre_main;
  u32 n_left_from, next_index, * from, * to_next;
  u64 cached_tunnel_key = (u64) ~0;
  u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index;
  u32 tun_src0, tun_dst0;
  u32 tun_src1, tun_dst1;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

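      /* Standard vpp dual/single loop: take packets two at a time while at
       * least four remain so the following pair's buffers can be prefetched;
       * the single-packet loop below drains whatever is left in the frame. */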
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          gre_header_t * h0, * h1;
          u16 version0, version1, protocol0, protocol1;
          int verr0, verr1;
          u32 next0, next1;
          ip4_header_t *ip0, *ip1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* ip4_local hands us the ip header, not the gre header */
          ip0 = vlib_buffer_get_current (b0);
          ip1 = vlib_buffer_get_current (b1);

          /* Save src + dst ip4 address */
          tun_src0 = ip0->src_address.as_u32;
          tun_dst0 = ip0->dst_address.as_u32;
          tun_src1 = ip1->src_address.as_u32;
          tun_dst1 = ip1->dst_address.as_u32;

          vlib_buffer_advance (b0, sizeof (*ip0));
          vlib_buffer_advance (b1, sizeof (*ip1));

          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);

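          /* Only GRE protocol 0x0001 is treated as an L2 payload and sent to
           * l2-input; any other protocol value is dropped and counted as
           * IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL. */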
          protocol0 = clib_net_to_host_u16 (h0->protocol);
          protocol1 = clib_net_to_host_u16 (h1->protocol);
          if (PREDICT_TRUE(protocol0 == 0x0001))
            {
              next0 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
              b0->error = node->errors[IPSEC_GRE_ERROR_NONE];
            }
          else
            {
              b0->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
              next0 = IPSEC_GRE_INPUT_NEXT_DROP;
            }
          if (PREDICT_TRUE(protocol1 == 0x0001))
            {
              next1 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
              b1->error = node->errors[IPSEC_GRE_ERROR_NONE];
            }
          else
            {
              b1->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
              next1 = IPSEC_GRE_INPUT_NEXT_DROP;
            }

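          /* Only GRE version 0 (RFC 2784/2890) is supported; a non-zero
           * version field overrides the verdict above and drops the packet. */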
          version0 = clib_net_to_host_u16 (h0->flags_and_version);
          verr0 = version0 & GRE_VERSION_MASK;
          version1 = clib_net_to_host_u16 (h1->flags_and_version);
          verr1 = version1 & GRE_VERSION_MASK;

          b0->error = verr0 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
              : b0->error;
          next0 = verr0 ? IPSEC_GRE_INPUT_NEXT_DROP : next0;
          b1->error = verr1 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
              : b1->error;
          next1 = verr1 ? IPSEC_GRE_INPUT_NEXT_DROP : next1;

          /* For L2 payload set input sw_if_index to GRE tunnel for learning */
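          /* Tunnels are keyed in igm->tunnel_by_key by the outer IPv4
           * addresses, (dst << 32 | src); the cached_tunnel_* locals memoize
           * the last successful lookup so back-to-back packets from the same
           * peer can skip the hash. */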
          if (PREDICT_TRUE(next0 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
            {
              u64 key = ((u64)(tun_dst0) << 32) | (u64)(tun_src0);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  ipsec_gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (igm->tunnel_by_key, key);
                  if (!p)
                    {
                      next0 = IPSEC_GRE_INPUT_NEXT_DROP;
                      b0->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop0;
                    }
                  t = pool_elt_at_index (igm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (igm->vnet_main,
                                              t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;
                  cached_tunnel_key = key;
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
              vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
            }

drop0:
          /* For L2 payload set input sw_if_index to GRE tunnel for learning */
          if (PREDICT_TRUE(next1 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
            {
              u64 key = ((u64)(tun_dst1) << 32) | (u64)(tun_src1);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  ipsec_gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (igm->tunnel_by_key, key);
                  if (!p)
                    {
                      next1 = IPSEC_GRE_INPUT_NEXT_DROP;
                      b1->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop1;
                    }
                  t = pool_elt_at_index (igm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (igm->vnet_main,
                                              t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;
                  cached_tunnel_key = key;
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
              vnet_buffer(b1)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
            }

drop1:
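          /* When packet tracing is enabled on a buffer, record the outer IPv4
           * addresses and IP length for display by format_ipsec_gre_rx_trace. */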
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                         b0, sizeof (*tr));
              tr->tunnel_id = ~0;
              tr->length = ip0->length;
              tr->src.as_u32 = ip0->src_address.as_u32;
              tr->dst.as_u32 = ip0->dst_address.as_u32;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                         b1, sizeof (*tr));
              tr->tunnel_id = ~0;
              tr->length = ip1->length;
              tr->src.as_u32 = ip1->src_address.as_u32;
              tr->dst.as_u32 = ip1->dst_address.as_u32;
            }

          vlib_buffer_advance (b0, sizeof (*h0));
          vlib_buffer_advance (b1, sizeof (*h1));

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

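      /* Single-packet loop: handles the remaining buffers (or a nearly full
       * next frame) with the same decap logic as the dual loop above. */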
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          gre_header_t * h0;
          ip4_header_t * ip0;
          u16 version0, protocol0;
          int verr0;
          u32 next0;
          u32 tun_src0, tun_dst0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);

          tun_src0 = ip0->src_address.as_u32;
          tun_dst0 = ip0->dst_address.as_u32;

          vlib_buffer_advance (b0, sizeof (*ip0));

          h0 = vlib_buffer_get_current (b0);

          protocol0 = clib_net_to_host_u16 (h0->protocol);
          if (PREDICT_TRUE(protocol0 == 0x0001))
            {
              next0 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
              b0->error = node->errors[IPSEC_GRE_ERROR_NONE];
            }
          else
            {
              b0->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
              next0 = IPSEC_GRE_INPUT_NEXT_DROP;
            }

          version0 = clib_net_to_host_u16 (h0->flags_and_version);
          verr0 = version0 & GRE_VERSION_MASK;
          b0->error = verr0 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
              : b0->error;
          next0 = verr0 ? IPSEC_GRE_INPUT_NEXT_DROP : next0;

          /* For L2 payload set input sw_if_index to GRE tunnel for learning */
          if (PREDICT_FALSE(next0 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
            {
              u64 key = ((u64)(tun_dst0) << 32) | (u64)(tun_src0);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  ipsec_gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (igm->tunnel_by_key, key);
                  if (!p)
                    {
                      next0 = IPSEC_GRE_INPUT_NEXT_DROP;
                      b0->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop;
                    }
                  t = pool_elt_at_index (igm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (igm->vnet_main,
                                              t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;
                  cached_tunnel_key = key;
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
              vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
            }

drop:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                         b0, sizeof (*tr));
              tr->tunnel_id = ~0;
              tr->length = ip0->length;
              tr->src.as_u32 = ip0->src_address.as_u32;
              tr->dst.as_u32 = ip0->dst_address.as_u32;
            }

          vlib_buffer_advance (b0, sizeof (*h0));

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
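  /* Bump this node's decap packet counter once for the whole frame. */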
  vlib_node_increment_counter (vm, ipsec_gre_input_node.index,
                               IPSEC_GRE_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}

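/* Error strings are pulled from error.def with an X-macro so they stay in
 * sync with the IPSEC_GRE_ERROR_* enum used above. */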
static char * ipsec_gre_error_strings[] = {
#define ipsec_gre_error(n,s) s,
#include "error.def"
#undef ipsec_gre_error
};

VLIB_REGISTER_NODE (ipsec_gre_input_node) = {
  .function = ipsec_gre_input,
  .name = "ipsec-gre-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = IPSEC_GRE_N_ERROR,
  .error_strings = ipsec_gre_error_strings,

  .n_next_nodes = IPSEC_GRE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_GRE_INPUT_NEXT_##s] = n,
    foreach_ipsec_gre_input_next
#undef _
  },

  .format_trace = format_ipsec_gre_rx_trace,
};

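/* Generate per-CPU-architecture variants of ipsec_gre_input and select the
 * best one at runtime. */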
VLIB_NODE_FUNCTION_MULTIARCH (ipsec_gre_input_node, ipsec_gre_input)

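/* Input-node init: just makes sure the main ipsec-gre module init
 * (ipsec_gre_init) has run before this node processes packets. */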
static clib_error_t * ipsec_gre_input_init (vlib_main_t * vm)
{
  {
    clib_error_t * error;
    error = vlib_call_init_function (vm, ipsec_gre_init);
    if (error)
      clib_error_report (error);
  }

  return 0;
}

VLIB_INIT_FUNCTION (ipsec_gre_input_init);