/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief L2-GRE over IPSec packet processing.
 *
 * Removes the GRE header from the packet and sends it to the l2-input node.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/ipsec-gre/ipsec_gre.h>
#include <vppinfra/sparse_vec.h>

#define foreach_ipsec_gre_input_next \
_(PUNT, "error-punt") \
_(DROP, "error-drop") \
_(L2_INPUT, "l2-input")
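/* The list above is an X-macro: it is expanded once below to generate the
 * ipsec_gre_input_next_t enum and again in the node registration at the
 * bottom of this file to populate the next_nodes[] name table. */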

typedef enum {
#define _(s,n) IPSEC_GRE_INPUT_NEXT_##s,
  foreach_ipsec_gre_input_next
#undef _
  IPSEC_GRE_INPUT_N_NEXT,
} ipsec_gre_input_next_t;

typedef struct {
  u32 tunnel_id;
  u32 length;
  ip4_address_t src;
  ip4_address_t dst;
} ipsec_gre_rx_trace_t;
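/* The length field above is copied verbatim from the outer IPv4 header, so it
 * is stored in network byte order; the trace formatter below converts it with
 * clib_net_to_host_u16 before printing. */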

u8 * format_ipsec_gre_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ipsec_gre_rx_trace_t * t = va_arg (*args, ipsec_gre_rx_trace_t *);

  s = format (s, "GRE: tunnel %d len %d src %U dst %U",
              t->tunnel_id, clib_net_to_host_u16(t->length),
              format_ip4_address, &t->src.as_u8,
              format_ip4_address, &t->dst.as_u8);
  return s;
}

/**
 * @brief L2-GRE over IPSec input node.
 * @node ipsec-gre-input
 *
 * This node removes the GRE header.
 *
 * @param vm vlib_main_t corresponding to the current thread.
 * @param node vlib_node_runtime_t data for this node.
 * @param from_frame vlib_frame_t whose contents should be dispatched.
 *
 * @par Graph mechanics: buffer metadata, next index usage
 *
 * <em>Uses:</em>
 * - <code>ip->src_address</code> and <code>ip->dst_address</code>
 *     - Match tunnel by source and destination addresses in GRE IP header.
 *
 * <em>Sets:</em>
 * - <code>vnet_buffer(b)->gre.src</code>
 *     - Save tunnel source IPv4 address.
 * - <code>vnet_buffer(b)->gre.dst</code>
 *     - Save tunnel destination IPv4 address.
 * - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
 *     - Set input sw_if_index to IPSec-GRE tunnel for learning.
 *
 * <em>Next Index:</em>
 * - Dispatches the packet to the l2-input node.
 */
static uword
ipsec_gre_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  ipsec_gre_main_t * igm = &ipsec_gre_main;
  u32 n_left_from, next_index, * from, * to_next;
  u64 cached_tunnel_key = (u64) ~0;
  u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index;
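  /* One-entry tunnel lookup cache: the key packs the outer destination
   * address into the upper 32 bits and the outer source address into the
   * lower 32 bits.  While consecutive packets belong to the same tunnel the
   * hash lookup in tunnel_by_key is skipped and the cached sw_if_index is
   * reused. */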

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

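      /* Dual loop: process two packets per iteration and prefetch the buffer
       * headers and packet data of the following two to hide memory latency. */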
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          gre_header_t * h0, * h1;
          u16 version0, version1, protocol0, protocol1;
          int verr0, verr1;
          u32 next0, next1;
          ip4_header_t *ip0, *ip1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* ip4_local hands us the ip header, not the gre header */
          ip0 = vlib_buffer_get_current (b0);
          ip1 = vlib_buffer_get_current (b1);

          /* Save src + dst ip4 address */
          vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
          vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
          vnet_buffer(b1)->gre.src = ip1->src_address.as_u32;
          vnet_buffer(b1)->gre.dst = ip1->dst_address.as_u32;

          vlib_buffer_advance (b0, sizeof (*ip0));
          vlib_buffer_advance (b1, sizeof (*ip1));

          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);

          protocol0 = clib_net_to_host_u16 (h0->protocol);
          protocol1 = clib_net_to_host_u16 (h1->protocol);
          if (PREDICT_TRUE(protocol0 == 0x0001))
            {
              next0 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
              b0->error = node->errors[IPSEC_GRE_ERROR_NONE];
            }
          else
            {
              clib_warning("unknown GRE protocol: %d", protocol0);
              b0->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
              next0 = IPSEC_GRE_INPUT_NEXT_DROP;
            }
          if (PREDICT_TRUE(protocol1 == 0x0001))
            {
              next1 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
              b1->error = node->errors[IPSEC_GRE_ERROR_NONE];
            }
          else
            {
              clib_warning("unknown GRE protocol: %d", protocol1);
              b1->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
              next1 = IPSEC_GRE_INPUT_NEXT_DROP;
            }

          version0 = clib_net_to_host_u16 (h0->flags_and_version);
          verr0 = version0 & GRE_VERSION_MASK;
          version1 = clib_net_to_host_u16 (h1->flags_and_version);
          verr1 = version1 & GRE_VERSION_MASK;

          b0->error = verr0 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
              : b0->error;
          next0 = verr0 ? IPSEC_GRE_INPUT_NEXT_DROP : next0;
          b1->error = verr1 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
              : b1->error;
          next1 = verr1 ? IPSEC_GRE_INPUT_NEXT_DROP : next1;

          /* For L2 payload set input sw_if_index to GRE tunnel for learning */
          if (PREDICT_TRUE(next0 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
            {
              u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
                  (u64)(vnet_buffer(b0)->gre.src);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  ipsec_gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (igm->tunnel_by_key, key);
                  if (!p)
                    {
                      next0 = IPSEC_GRE_INPUT_NEXT_DROP;
                      b0->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop0;
                    }
                  t = pool_elt_at_index (igm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (igm->vnet_main,
                                              t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;
                  cached_tunnel_key = key; /* remember key so the cache can hit */
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
              vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
            }

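          /* drop0/drop1 sit between the two per-packet lookup blocks: a failed
           * lookup for b0 jumps to drop0 so that only b0's sw_if_index
           * assignment is skipped while b1 is still processed; drop1 below
           * plays the same role for b1. */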
drop0:
          /* For L2 payload set input sw_if_index to GRE tunnel for learning */
          if (PREDICT_TRUE(next1 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
            {
              u64 key = ((u64)(vnet_buffer(b1)->gre.dst) << 32) |
                  (u64)(vnet_buffer(b1)->gre.src);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  ipsec_gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (igm->tunnel_by_key, key);
                  if (!p)
                    {
                      next1 = IPSEC_GRE_INPUT_NEXT_DROP;
                      b1->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop1;
                    }
                  t = pool_elt_at_index (igm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (igm->vnet_main,
                                              t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;
                  cached_tunnel_key = key; /* remember key so the cache can hit */
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
              vnet_buffer(b1)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
            }

drop1:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                         b0, sizeof (*tr));
              tr->tunnel_id = ~0;
              tr->length = ip0->length;
              tr->src.as_u32 = ip0->src_address.as_u32;
              tr->dst.as_u32 = ip0->dst_address.as_u32;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                         b1, sizeof (*tr));
              tr->tunnel_id = ~0;
              tr->length = ip1->length;
              tr->src.as_u32 = ip1->src_address.as_u32;
              tr->dst.as_u32 = ip1->dst_address.as_u32;
            }

          vlib_buffer_advance (b0, sizeof (*h0));
          vlib_buffer_advance (b1, sizeof (*h1));

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

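      /* Single loop: same processing as the dual loop above, one packet at a
       * time, used to drain whatever packets are left over. */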
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          gre_header_t * h0;
          ip4_header_t * ip0;
          u16 version0, protocol0;
          int verr0;
          u32 next0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);

          vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
          vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;

          vlib_buffer_advance (b0, sizeof (*ip0));

          h0 = vlib_buffer_get_current (b0);

          protocol0 = clib_net_to_host_u16 (h0->protocol);
          if (PREDICT_TRUE(protocol0 == 0x0001))
            {
              next0 = IPSEC_GRE_INPUT_NEXT_L2_INPUT;
              b0->error = node->errors[IPSEC_GRE_ERROR_NONE];
            }
          else
            {
              clib_warning("unknown GRE protocol: %d", protocol0);
              b0->error = node->errors[IPSEC_GRE_ERROR_UNKNOWN_PROTOCOL];
              next0 = IPSEC_GRE_INPUT_NEXT_DROP;
            }

          version0 = clib_net_to_host_u16 (h0->flags_and_version);
          verr0 = version0 & GRE_VERSION_MASK;
          b0->error = verr0 ? node->errors[IPSEC_GRE_ERROR_UNSUPPORTED_VERSION]
              : b0->error;
          next0 = verr0 ? IPSEC_GRE_INPUT_NEXT_DROP : next0;

          /* For L2 payload set input sw_if_index to GRE tunnel for learning */
          if (PREDICT_FALSE(next0 == IPSEC_GRE_INPUT_NEXT_L2_INPUT))
            {
              u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
                  (u64)(vnet_buffer(b0)->gre.src);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  ipsec_gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (igm->tunnel_by_key, key);
                  if (!p)
                    {
                      next0 = IPSEC_GRE_INPUT_NEXT_DROP;
                      b0->error = node->errors[IPSEC_GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop;
                    }
                  t = pool_elt_at_index (igm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (igm->vnet_main,
                                              t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;
                  cached_tunnel_key = key; /* remember key so the cache can hit */
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
              vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
            }

drop:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                         b0, sizeof (*tr));
              tr->tunnel_id = ~0;
              tr->length = ip0->length;
              tr->src.as_u32 = ip0->src_address.as_u32;
              tr->dst.as_u32 = ip0->dst_address.as_u32;
            }

          vlib_buffer_advance (b0, sizeof (*h0));

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, ipsec_gre_input_node.index,
                               IPSEC_GRE_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}

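/* Error counter strings for this node; error.def supplies the
 * (symbol, string) pairs via the ipsec_gre_error X-macro. */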
static char * ipsec_gre_error_strings[] = {
#define ipsec_gre_error(n,s) s,
#include "error.def"
#undef ipsec_gre_error
};

VLIB_REGISTER_NODE (ipsec_gre_input_node) = {
  .function = ipsec_gre_input,
  .name = "ipsec-gre-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = IPSEC_GRE_N_ERROR,
  .error_strings = ipsec_gre_error_strings,

  .n_next_nodes = IPSEC_GRE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_GRE_INPUT_NEXT_##s] = n,
    foreach_ipsec_gre_input_next
#undef _
  },

  .format_trace = format_ipsec_gre_rx_trace,
};

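/* Allow per-CPU-architecture (multiarch) variants of ipsec_gre_input to be
 * built, with the best one selected at runtime. */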
VLIB_NODE_FUNCTION_MULTIARCH (ipsec_gre_input_node, ipsec_gre_input)

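/* Ensure the main ipsec-gre initialization has run before this node's init
 * completes; vlib_call_init_function invokes ipsec_gre_init only if it has
 * not already been called. */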
static clib_error_t * ipsec_gre_input_init (vlib_main_t * vm)
{
  {
    clib_error_t * error;
    error = vlib_call_init_function (vm, ipsec_gre_init);
    if (error)
      clib_error_report (error);
  }

  return 0;
}

VLIB_INIT_FUNCTION (ipsec_gre_input_init);