/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
#include <vnet/mfib/mfib_itf.h>
#include <vnet/mfib/mfib_entry.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/mfib/ip4_mfib.h>
#include <vnet/mfib/ip6_mfib.h>
#include <vnet/mfib/mfib_signal.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>

#include <vnet/ip/ip4.h>
#include <vnet/vnet.h>
27
/**
 * Per-packet trace data recorded by the mfib-forward-lookup nodes.
 */
typedef struct mfib_forward_lookup_trace_t_ {
    u32 entry_index;    /**< mfib entry index stored in ip.adj_index[VLIB_TX] */
    u32 fib_index;      /**< mfib table index of the packet's RX interface */
} mfib_forward_lookup_trace_t;
32
33static u8 *
34format_mfib_forward_lookup_trace (u8 * s, va_list * args)
35{
36 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
37 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
38 mfib_forward_lookup_trace_t * t = va_arg (*args, mfib_forward_lookup_trace_t *);
39
40 s = format (s, "fib %d entry %d", t->fib_index, t->entry_index);
41 return s;
42}
43
44/* Common trace function for all ip4-forward next nodes. */
45void
46mfib_forward_lookup_trace (vlib_main_t * vm,
47 vlib_node_runtime_t * node,
48 vlib_frame_t * frame)
49{
50 u32 * from, n_left;
51 ip4_main_t * im = &ip4_main;
52
53 n_left = frame->n_vectors;
54 from = vlib_frame_vector_args (frame);
55
56 while (n_left >= 4)
57 {
58 mfib_forward_lookup_trace_t * t0, * t1;
59 vlib_buffer_t * b0, * b1;
60 u32 bi0, bi1;
61
62 /* Prefetch next iteration. */
63 vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
64 vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
65
66 bi0 = from[0];
67 bi1 = from[1];
68
69 b0 = vlib_get_buffer (vm, bi0);
70 b1 = vlib_get_buffer (vm, bi1);
71
72 if (b0->flags & VLIB_BUFFER_IS_TRACED)
73 {
74 t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
75 t0->entry_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
76 t0->fib_index = vec_elt (im->mfib_index_by_sw_if_index,
77 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
78 }
79 if (b1->flags & VLIB_BUFFER_IS_TRACED)
80 {
81 t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
82 t1->entry_index = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
83 t1->fib_index = vec_elt (im->mfib_index_by_sw_if_index,
84 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
85 }
86 from += 2;
87 n_left -= 2;
88 }
89
90 while (n_left >= 1)
91 {
92 mfib_forward_lookup_trace_t * t0;
93 vlib_buffer_t * b0;
94 u32 bi0;
95
96 bi0 = from[0];
97
98 b0 = vlib_get_buffer (vm, bi0);
99
100 if (b0->flags & VLIB_BUFFER_IS_TRACED)
101 {
102 t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
103 t0->entry_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
104 t0->fib_index = vec_elt (im->mfib_index_by_sw_if_index,
105 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
106 }
107 from += 1;
108 n_left -= 1;
109 }
110}
111
/**
 * Next-node indices for the mfib-forward-lookup nodes: every packet
 * proceeds to the RPF-check node.
 */
typedef enum mfib_forward_lookup_next_t_ {
    MFIB_FORWARD_LOOKUP_NEXT_RPF,
    MFIB_FORWARD_LOOKUP_N_NEXT,
} mfib_forward_lookup_next_t;
116
117static uword
118mfib_forward_lookup (vlib_main_t * vm,
119 vlib_node_runtime_t * node,
120 vlib_frame_t * frame,
121 int is_v4)
122{
123 u32 n_left_from, n_left_to_next, * from, * to_next;
124
125 from = vlib_frame_vector_args (frame);
126 n_left_from = frame->n_vectors;
127
128 while (n_left_from > 0)
129 {
130 vlib_get_next_frame (vm, node, MFIB_FORWARD_LOOKUP_NEXT_RPF,
131 to_next, n_left_to_next);
132
133 while (n_left_from > 0 && n_left_to_next > 0)
134 {
135 fib_node_index_t mfei0;
136 vlib_buffer_t * p0;
137 u32 fib_index0;
138 u32 pi0;
139
140 pi0 = from[0];
141 to_next[0] = pi0;
142 from += 1;
143 to_next += 1;
144 n_left_to_next -= 1;
145 n_left_from -= 1;
146
147 p0 = vlib_get_buffer (vm, pi0);
148
149 if (is_v4)
150 {
151 ip4_header_t * ip0;
152
153 fib_index0 = vec_elt (ip4_main.mfib_index_by_sw_if_index,
154 vnet_buffer(p0)->sw_if_index[VLIB_RX]);
155 ip0 = vlib_buffer_get_current (p0);
156 mfei0 = ip4_mfib_table_lookup(ip4_mfib_get(fib_index0),
157 &ip0->src_address,
158 &ip0->dst_address,
159 64);
160 }
161 else
162 {
163 ip6_header_t * ip0;
164
165 fib_index0 = vec_elt (ip6_main.mfib_index_by_sw_if_index,
166 vnet_buffer(p0)->sw_if_index[VLIB_RX]);
167 ip0 = vlib_buffer_get_current (p0);
168 mfei0 = ip6_mfib_table_lookup2(ip6_mfib_get(fib_index0),
169 &ip0->src_address,
170 &ip0->dst_address);
171 }
172
173 vnet_buffer (p0)->ip.adj_index[VLIB_TX] = mfei0;
174 }
175
176 vlib_put_next_frame(vm, node,
177 MFIB_FORWARD_LOOKUP_NEXT_RPF,
178 n_left_to_next);
179 }
180
181 if (node->flags & VLIB_NODE_FLAG_TRACE)
182 mfib_forward_lookup_trace(vm, node, frame);
183
184 return frame->n_vectors;
185}
186
187static uword
188ip4_mfib_forward_lookup (vlib_main_t * vm,
189 vlib_node_runtime_t * node,
190 vlib_frame_t * frame)
191{
192 return (mfib_forward_lookup (vm, node, frame, 1));
193}
194
/**
 * Graph-node registration for the IPv4 multicast FIB lookup node;
 * its single next node is the IPv4 RPF check.
 */
VLIB_REGISTER_NODE (ip4_mfib_forward_lookup_node, static) = {
    .function = ip4_mfib_forward_lookup,
    .name = "ip4-mfib-forward-lookup",
    .vector_size = sizeof (u32),

    .format_trace = format_mfib_forward_lookup_trace,

    .n_next_nodes = MFIB_FORWARD_LOOKUP_N_NEXT,
    .next_nodes = {
        [MFIB_FORWARD_LOOKUP_NEXT_RPF] = "ip4-mfib-forward-rpf",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_mfib_forward_lookup_node,
                              ip4_mfib_forward_lookup)
210
211static uword
212ip6_mfib_forward_lookup (vlib_main_t * vm,
213 vlib_node_runtime_t * node,
214 vlib_frame_t * frame)
215{
216 return (mfib_forward_lookup (vm, node, frame, 0));
217}
218
/**
 * Graph-node registration for the IPv6 multicast FIB lookup node;
 * its single next node is the IPv6 RPF check.
 */
VLIB_REGISTER_NODE (ip6_mfib_forward_lookup_node, static) = {
    .function = ip6_mfib_forward_lookup,
    .name = "ip6-mfib-forward-lookup",
    .vector_size = sizeof (u32),

    .format_trace = format_mfib_forward_lookup_trace,

    .n_next_nodes = MFIB_FORWARD_LOOKUP_N_NEXT,
    .next_nodes = {
        [MFIB_FORWARD_LOOKUP_NEXT_RPF] = "ip6-mfib-forward-rpf",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (ip6_mfib_forward_lookup_node,
                              ip6_mfib_forward_lookup)
234
235
/**
 * Per-packet trace data recorded by the mfib-forward-rpf nodes.
 */
typedef struct mfib_forward_rpf_trace_t_ {
    u32 entry_index;            /**< mfib entry the packet matched */
    u32 sw_if_index;            /**< RX itf in the entry, ~0 if none */
    mfib_itf_flags_t itf_flags; /**< itf flags used for the RPF decision */
} mfib_forward_rpf_trace_t;

/**
 * Next-node indices for the mfib-forward-rpf nodes; accepted packets
 * are sent directly to the entry's replicate DPO, so drop is the only
 * statically registered next.
 */
typedef enum mfib_forward_rpf_next_t_ {
    MFIB_FORWARD_RPF_NEXT_DROP,
    MFIB_FORWARD_RPF_N_NEXT,
} mfib_forward_rpf_next_t;
246
247static u8 *
248format_mfib_forward_rpf_trace (u8 * s, va_list * args)
249{
250 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
251 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
252 mfib_forward_rpf_trace_t * t = va_arg (*args, mfib_forward_rpf_trace_t *);
253
254 s = format (s, "entry %d", t->entry_index);
Neale Rannsce111d22018-01-23 08:38:50 -0800255 s = format (s, " itf %d", t->sw_if_index);
256 s = format (s, " flags %U", format_mfib_itf_flags, t->itf_flags);
Neale Ranns32e1c012016-11-22 17:07:28 +0000257
258 return s;
259}
260
261static int
262mfib_forward_connected_check (vlib_buffer_t * b0,
263 u32 sw_if_index,
264 int is_v4)
265{
266 /*
267 * Lookup the source of the IP packet in the
268 * FIB. return true if the entry is attached.
269 */
270 index_t lbi0;
271
272 if (is_v4)
273 {
274 load_balance_t *lb0;
275 ip4_header_t *ip0;
276
277 ip0 = vlib_buffer_get_current(b0);
278
279 lbi0 = ip4_fib_forwarding_lookup(
280 ip4_fib_table_get_index_for_sw_if_index(
281 sw_if_index),
282 &ip0->src_address);
283 lb0 = load_balance_get(lbi0);
284
285 return (FIB_ENTRY_FLAG_ATTACHED &
286 lb0->lb_fib_entry_flags);
287 }
288 else
289 {
290 ASSERT(0);
291 }
292 return (0);
293}
294
295static void
296mfib_forward_itf_signal (vlib_main_t *vm,
297 const mfib_entry_t *mfe,
298 mfib_itf_t *mfi,
299 vlib_buffer_t *b0)
300{
301 mfib_itf_flags_t old_flags;
302
Sirshak Das2f6d7bb2018-10-03 22:53:51 +0000303 old_flags = clib_atomic_fetch_or(&mfi->mfi_flags,
304 MFIB_ITF_FLAG_SIGNAL_PRESENT);
Neale Ranns32e1c012016-11-22 17:07:28 +0000305
306 if (!(old_flags & MFIB_ITF_FLAG_SIGNAL_PRESENT))
307 {
308 /*
309 * we were the lucky ones to set the signal present flag
310 */
311 if (!(old_flags & MFIB_ITF_FLAG_DONT_PRESERVE))
312 {
313 /*
314 * preserve a copy of the packet for the control
315 * plane to examine.
316 * Only allow one preserved packet at at time, since
317 * when the signal present flag is cleared so is the
318 * preserved packet.
319 */
320 mfib_signal_push(mfe, mfi, b0);
321 }
322 else
323 {
324 /*
325 * The control plane just wants the signal, not the packet as well
326 */
327 mfib_signal_push(mfe, mfi, NULL);
328 }
329 }
330 /*
331 * else
332 * there is already a signal present on this interface that the
333 * control plane has not yet acknowledged
334 */
335}
336
/**
 * Shared (v4/v6) multicast RPF check and signal generation.
 *
 * For each packet: recover the mfib entry chosen by the lookup node,
 * find the entry's interface matching the packet's RX interface, and
 * decide acceptance either by the entry's RPF-ID or by the interface's
 * ACCEPT flag.  Accepted packets are forwarded to the entry's
 * replicate DPO; all others are dropped with an RPF-failure error.
 * CONNECTED/SIGNAL entry flags additionally raise control-plane
 * signals.
 *
 * NOTE(review): error0 is an IP4_ERROR_* value but is used to index
 * the ip6 error node when !is_v4 — confirm the two error enums agree
 * at these indices.
 */
always_inline uword
mfib_forward_rpf (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame,
                  int is_v4)
{
    u32 n_left_from, n_left_to_next, * from, * to_next;
    mfib_forward_rpf_next_t next;
    vlib_node_runtime_t *error_node;

    /* error counters live on the protocol's input node */
    if (is_v4)
        error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
    else
        error_node = vlib_node_get_runtime (vm, ip6_input_node.index);
    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;
    next = MFIB_FORWARD_RPF_NEXT_DROP;

    while (n_left_from > 0)
    {
        vlib_get_next_frame (vm, node, next,
                             to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            fib_node_index_t mfei0;
            const mfib_entry_t *mfe0;
            mfib_itf_t *mfi0;
            vlib_buffer_t * b0;
            u32 pi0, next0;
            mfib_itf_flags_t iflags0;
            mfib_entry_flags_t eflags0;
            u8 error0;

            pi0 = from[0];
            to_next[0] = pi0;
            from += 1;
            to_next += 1;
            n_left_to_next -= 1;
            n_left_from -= 1;

            error0 = IP4_ERROR_NONE;
            b0 = vlib_get_buffer (vm, pi0);
            /* the entry index stashed by the lookup node */
            mfei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            mfe0 = mfib_entry_get(mfei0);
            /* may be NULL if the entry has no itf on the RX interface */
            mfi0 = mfib_entry_get_itf(mfe0,
                                      vnet_buffer(b0)->sw_if_index[VLIB_RX]);

            /*
             * throughout this function we are 'PREDICT' optimising
             * for the case of throughput traffic that is not replicated
             * to the host stack nor sets local flags
             */

            /*
             * If the mfib entry has a configured RPF-ID check that
             * in preference to an interface based RPF
             */
            if (MFIB_RPF_ID_NONE != mfe0->mfe_rpf_id)
            {
                /* synthesise ACCEPT iff the packet's rpf_id matches */
                iflags0 = (mfe0->mfe_rpf_id == vnet_buffer(b0)->ip.rpf_id ?
                           MFIB_ITF_FLAG_ACCEPT :
                           MFIB_ITF_FLAG_NONE);
            }
            else
            {
                if (PREDICT_TRUE(NULL != mfi0))
                {
                    iflags0 = mfi0->mfi_flags;
                }
                else
                {
                    iflags0 = MFIB_ITF_FLAG_NONE;
                }
            }
            eflags0 = mfe0->mfe_flags;

            if (PREDICT_FALSE(eflags0 & MFIB_ENTRY_FLAG_CONNECTED))
            {
                /*
                 * lookup the source in the unicast FIB - check it
                 * matches a connected.
                 */
                if (mfib_forward_connected_check(
                        b0,
                        vnet_buffer(b0)->sw_if_index[VLIB_RX],
                        is_v4))
                {
                    mfib_forward_itf_signal(vm, mfe0, mfi0, b0);
                }
            }
            if (PREDICT_FALSE((eflags0 & MFIB_ENTRY_FLAG_SIGNAL) ^
                              (iflags0 & MFIB_ITF_FLAG_NEGATE_SIGNAL)))
            {
                /*
                 * Entry signal XOR interface negate-signal
                 */
                if (NULL != mfi0)
                {
                    mfib_forward_itf_signal(vm, mfe0, mfi0, b0);
                }
            }

            if (PREDICT_TRUE((iflags0 & MFIB_ITF_FLAG_ACCEPT) ||
                             (eflags0 & MFIB_ENTRY_FLAG_ACCEPT_ALL_ITF)))
            {
                /*
                 * This interface is accepting packets for the matching entry
                 */
                next0 = mfe0->mfe_rep.dpoi_next_node;

                /* re-purpose adj_index to carry the replicate DPO index */
                vnet_buffer(b0)->ip.adj_index[VLIB_TX] =
                    mfe0->mfe_rep.dpoi_index;
            }
            else
            {
                next0 = MFIB_FORWARD_RPF_NEXT_DROP;
                error0 = IP4_ERROR_RPF_FAILURE;
            }

            b0->error = error0 ? error_node->errors[error0] : 0;

            if (b0->flags & VLIB_BUFFER_IS_TRACED)
            {
                mfib_forward_rpf_trace_t *t0;

                t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
                t0->entry_index = mfei0;
                t0->itf_flags = iflags0;
                if (NULL == mfi0)
                {
                    t0->sw_if_index = ~0;
                }
                else
                {
                    t0->sw_if_index = mfi0->mfi_sw_if_index;
                }
            }
            vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                             to_next, n_left_to_next,
                                             pi0, next0);
        }

        vlib_put_next_frame(vm, node, next, n_left_to_next);
    }

    return frame->n_vectors;
}
485
486static uword
487ip4_mfib_forward_rpf (vlib_main_t * vm,
488 vlib_node_runtime_t * node,
489 vlib_frame_t * frame)
490{
491 return (mfib_forward_rpf(vm, node, frame, 1));
492}
493
494
/**
 * Graph-node registration for the IPv4 multicast RPF-check node;
 * drop is the only static next - accepted packets go straight to
 * the entry's replicate DPO next-node index.
 */
VLIB_REGISTER_NODE (ip4_mfib_forward_rpf_node, static) = {
    .function = ip4_mfib_forward_rpf,
    .name = "ip4-mfib-forward-rpf",
    .vector_size = sizeof (u32),

    .format_trace = format_mfib_forward_rpf_trace,

    .n_next_nodes = MFIB_FORWARD_RPF_N_NEXT,
    .next_nodes = {
        [MFIB_FORWARD_RPF_NEXT_DROP] = "ip4-drop",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_mfib_forward_rpf_node,
                              ip4_mfib_forward_rpf)
510
511static uword
512ip6_mfib_forward_rpf (vlib_main_t * vm,
513 vlib_node_runtime_t * node,
514 vlib_frame_t * frame)
515{
516 return (mfib_forward_rpf(vm, node, frame, 1));
517}
518
519
/**
 * Graph-node registration for the IPv6 multicast RPF-check node;
 * drop is the only static next - accepted packets go straight to
 * the entry's replicate DPO next-node index.
 */
VLIB_REGISTER_NODE (ip6_mfib_forward_rpf_node, static) = {
    .function = ip6_mfib_forward_rpf,
    .name = "ip6-mfib-forward-rpf",
    .vector_size = sizeof (u32),

    .format_trace = format_mfib_forward_rpf_trace,

    .n_next_nodes = MFIB_FORWARD_RPF_N_NEXT,
    .next_nodes = {
        [MFIB_FORWARD_RPF_NEXT_DROP] = "ip6-drop",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (ip6_mfib_forward_rpf_node,
                              ip6_mfib_forward_rpf)
535