blob: 889317fd83c9412aae8400bbdaef2e124d4703db [file] [log] [blame]
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16#include <vlib/vlib.h>
17#include <vnet/vnet.h>
18#include <vnet/ip/format.h>
19#include <vnet/ip/ip.h>
20#include <vnet/dpo/drop_dpo.h>
21#include <vnet/dpo/receive_dpo.h>
22#include <vnet/dpo/load_balance_map.h>
23#include <vnet/dpo/lookup_dpo.h>
Neale Ranns0f26c5a2017-03-01 15:12:11 -080024#include <vnet/dpo/interface_dpo.h>
25#include <vnet/dpo/mpls_disposition.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010026
27#include <vnet/adj/adj.h>
Neale Ranns32e1c012016-11-22 17:07:28 +000028#include <vnet/adj/adj_mcast.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010029
Neale Ranns3ee44042016-10-03 13:05:48 +010030#include <vnet/fib/fib_path.h>
31#include <vnet/fib/fib_node.h>
32#include <vnet/fib/fib_table.h>
33#include <vnet/fib/fib_entry.h>
34#include <vnet/fib/fib_path_list.h>
35#include <vnet/fib/fib_internal.h>
36#include <vnet/fib/fib_urpf_list.h>
Neale Rannsa3af3372017-03-28 03:49:52 -070037#include <vnet/fib/mpls_fib.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010038
/**
 * Enumeration of path types
 */
typedef enum fib_path_type_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_TYPE_FIRST = 0,
    /**
     * Attached-nexthop. An interface and a nexthop are known.
     */
    FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
    /**
     * attached. Only the interface is known.
     */
    FIB_PATH_TYPE_ATTACHED,
    /**
     * recursive. Only the next-hop is known.
     */
    FIB_PATH_TYPE_RECURSIVE,
    /**
     * special. nothing is known. so we drop.
     */
    FIB_PATH_TYPE_SPECIAL,
    /**
     * exclusive. user provided adj.
     */
    FIB_PATH_TYPE_EXCLUSIVE,
    /**
     * deag. Link to a lookup adj in the next table
     */
    FIB_PATH_TYPE_DEAG,
    /**
     * interface receive.
     */
    FIB_PATH_TYPE_INTF_RX,
    /**
     * receive. it's for-us.
     */
    FIB_PATH_TYPE_RECEIVE,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
} __attribute__ ((packed)) fib_path_type_t;
84
/**
 * The maximum number of path_types
 */
#define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)

/**
 * Type-indexed display names, used as the initializer for
 * fib_path_type_names below.
 */
#define FIB_PATH_TYPES {					\
    [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",	\
    [FIB_PATH_TYPE_ATTACHED]          = "attached",		\
    [FIB_PATH_TYPE_RECURSIVE]         = "recursive",		\
    [FIB_PATH_TYPE_SPECIAL]           = "special",		\
    [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",		\
    [FIB_PATH_TYPE_DEAG]              = "deag",			\
    [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",		\
    [FIB_PATH_TYPE_RECEIVE]           = "receive",		\
}

/**
 * Iterate over each path type, first to last inclusive.
 */
#define FOR_EACH_FIB_PATH_TYPE(_item) \
    for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
103
/**
 * Enumeration of path operational (i.e. derived) attributes
 */
typedef enum fib_path_oper_attribute_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
    /**
     * The path forms part of a recursive loop.
     */
    FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
    /**
     * The path is resolved
     */
    FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
    /**
     * The path is attached, despite what the next-hop may say.
     */
    FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
    /**
     * The path has become a permanent drop.
     */
    FIB_PATH_OPER_ATTRIBUTE_DROP,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
} __attribute__ ((packed)) fib_path_oper_attribute_t;
133
/**
 * The maximum number of path operational attributes
 */
#define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)

/**
 * Attribute-indexed display names, used as the initializer for
 * fib_path_oper_attribute_names below.
 *
 * Every attribute in fib_path_oper_attribute_t must have an entry here:
 * format_fib_path iterates FOR_EACH_FIB_PATH_OPER_ATTRIBUTE and prints
 * the name of each set flag with "%s", so a missing entry means a NULL
 * string is handed to the formatter. The "attached" entry was missing.
 */
#define FIB_PATH_OPER_ATTRIBUTES {					\
    [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",	\
    [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",		\
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",		\
    [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",			\
}

/**
 * Iterate over each path operational attribute, first to last inclusive.
 */
#define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
    for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
	 _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
	 _item++)
149
/**
 * Path flags from the attributes (one bit per operational attribute)
 */
typedef enum fib_path_oper_flags_t_ {
    FIB_PATH_OPER_FLAG_NONE = 0,
    FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
    FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
    FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
    FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
} __attribute__ ((packed)) fib_path_oper_flags_t;
160
/**
 * A FIB path
 *
 * Paths are not shared directly; they are shared via path-lists. The
 * region between the two STRUCT_MARKs is what the path-list hashes on.
 */
typedef struct fib_path_t_ {
    /**
     * A path is a node in the FIB graph.
     */
    fib_node_t fp_node;

    /**
     * The index of the path-list to which this path belongs
     */
    u32 fp_pl_index;

    /**
     * This marks the start of the memory area used to hash
     * the path
     */
    STRUCT_MARK(path_hash_start);

    /**
     * Configuration Flags
     */
    fib_path_cfg_flags_t fp_cfg_flags;

    /**
     * The type of the path. This is the selector for the union
     */
    fib_path_type_t fp_type;

    /**
     * The protocol of the next-hop, i.e. the address family of the
     * next-hop's address. We can't derive this from the address itself
     * since the address can be all zeros
     */
    fib_protocol_t fp_nh_proto;

    /**
     * UCMP [unnormalised] weight
     */
    u32 fp_weight;

    /**
     * per-type union of the data required to resolve the path
     */
    union {
	struct {
	    /**
	     * The next-hop
	     */
	    ip46_address_t fp_nh;
	    /**
	     * The interface
	     */
	    u32 fp_interface;
	} attached_next_hop;
	struct {
	    /**
	     * The interface
	     */
	    u32 fp_interface;
	} attached;
	struct {
	    union
	    {
		/**
		 * The next-hop
		 */
		ip46_address_t fp_ip;
		struct {
		    /**
		     * The local label to resolve through.
		     */
		    mpls_label_t fp_local_label;
		    /**
		     * The EOS bit of the resolving label
		     */
		    mpls_eos_bit_t fp_eos;
		};
	    } fp_nh;
	    /**
	     * The FIB table index in which to find the next-hop.
	     */
	    fib_node_index_t fp_tbl_id;
	} recursive;
	struct {
	    /**
	     * The FIB index in which to perform the next lookup
	     */
	    fib_node_index_t fp_tbl_id;
	    /**
	     * The RPF-ID to tag the packets with
	     */
	    fib_rpf_id_t fp_rpf_id;
	} deag;
	struct {
	} special;
	struct {
	    /**
	     * The user provided 'exclusive' DPO
	     */
	    dpo_id_t fp_ex_dpo;
	} exclusive;
	struct {
	    /**
	     * The interface on which the local address is configured
	     */
	    u32 fp_interface;
	    /**
	     * The next-hop
	     */
	    ip46_address_t fp_addr;
	} receive;
	struct {
	    /**
	     * The interface on which the packets will be input.
	     */
	    u32 fp_interface;
	} intf_rx;
    };
    STRUCT_MARK(path_hash_end);

    /**
     * Members in this last section represent information that is
     * derived during resolution. It should not be copied to new paths
     * nor compared.
     */

    /**
     * Operational Flags
     */
    fib_path_oper_flags_t fp_oper_flags;

    /**
     * the resolving via fib. not part of the union, since it is not part
     * of the path's hash.
     */
    fib_node_index_t fp_via_fib;

    /**
     * The Data-path objects through which this path resolves for IP.
     */
    dpo_id_t fp_dpo;

    /**
     * the index of this path in the parent's child list.
     */
    u32 fp_sibling;
} fib_path_t;
310
/*
 * Array of strings/names for the path types and attributes,
 * indexed by the corresponding enum values.
 */
static const char *fib_path_type_names[] = FIB_PATH_TYPES;
static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
static const char *fib_path_cfg_attribute_names[] = FIB_PATH_CFG_ATTRIBUTES;

/*
 * The memory pool from which we allocate all the paths
 */
static fib_path_t *fib_path_pool;
322
/*
 * Debug macro: when FIB_DEBUG is defined, log the formatted path via
 * clib_warning; otherwise it compiles to nothing.
 */
#ifdef FIB_DEBUG
#define FIB_PATH_DBG(_p, _fmt, _args...)			\
{								\
    u8 *_tmp = NULL;						\
    _tmp = fib_path_format(fib_path_get_index(_p), _tmp);	\
    clib_warning("path:[%d:%s]:" _fmt,				\
		 fib_path_get_index(_p), _tmp,			\
		 ##_args);					\
    vec_free(_tmp);						\
}
#else
#define FIB_PATH_DBG(_p, _fmt, _args...)
#endif
339
/**
 * Get a path object from its pool index.
 */
static fib_path_t *
fib_path_get (fib_node_index_t index)
{
    return (pool_elt_at_index(fib_path_pool, index));
}
345
346static fib_node_index_t
347fib_path_get_index (fib_path_t *path)
348{
349 return (path - fib_path_pool);
350}
351
/**
 * Get a path, by pool index, as its base graph-node type.
 */
static fib_node_t *
fib_path_get_node (fib_node_index_t index)
{
    return ((fib_node_t*)fib_path_get(index));
}
357
/**
 * Downcast a graph node to a path.
 * The cast is valid because fp_node is the first member of fib_path_t;
 * the node type is checked in debug builds.
 */
static fib_path_t*
fib_path_from_fib_node (fib_node_t *node)
{
#if CLIB_DEBUG > 0
    ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
#endif
    return ((fib_path_t*)node);
}
366
367u8 *
368format_fib_path (u8 * s, va_list * args)
369{
370 fib_path_t *path = va_arg (*args, fib_path_t *);
371 vnet_main_t * vnm = vnet_get_main();
372 fib_path_oper_attribute_t oattr;
373 fib_path_cfg_attribute_t cattr;
374
375 s = format (s, " index:%d ", fib_path_get_index(path));
376 s = format (s, "pl-index:%d ", path->fp_pl_index);
377 s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto);
378 s = format (s, "weight=%d ", path->fp_weight);
379 s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
380 if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
381 s = format(s, " oper-flags:");
382 FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
383 if ((1<<oattr) & path->fp_oper_flags) {
384 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
385 }
386 }
387 }
388 if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
389 s = format(s, " cfg-flags:");
390 FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
391 if ((1<<cattr) & path->fp_cfg_flags) {
392 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
393 }
394 }
395 }
396 s = format(s, "\n ");
397
398 switch (path->fp_type)
399 {
400 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
401 s = format (s, "%U", format_ip46_address,
402 &path->attached_next_hop.fp_nh,
403 IP46_TYPE_ANY);
404 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
405 {
406 s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
407 }
408 else
409 {
410 s = format (s, " %U",
411 format_vnet_sw_interface_name,
412 vnm,
413 vnet_get_sw_interface(
414 vnm,
415 path->attached_next_hop.fp_interface));
416 if (vnet_sw_interface_is_p2p(vnet_get_main(),
417 path->attached_next_hop.fp_interface))
418 {
419 s = format (s, " (p2p)");
420 }
421 }
422 if (!dpo_id_is_valid(&path->fp_dpo))
423 {
424 s = format(s, "\n unresolved");
425 }
426 else
427 {
428 s = format(s, "\n %U",
429 format_dpo_id,
430 &path->fp_dpo, 13);
431 }
432 break;
433 case FIB_PATH_TYPE_ATTACHED:
434 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
435 {
436 s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
437 }
438 else
439 {
440 s = format (s, " %U",
441 format_vnet_sw_interface_name,
442 vnm,
443 vnet_get_sw_interface(
444 vnm,
445 path->attached.fp_interface));
446 }
447 break;
448 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsad422ed2016-11-02 14:20:04 +0000449 if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
450 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800451 s = format (s, "via %U %U",
Neale Rannsad422ed2016-11-02 14:20:04 +0000452 format_mpls_unicast_label,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800453 path->recursive.fp_nh.fp_local_label,
454 format_mpls_eos_bit,
455 path->recursive.fp_nh.fp_eos);
Neale Rannsad422ed2016-11-02 14:20:04 +0000456 }
457 else
458 {
459 s = format (s, "via %U",
460 format_ip46_address,
461 &path->recursive.fp_nh.fp_ip,
462 IP46_TYPE_ANY);
463 }
464 s = format (s, " in fib:%d",
465 path->recursive.fp_tbl_id,
466 path->fp_via_fib);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100467 s = format (s, " via-fib:%d", path->fp_via_fib);
468 s = format (s, " via-dpo:[%U:%d]",
469 format_dpo_type, path->fp_dpo.dpoi_type,
470 path->fp_dpo.dpoi_index);
471
472 break;
473 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800474 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100475 case FIB_PATH_TYPE_SPECIAL:
476 case FIB_PATH_TYPE_DEAG:
477 case FIB_PATH_TYPE_EXCLUSIVE:
478 if (dpo_id_is_valid(&path->fp_dpo))
479 {
480 s = format(s, "%U", format_dpo_id,
481 &path->fp_dpo, 2);
482 }
483 break;
484 }
485 return (s);
486}
487
488u8 *
489fib_path_format (fib_node_index_t pi, u8 *s)
490{
491 fib_path_t *path;
492
493 path = fib_path_get(pi);
494 ASSERT(NULL != path);
495
496 return (format (s, "%U", format_fib_path, path));
497}
498
499u8 *
500fib_path_adj_format (fib_node_index_t pi,
501 u32 indent,
502 u8 *s)
503{
504 fib_path_t *path;
505
506 path = fib_path_get(pi);
507 ASSERT(NULL != path);
508
509 if (!dpo_id_is_valid(&path->fp_dpo))
510 {
511 s = format(s, " unresolved");
512 }
513 else
514 {
515 s = format(s, "%U", format_dpo_id,
516 &path->fp_dpo, 2);
517 }
518
519 return (s);
520}
521
/*
 * fib_path_last_lock_gone
 *
 * We don't share paths, we share path lists, so the [un]lock functions
 * are no-ops. Reaching this callback indicates a locking bug, hence
 * the assert.
 */
static void
fib_path_last_lock_gone (fib_node_t *node)
{
    ASSERT(0);
}
533
534static const adj_index_t
535fib_path_attached_next_hop_get_adj (fib_path_t *path,
Neale Ranns924d03a2016-10-19 08:25:46 +0100536 vnet_link_t link)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100537{
538 if (vnet_sw_interface_is_p2p(vnet_get_main(),
539 path->attached_next_hop.fp_interface))
540 {
541 /*
542 * if the interface is p2p then the adj for the specific
543 * neighbour on that link will never exist. on p2p links
544 * the subnet address (the attached route) links to the
545 * auto-adj (see below), we want that adj here too.
546 */
547 return (adj_nbr_add_or_lock(path->fp_nh_proto,
548 link,
549 &zero_addr,
550 path->attached_next_hop.fp_interface));
551 }
552 else
553 {
554 return (adj_nbr_add_or_lock(path->fp_nh_proto,
555 link,
556 &path->attached_next_hop.fp_nh,
557 path->attached_next_hop.fp_interface));
558 }
559}
560
/**
 * Resolve an attached-next-hop path via its adjacency and register as a
 * child of that adjacency so the path is back-walked when the rewrite
 * changes.
 */
static void
fib_path_attached_next_hop_set (fib_path_t *path)
{
    /*
     * resolve directly via the adjacency described by the
     * interface and next-hop
     */
    dpo_set(&path->fp_dpo,
	    DPO_ADJACENCY,
	    fib_proto_to_dpo(path->fp_nh_proto),
	    fib_path_attached_next_hop_get_adj(
		path,
		fib_proto_to_link(path->fp_nh_proto)));

    /*
     * become a child of the adjacency so we receive updates
     * when its rewrite changes
     */
    path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
				     FIB_NODE_TYPE_PATH,
				     fib_path_get_index(path));

    /* an admin-down interface or a down adjacency leaves the path
     * unresolved */
    if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
				       path->attached_next_hop.fp_interface) ||
	!adj_is_up(path->fp_dpo.dpoi_index))
    {
	path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
    }
}
590
/*
 * create or update the path's recursive adj
 *
 * Fetch the forwarding contributed by the via-entry for the requested
 * chain type, apply the path's recursion constraints (loop detection,
 * resolve-via-host, resolve-via-attached, via-entry resolvedness) and
 * copy the result into *dpo. If any constraint fails, the path is
 * marked unresolved and contributes a drop instead.
 */
static void
fib_path_recursive_adj_update (fib_path_t *path,
			       fib_forward_chain_type_t fct,
			       dpo_id_t *dpo)
{
    dpo_id_t via_dpo = DPO_INVALID;

    /*
     * get the DPO to resolve through from the via-entry
     */
    fib_entry_contribute_forwarding(path->fp_via_fib,
				    fct,
				    &via_dpo);


    /*
     * hope for the best - clear if restrictions apply.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * Validate any recursion constraints and over-ride the via
     * adj if not met
     */
    if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
    {
	path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
    {
	/*
	 * the via FIB must be a host route.
	 * note the via FIB just added will always be a host route
	 * since it is an RR source added host route. So what we need to
	 * check is whether the route has other sources. If it does then
	 * some other source has added it as a host route. If it doesn't
	 * then it was added only here and inherits forwarding from a cover.
	 * the cover is not a host route.
	 * The RR source is the lowest priority source, so we check if it
	 * is the best. if it is there are no other sources.
	 */
	if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
	{
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	    dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

	    /*
	     * PIC edge trigger. let the load-balance maps know
	     */
	    load_balance_map_path_state_change(fib_path_get_index(path));
	}
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
    {
	/*
	 * RR source entries inherit the flags from the cover, so
	 * we can check the via directly
	 */
	if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
	{
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	    dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

	    /*
	     * PIC edge trigger. let the load-balance maps know
	     */
	    load_balance_map_path_state_change(fib_path_get_index(path));
	}
    }
    /*
     * check for over-riding factors on the FIB entry itself
     */
    if (!fib_entry_is_resolved(path->fp_via_fib))
    {
	path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

	/*
	 * PIC edge trigger. let the load-balance maps know
	 */
	load_balance_map_path_state_change(fib_path_get_index(path));
    }

    /*
     * update the path's contributed DPO
     */
    dpo_copy(dpo, &via_dpo);

    FIB_PATH_DBG(path, "recursive update: %U",
		 fib_get_lookup_main(path->fp_nh_proto),
		 &path->fp_dpo, 2);

    dpo_reset(&via_dpo);
}
689
690/*
691 * fib_path_is_permanent_drop
692 *
693 * Return !0 if the path is configured to permanently drop,
694 * despite other attributes.
695 */
696static int
697fib_path_is_permanent_drop (fib_path_t *path)
698{
699 return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
700 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
701}
702
/*
 * fib_path_unresolve
 *
 * Remove our dependency on the resolution target: detach from the
 * via-entry or adjacency (per path type), then reset the contributed
 * DPO and clear the resolved flag.
 */
static void
fib_path_unresolve (fib_path_t *path)
{
    /*
     * the forced drop path does not need unresolving
     */
    if (fib_path_is_permanent_drop(path))
    {
	return;
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
	if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
	{
	    fib_prefix_t pfx;

	    /* stop tracking the via-entry and remove the RR source
	     * this path added for it */
	    fib_entry_get_prefix(path->fp_via_fib, &pfx);
	    fib_entry_child_remove(path->fp_via_fib,
				   path->fp_sibling);
	    fib_table_entry_special_remove(path->recursive.fp_tbl_id,
					   &pfx,
					   FIB_SOURCE_RR);
	    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
	}
	break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
	/* drop the child registration and the lock taken on the adj */
	adj_child_remove(path->fp_dpo.dpoi_index,
			 path->fp_sibling);
	adj_unlock(path->fp_dpo.dpoi_index);
	break;
    case FIB_PATH_TYPE_EXCLUSIVE:
	dpo_reset(&path->exclusive.fp_ex_dpo);
	break;
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_DEAG:
	/*
	 * these hold only the path's DPO, which is reset below.
	 */
	break;
    }

    /*
     * release the adj we were holding and pick up the
     * drop just in case.
     */
    dpo_reset(&path->fp_dpo);
    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;

    return;
}
763
764static fib_forward_chain_type_t
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800765fib_path_to_chain_type (const fib_path_t *path)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100766{
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800767 switch (path->fp_nh_proto)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100768 {
769 case FIB_PROTOCOL_IP4:
770 return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
771 case FIB_PROTOCOL_IP6:
772 return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
773 case FIB_PROTOCOL_MPLS:
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800774 if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
775 MPLS_EOS == path->recursive.fp_nh.fp_eos)
776 {
777 return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
778 }
779 else
780 {
Neale Ranns9f171f52017-04-11 08:56:53 -0700781 return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800782 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100783 }
784 return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
785}
786
/*
 * fib_path_back_walk_notify
 *
 * A back walk has reached this path. React to the walk reason per path
 * type (re-resolve, mark [un]resolved, or permanently drop), then
 * propagate the walk onward to the owning path-list unless an early
 * return says the children need not be told.
 */
static fib_node_back_walk_rc_t
fib_path_back_walk_notify (fib_node_t *node,
			   fib_node_back_walk_ctx_t *ctx)
{
    fib_path_t *path;

    path = fib_path_from_fib_node(node);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
	if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
	{
	    /*
	     * modify the recursive adjacency to use the new forwarding
	     * of the via-fib.
	     * this update is visible to packets in flight in the DP.
	     */
	    fib_path_recursive_adj_update(
		path,
		fib_path_to_chain_type(path),
		&path->fp_dpo);
	}
	if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
	    (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
	{
	    /*
	     * ADJ updates (complete<->incomplete) do not need to propagate to
	     * recursive entries.
	     * The only reason its needed as far back as here, is that the adj
	     * and the incomplete adj are a different DPO type, so the LBs need
	     * to re-stack.
	     * If this walk was quashed in the fib_entry, then any non-fib_path
	     * children (like tunnels that collapse out the LB when they stack)
	     * would not see the update.
	     */
	    return (FIB_NODE_BACK_WALK_CONTINUE);
	}
	break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
	/*
	 * FIXME comment
	 * ADJ_UPDATE backwalk pass silently through here and up to
	 * the path-list when the multipath adj collapse occurs.
	 * The reason we do this is that the assumption is that VPP
	 * runs in an environment where the Control-Plane is remote
	 * and hence reacts slowly to link up down. In order to remove
	 * this down link from the ECMP set quickly, we back-walk.
	 * VPP also has dedicated CPUs, so we are not stealing resources
	 * from the CP to do so.
	 */
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
	{
	    if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
	    {
		/*
		 * already resolved. no need to walk back again
		 */
		return (FIB_NODE_BACK_WALK_CONTINUE);
	    }
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
	}
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
	{
	    if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
	    {
		/*
		 * already unresolved. no need to walk back again
		 */
		return (FIB_NODE_BACK_WALK_CONTINUE);
	    }
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	}
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
	{
	    /*
	     * The interface this path resolves through has been deleted.
	     * This will leave the path in a permanent drop state. The route
	     * needs to be removed and readded (and hence the path-list deleted)
	     * before it can forward again.
	     */
	    fib_path_unresolve(path);
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
	}
	if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
	{
	    /*
	     * restack the DPO to pick up the correct DPO sub-type
	     */
	    uword if_is_up;
	    adj_index_t ai;

	    if_is_up = vnet_sw_interface_is_admin_up(
			   vnet_get_main(),
			   path->attached_next_hop.fp_interface);

	    ai = fib_path_attached_next_hop_get_adj(
		     path,
		     fib_proto_to_link(path->fp_nh_proto));

	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	    if (if_is_up && adj_is_up(ai))
	    {
		path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
	    }

	    /* dpo_set takes its own lock on the adj; drop ours */
	    dpo_set(&path->fp_dpo, DPO_ADJACENCY,
		    fib_proto_to_dpo(path->fp_nh_proto),
		    ai);
	    adj_unlock(ai);

	    if (!if_is_up)
	    {
		/*
		 * If the interface is not up there is no reason to walk
		 * back to children. if we did they would only evaluate
		 * that this path is unresolved and hence it would
		 * not contribute the adjacency - so it would be wasted
		 * CPU time.
		 */
		return (FIB_NODE_BACK_WALK_CONTINUE);
	    }
	}
	if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
	{
	    if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
	    {
		/*
		 * already unresolved. no need to walk back again
		 */
		return (FIB_NODE_BACK_WALK_CONTINUE);
	    }
	    /*
	     * the adj has gone down. the path is no longer resolved.
	     */
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	}
	break;
    case FIB_PATH_TYPE_ATTACHED:
	/*
	 * FIXME; this could schedule a lower priority walk, since attached
	 * routes are not usually in ECMP configurations so the backwalk to
	 * the FIB entry does not need to be high priority
	 */
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
	{
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
	}
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
	{
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	}
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
	{
	    fib_path_unresolve(path);
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
	}
	break;
    case FIB_PATH_TYPE_INTF_RX:
	ASSERT(0);
	/* fallthrough - unexpected, treated as the parent-less types below */
    case FIB_PATH_TYPE_DEAG:
	/*
	 * FIXME When VRF delete is allowed this will need a poke.
	 */
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_EXCLUSIVE:
	/*
	 * these path types have no parents. so to be
	 * walked from one is unexpected.
	 */
	ASSERT(0);
	break;
    }

    /*
     * propagate the backwalk further to the path-list
     */
    fib_path_list_back_walk(path->fp_pl_index, ctx);

    return (FIB_NODE_BACK_WALK_CONTINUE);
}
974
/**
 * Report the path pool's memory usage (fnv_mem_show callback).
 */
static void
fib_path_memory_show (void)
{
    fib_show_memory_usage("Path",
			  pool_elts(fib_path_pool),
			  pool_len(fib_path_pool),
			  sizeof(fib_path_t));
}
983
/*
 * The FIB path's graph node virtual function table,
 * registered with the fib_node infrastructure.
 */
static const fib_node_vft_t fib_path_vft = {
    .fnv_get = fib_path_get_node,
    .fnv_last_lock = fib_path_last_lock_gone,
    .fnv_back_walk = fib_path_back_walk_notify,
    .fnv_mem_show = fib_path_memory_show,
};
993
994static fib_path_cfg_flags_t
995fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
996{
Neale Ranns450cd302016-11-09 17:49:42 +0000997 fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100998
999 if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1000 cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1001 if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1002 cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
Neale Ranns32e1c012016-11-22 17:07:28 +00001003 if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1004 cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
Neale Ranns4b919a52017-03-11 05:55:21 -08001005 if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1006 cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001007 if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1008 cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1009 if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1010 cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1011 if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1012 cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1013 if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1014 cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001015
1016 return (cfg_flags);
1017}
1018
/*
 * fib_path_create
 *
 * Create and initialise a new path object from a client supplied
 * route-path description. The path's type is deduced from the
 * attributes given. return the index of the path.
 */
fib_node_index_t
fib_path_create (fib_node_index_t pl_index,
                 const fib_route_path_t *rpath)
{
    fib_path_t *path;

    pool_get(fib_path_pool, path);
    memset(path, 0, sizeof(*path));

    fib_node_init(&path->fp_node,
                  FIB_NODE_TYPE_PATH);

    dpo_reset(&path->fp_dpo);
    path->fp_pl_index = pl_index;
    path->fp_nh_proto = rpath->frp_proto;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    path->fp_weight = rpath->frp_weight;
    if (0 == path->fp_weight)
    {
        /*
         * a weight of 0 is a meaningless value. We could either reject it, and thus force
         * clients to always use 1, or we can accept it and fixup appropriately.
         */
        path->fp_weight = 1;
    }
    path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);

    /*
     * deduce the path's type from the parameters and save what is needed.
     * The order of the checks matters: explicit flags (local, intf-rx,
     * rpf-id) take precedence over the interface/address heuristics below.
     */
    if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
    {
        path->fp_type = FIB_PATH_TYPE_RECEIVE;
        path->receive.fp_interface = rpath->frp_sw_if_index;
        path->receive.fp_addr = rpath->frp_addr;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
    {
        path->fp_type = FIB_PATH_TYPE_INTF_RX;
        path->intf_rx.fp_interface = rpath->frp_sw_if_index;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
    {
        path->fp_type = FIB_PATH_TYPE_DEAG;
        path->deag.fp_tbl_id = rpath->frp_fib_index;
        path->deag.fp_rpf_id = rpath->frp_rpf_id;
    }
    else if (~0 != rpath->frp_sw_if_index)
    {
        /*
         * an interface is supplied: no next-hop address => attached,
         * with a next-hop address => attached-next-hop.
         */
        if (ip46_address_is_zero(&rpath->frp_addr))
        {
            path->fp_type = FIB_PATH_TYPE_ATTACHED;
            path->attached.fp_interface = rpath->frp_sw_if_index;
        }
        else
        {
            path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
            path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
            path->attached_next_hop.fp_nh = rpath->frp_addr;
        }
    }
    else
    {
        if (ip46_address_is_zero(&rpath->frp_addr))
        {
            if (~0 == rpath->frp_fib_index)
            {
                /* no interface, no address, no table: special (drop-like) */
                path->fp_type = FIB_PATH_TYPE_SPECIAL;
            }
            else
            {
                /* a table but no address: deaggregation/lookup path */
                path->fp_type = FIB_PATH_TYPE_DEAG;
                path->deag.fp_tbl_id = rpath->frp_fib_index;
            }
        }
        else
        {
            path->fp_type = FIB_PATH_TYPE_RECURSIVE;
            if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
            {
                /* MPLS recursion is keyed on label + EOS bit, not an address */
                path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
                path->recursive.fp_nh.fp_eos = rpath->frp_eos;
            }
            else
            {
                path->recursive.fp_nh.fp_ip = rpath->frp_addr;
            }
            path->recursive.fp_tbl_id = rpath->frp_fib_index;
        }
    }

    FIB_PATH_DBG(path, "create");

    return (fib_path_get_index(path));
}
1120
/*
 * fib_path_create_special
 *
 * Create and initialise a new path object that is not described by a
 * next-hop: a drop, a local/receive, or a client-provided exclusive DPO.
 * return the index of the path.
 */
fib_node_index_t
fib_path_create_special (fib_node_index_t pl_index,
                         fib_protocol_t nh_proto,
                         fib_path_cfg_flags_t flags,
                         const dpo_id_t *dpo)
{
    fib_path_t *path;

    pool_get(fib_path_pool, path);
    memset(path, 0, sizeof(*path));

    fib_node_init(&path->fp_node,
                  FIB_NODE_TYPE_PATH);
    dpo_reset(&path->fp_dpo);

    path->fp_pl_index = pl_index;
    path->fp_weight = 1;
    path->fp_nh_proto = nh_proto;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    path->fp_cfg_flags = flags;

    if (FIB_PATH_CFG_FLAG_DROP & flags)
    {
        path->fp_type = FIB_PATH_TYPE_SPECIAL;
    }
    else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
    {
        path->fp_type = FIB_PATH_TYPE_RECEIVE;
        /* NOTE(review): writes attached.fp_interface for a RECEIVE path -
         * presumably this aliases receive.fp_interface through the union;
         * confirm against the fib_path_t definition. */
        path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
    }
    else
    {
        /* neither drop nor local: the caller must supply the DPO to use */
        path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
        ASSERT(NULL != dpo);
        dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
    }

    return (fib_path_get_index(path));
}
1166
/*
 * fib_path_copy
 *
 * Copy a path, rebinding the copy to a new path-list and resetting all
 * dynamic (operational) state. return index of new path.
 */
fib_node_index_t
fib_path_copy (fib_node_index_t path_index,
               fib_node_index_t path_list_index)
{
    fib_path_t *path, *orig_path;

    pool_get(fib_path_pool, path);

    /* fetch the original only after pool_get: a pool grow may have
     * moved the original object */
    orig_path = fib_path_get(path_index);
    ASSERT(NULL != orig_path);

    memcpy(path, orig_path, sizeof(*path));

    FIB_PATH_DBG(path, "create-copy:%d", path_index);

    /*
     * reset the dynamic section
     */
    fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
    path->fp_oper_flags = FIB_PATH_OPER_FLAG_NONE;
    path->fp_pl_index = path_list_index;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    /* zero before reset so the copied DPO is not unlocked via memcpy'd state */
    memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
    dpo_reset(&path->fp_dpo);

    return (fib_path_get_index(path));
}
1199
/*
 * fib_path_destroy
 *
 * destroy a path that is no longer required. Unresolves (releasing any
 * DPO/adjacency/entry references) before returning it to the pool.
 */
void
fib_path_destroy (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(NULL != path);
    FIB_PATH_DBG(path, "destroy");

    fib_path_unresolve(path);

    fib_node_deinit(&path->fp_node);
    pool_put(fib_path_pool, path);
}
1220
/*
 * fib_path_hash
 *
 * Hash a path on its invariant configuration: the struct members lying
 * between the path_hash_start and path_hash_end markers.
 * (The original header comment here was a stale copy of fib_path_destroy's.)
 */
uword
fib_path_hash (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
                        (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
                         STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
                        0));
}
1238
/*
 * fib_path_cmp_i
 *
 * Compare two paths for equivalence. 0 => equivalent, non-zero gives a
 * stable ordering for sorting.
 */
static int
fib_path_cmp_i (const fib_path_t *path1,
                const fib_path_t *path2)
{
    int res;

    res = 1;

    /*
     * paths of different types or protocols are not equal.
     * paths differing only in weight are considered the same path.
     */
    if (path1->fp_type != path2->fp_type)
    {
        res = (path1->fp_type - path2->fp_type);
    }
    else if (path1->fp_nh_proto != path2->fp_nh_proto)
    {
        res = (path1->fp_nh_proto - path2->fp_nh_proto);
    }
    else
    {
        /*
         * both paths are of the same type.
         * consider each type and its attributes in turn.
         */
        switch (path1->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
                                   &path2->attached_next_hop.fp_nh);
            if (0 == res) {
                res = (path1->attached_next_hop.fp_interface -
                       path2->attached_next_hop.fp_interface);
            }
            break;
        case FIB_PATH_TYPE_ATTACHED:
            res = (path1->attached.fp_interface -
                   path2->attached.fp_interface);
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            /* NOTE(review): compares the next-hop union through the ip46
             * view even for MPLS paths - presumably safe since the union
             * is zero-initialised at create; confirm. */
            res = ip46_address_cmp(&path1->recursive.fp_nh,
                                   &path2->recursive.fp_nh);

            if (0 == res)
            {
                res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
            }
            break;
        case FIB_PATH_TYPE_DEAG:
            res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
            if (0 == res)
            {
                res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
            break;
        case FIB_PATH_TYPE_SPECIAL:
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_EXCLUSIVE:
            /* these types carry no distinguishing attributes */
            res = 0;
            break;
        }
    }
    return (res);
}
1312
1313/*
1314 * fib_path_cmp_for_sort
1315 *
1316 * Compare two paths for equivalence. Used during path sorting.
1317 * As usual 0 means equal.
1318 */
1319int
1320fib_path_cmp_for_sort (void * v1,
1321 void * v2)
1322{
1323 fib_node_index_t *pi1 = v1, *pi2 = v2;
1324 fib_path_t *path1, *path2;
1325
1326 path1 = fib_path_get(*pi1);
1327 path2 = fib_path_get(*pi2);
1328
1329 return (fib_path_cmp_i(path1, path2));
1330}
1331
1332/*
1333 * fib_path_cmp
1334 *
1335 * Compare two paths for equivalence.
1336 */
1337int
1338fib_path_cmp (fib_node_index_t pi1,
1339 fib_node_index_t pi2)
1340{
1341 fib_path_t *path1, *path2;
1342
1343 path1 = fib_path_get(pi1);
1344 path2 = fib_path_get(pi2);
1345
1346 return (fib_path_cmp_i(path1, path2));
1347}
1348
/*
 * fib_path_cmp_w_route_path
 *
 * Compare an existing path object against a client's route-path
 * description. 0 => equivalent. Unlike fib_path_cmp_i, weight IS
 * significant here.
 */
int
fib_path_cmp_w_route_path (fib_node_index_t path_index,
                           const fib_route_path_t *rpath)
{
    fib_path_t *path;
    int res;

    path = fib_path_get(path_index);

    res = 1;

    if (path->fp_weight != rpath->frp_weight)
    {
        res = (path->fp_weight - rpath->frp_weight);
    }
    else
    {
        /*
         * both paths are of the same type.
         * consider each type and its attributes in turn.
         */
        switch (path->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
                                   &rpath->frp_addr);
            if (0 == res)
            {
                res = (path->attached_next_hop.fp_interface -
                       rpath->frp_sw_if_index);
            }
            break;
        case FIB_PATH_TYPE_ATTACHED:
            res = (path->attached.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
            {
                /* MPLS recursion matches on label then EOS bit */
                res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;

                if (res == 0)
                {
                    res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
                }
            }
            else
            {
                res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
                                       &rpath->frp_addr);
            }

            if (0 == res)
            {
                res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_DEAG:
            res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
            if (0 == res)
            {
                res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
            }
            break;
        case FIB_PATH_TYPE_SPECIAL:
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_EXCLUSIVE:
            /* these types carry no attributes the rpath can differ on */
            res = 0;
            break;
        }
    }
    return (res);
}
1424
/*
 * fib_path_recursive_loop_detect
 *
 * A forward walk of the FIB object graph to detect for a cycle/loop. This
 * walk is initiated when an entry is linking to a new path list or from an old.
 * The entry vector passed contains all the FIB entries that are children of this
 * path (it is all the entries encountered on the walk so far). If this vector
 * contains the entry this path resolves via, then a loop is about to form.
 * The loop must be allowed to form, since we need the dependencies in place
 * so that we can track when the loop breaks.
 * However, we MUST not produce a loop in the forwarding graph (else packets
 * would loop around the switch path until the loop breaks), so we mark recursive
 * paths as looped so that they do not contribute forwarding information.
 * By marking the path as looped, an entry such as;
 *    X/Y
 *     via a.a.a.a (looped)
 *     via b.b.b.b (not looped)
 * can still forward using the info provided by b.b.b.b only
 */
int
fib_path_recursive_loop_detect (fib_node_index_t path_index,
                                fib_node_index_t **entry_indicies)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * the forced drop path is never looped, cos it is never resolved.
     */
    if (fib_path_is_permanent_drop(path))
    {
        return (0);
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
    {
        fib_node_index_t *entry_index, *entries;
        int looped = 0;
        entries = *entry_indicies;

        /* a loop exists iff the via-entry is already on the walk's stack */
        vec_foreach(entry_index, entries) {
            if (*entry_index == path->fp_via_fib)
            {
                /*
                 * the entry that is about to link to this path-list (or
                 * one of this path-list's children) is the same entry that
                 * this recursive path resolves through. this is a cycle.
                 * abort the walk.
                 */
                looped = 1;
                break;
            }
        }

        if (looped)
        {
            FIB_PATH_DBG(path, "recursive loop formed");
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;

            /* contribute a drop so the forwarding graph itself never loops */
            dpo_copy(&path->fp_dpo,
                     drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        }
        else
        {
            /*
             * no loop here yet. keep forward walking the graph.
             */
            if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
            {
                FIB_PATH_DBG(path, "recursive loop formed");
                path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
            }
            else
            {
                FIB_PATH_DBG(path, "recursive loop cleared");
                path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
            }
        }
        break;
    }
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * these path types cannot be part of a loop, since they are the leaves
         * of the graph.
         */
        break;
    }

    return (fib_path_is_looped(path_index));
}
1524
/*
 * fib_path_resolve
 *
 * Resolve the path: construct the DPO through which it forwards and
 * link into the dependency graph (adjacency or via-entry) so the path
 * learns of state changes. Returns non-zero if the path resolved.
 */
int
fib_path_resolve (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * hope for the best.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * the forced drop path resolves via the drop adj
     */
    if (fib_path_is_permanent_drop(path))
    {
        dpo_copy(&path->fp_dpo,
                 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        return (fib_path_is_resolved(path_index));
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        fib_path_attached_next_hop_set(path);
        break;
    case FIB_PATH_TYPE_ATTACHED:
        /*
         * path->attached.fp_interface
         */
        if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
                                           path->attached.fp_interface))
        {
            /* interface is down: keep the DPO but mark unresolved */
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (vnet_sw_interface_is_p2p(vnet_get_main(),
                                     path->attached.fp_interface))
        {
            /*
             * point-2-point interfaces do not require a glean, since
             * there is nothing to ARP. Install a rewrite/nbr adj instead
             */
            dpo_set(&path->fp_dpo,
                    DPO_ADJACENCY,
                    fib_proto_to_dpo(path->fp_nh_proto),
                    adj_nbr_add_or_lock(
                        path->fp_nh_proto,
                        fib_proto_to_link(path->fp_nh_proto),
                        &zero_addr,
                        path->attached.fp_interface));
        }
        else
        {
            dpo_set(&path->fp_dpo,
                    DPO_ADJACENCY_GLEAN,
                    fib_proto_to_dpo(path->fp_nh_proto),
                    adj_glean_add_or_lock(path->fp_nh_proto,
                                          path->attached.fp_interface,
                                          NULL));
        }
        /*
         * become a child of the adjacency so we receive updates
         * when the interface state changes
         */
        path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
                                         FIB_NODE_TYPE_PATH,
                                         fib_path_get_index(path));

        break;
    case FIB_PATH_TYPE_RECURSIVE:
    {
        /*
         * Create a RR source entry in the table for the address
         * that this path recurses through.
         * This resolve action is recursive, hence we may create
         * more paths in the process. more creates mean maybe realloc
         * of this path.
         */
        fib_node_index_t fei;
        fib_prefix_t pfx;

        ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);

        if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
        {
            fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
                                       path->recursive.fp_nh.fp_eos,
                                       &pfx);
        }
        else
        {
            fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
        }

        fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
                                          &pfx,
                                          FIB_SOURCE_RR,
                                          FIB_ENTRY_FLAG_NONE);

        /* re-fetch: the add above may have grown the pool and moved us */
        path = fib_path_get(path_index);
        path->fp_via_fib = fei;

        /*
         * become a dependent child of the entry so the path is
         * informed when the forwarding for the entry changes.
         */
        path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
                                               FIB_NODE_TYPE_PATH,
                                               fib_path_get_index(path));

        /*
         * create and configure the IP DPO
         */
        fib_path_recursive_adj_update(
            path,
            fib_path_to_chain_type(path),
            &path->fp_dpo);

        break;
    }
    case FIB_PATH_TYPE_SPECIAL:
        /*
         * Resolve via the drop
         */
        dpo_copy(&path->fp_dpo,
                 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
        break;
    case FIB_PATH_TYPE_DEAG:
    {
        /*
         * Resolve via a lookup DPO.
         * FIXME. control plane should add routes with a table ID
         */
        lookup_cast_t cast;

        /* a path with an RPF-ID performs a multicast lookup */
        cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
                LOOKUP_MULTICAST :
                LOOKUP_UNICAST);

        lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
                                           fib_proto_to_dpo(path->fp_nh_proto),
                                           cast,
                                           LOOKUP_INPUT_DST_ADDR,
                                           LOOKUP_TABLE_FROM_CONFIG,
                                           &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_RECEIVE:
        /*
         * Resolve via a receive DPO.
         */
        receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
                                path->receive.fp_interface,
                                &path->receive.fp_addr,
                                &path->fp_dpo);
        break;
    case FIB_PATH_TYPE_INTF_RX: {
        /*
         * Resolve via an interface-receive DPO.
         */
        interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
                                  path->intf_rx.fp_interface,
                                  &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * Resolve via the user provided DPO
         */
        dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
        break;
    }

    return (fib_path_is_resolved(path_index));
}
1702
/*
 * fib_path_get_resolving_interface
 *
 * Return the SW interface index through which the path resolves, or ~0
 * for path types that have no associated interface.
 */
u32
fib_path_get_resolving_interface (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        return (path->attached_next_hop.fp_interface);
    case FIB_PATH_TYPE_ATTACHED:
        return (path->attached.fp_interface);
    case FIB_PATH_TYPE_RECEIVE:
        return (path->receive.fp_interface);
    case FIB_PATH_TYPE_RECURSIVE:
        /* delegate to the entry this path recurses through */
        return (fib_entry_get_resolving_interface(path->fp_via_fib));
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_EXCLUSIVE:
        break;
    }
    return (~0);
}
1728
1729adj_index_t
1730fib_path_get_adj (fib_node_index_t path_index)
1731{
1732 fib_path_t *path;
1733
1734 path = fib_path_get(path_index);
1735
1736 ASSERT(dpo_is_adj(&path->fp_dpo));
1737 if (dpo_is_adj(&path->fp_dpo))
1738 {
1739 return (path->fp_dpo.dpoi_index);
1740 }
1741 return (ADJ_INDEX_INVALID);
1742}
1743
1744int
1745fib_path_get_weight (fib_node_index_t path_index)
1746{
1747 fib_path_t *path;
1748
1749 path = fib_path_get(path_index);
1750
1751 ASSERT(path);
1752
1753 return (path->fp_weight);
1754}
1755
/**
 * @brief Contribute the path's adjacency to the list passed.
 * By calling this function over all paths, recursively, a child
 * can construct its full set of forwarding adjacencies, and hence its
 * uRPF list.
 */
void
fib_path_contribute_urpf (fib_node_index_t path_index,
                          index_t urpf)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * resolved and unresolved paths contribute to the RPF list.
     */
    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
        break;

    case FIB_PATH_TYPE_ATTACHED:
        fib_urpf_list_append(urpf, path->attached.fp_interface);
        break;

    case FIB_PATH_TYPE_RECURSIVE:
        if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
        {
            /*
             * there's unresolved due to constraints, and there's unresolved
             * due to ain't got no via. can't do nowt w'out via.
             */
            fib_entry_contribute_urpf(path->fp_via_fib, urpf);
        }
        break;

    case FIB_PATH_TYPE_EXCLUSIVE:
    case FIB_PATH_TYPE_SPECIAL:
        /*
         * these path types may link to an adj, if that's what
         * the client gave
         */
        if (dpo_is_adj(&path->fp_dpo))
        {
            ip_adjacency_t *adj;

            adj = adj_get(path->fp_dpo.dpoi_index);

            fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
        }
        break;

    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
        /*
         * these path types don't link to an adj
         */
        break;
    }
}
1819
/*
 * fib_path_stack_mpls_disp
 *
 * For a deag path, interpose an MPLS disposition DPO (carrying the
 * payload protocol and the path's RPF-ID) between the caller's DPO and
 * its current parent. All other path types are left unchanged.
 */
void
fib_path_stack_mpls_disp (fib_node_index_t path_index,
                          dpo_proto_t payload_proto,
                          dpo_id_t *dpo)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(path);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_DEAG:
    {
        dpo_id_t tmp = DPO_INVALID;

        /* tmp keeps the original parent alive while we re-point the
         * caller's DPO at the new disposition object */
        dpo_copy(&tmp, dpo);
        dpo_set(dpo,
                DPO_MPLS_DISPOSITION,
                payload_proto,
                mpls_disp_dpo_create(payload_proto,
                                     path->deag.fp_rpf_id,
                                     &tmp));
        dpo_reset(&tmp);
        break;
    }
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_RECURSIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
    case FIB_PATH_TYPE_SPECIAL:
        break;
    }
}
1857
/*
 * fib_path_contribute_forwarding
 *
 * Fill in the DPO that this path contributes for the requested
 * forwarding chain type.
 */
void
fib_path_contribute_forwarding (fib_node_index_t path_index,
                                fib_forward_chain_type_t fct,
                                dpo_id_t *dpo)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(path);
    ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);

    FIB_PATH_DBG(path, "contribute");

    /*
     * The DPO stored in the path was created when the path was resolved.
     * This then represents the path's 'native' protocol; IP.
     * For all others will need to go find something else.
     */
    if (fib_path_to_chain_type(path) == fct)
    {
        dpo_copy(dpo, &path->fp_dpo);
    }
    else
    {
        switch (path->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            switch (fct)
            {
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
            {
                adj_index_t ai;

                /*
                 * get an appropriate link type adj.
                 */
                ai = fib_path_attached_next_hop_get_adj(
                         path,
                         fib_forw_chain_type_to_link_type(fct));
                /* dpo_set takes its own lock on the adj, so release ours */
                dpo_set(dpo, DPO_ADJACENCY,
                        fib_forw_chain_type_to_dpo_proto(fct), ai);
                adj_unlock(ai);

                break;
            }
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
                break;
            }
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            switch (fct)
            {
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
                fib_path_recursive_adj_update(path, fct, dpo);
                break;
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                ASSERT(0);
                break;
            }
            break;
        case FIB_PATH_TYPE_DEAG:
            switch (fct)
            {
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
                lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
                                                  DPO_PROTO_MPLS,
                                                  LOOKUP_UNICAST,
                                                  LOOKUP_INPUT_DST_ADDR,
                                                  LOOKUP_TABLE_FROM_CONFIG,
                                                  dpo);
                break;
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
                dpo_copy(dpo, &path->fp_dpo);
                break;
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                ASSERT(0);
                break;
            }
            break;
        case FIB_PATH_TYPE_EXCLUSIVE:
            dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
            break;
        case FIB_PATH_TYPE_ATTACHED:
            switch (fct)
            {
            case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
            case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
            case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
            case FIB_FORW_CHAIN_TYPE_ETHERNET:
            case FIB_FORW_CHAIN_TYPE_NSH:
                break;
            case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
            case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
            {
                adj_index_t ai;

                /*
                 * Create the adj needed for sending IP multicast traffic
                 */
                ai = adj_mcast_add_or_lock(path->fp_nh_proto,
                                           fib_forw_chain_type_to_link_type(fct),
                                           path->attached.fp_interface);
                dpo_set(dpo, DPO_ADJACENCY,
                        fib_forw_chain_type_to_dpo_proto(fct),
                        ai);
                adj_unlock(ai);
            }
            break;
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            /*
             * Create the interface-receive DPO.
             * NOTE(review): reads attached.fp_interface for an INTF_RX path -
             * presumably this aliases intf_rx.fp_interface through the union;
             * confirm against the fib_path_t definition.
             */
            interface_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
                                      path->attached.fp_interface,
                                      dpo);
            break;
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_SPECIAL:
            dpo_copy(dpo, &path->fp_dpo);
            break;
        }
    }
}
2002
/*
 * fib_path_append_nh_for_multipath_hash
 *
 * If the path is resolved, append one load-balance bucket (the path's
 * weight, index and forwarding DPO for the requested chain type) to the
 * vector passed. Unresolved paths contribute nothing.
 * Returns the (possibly reallocated) vector.
 */
load_balance_path_t *
fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
                                       fib_forward_chain_type_t fct,
                                       load_balance_path_t *hash_key)
{
    load_balance_path_t *mnh;
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(path);

    if (fib_path_is_resolved(path_index))
    {
        vec_add2(hash_key, mnh, 1);

        mnh->path_weight = path->fp_weight;
        mnh->path_index = path_index;
        fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
    }

    return (hash_key);
}
2026
2027int
Neale Rannsf12a83f2017-04-18 09:09:40 -07002028fib_path_is_recursive_constrained (fib_node_index_t path_index)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002029{
2030 fib_path_t *path;
2031
2032 path = fib_path_get(path_index);
2033
Neale Rannsf12a83f2017-04-18 09:09:40 -07002034 return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2035 ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2036 (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002037}
2038
2039int
2040fib_path_is_exclusive (fib_node_index_t path_index)
2041{
2042 fib_path_t *path;
2043
2044 path = fib_path_get(path_index);
2045
2046 return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2047}
2048
2049int
2050fib_path_is_deag (fib_node_index_t path_index)
2051{
2052 fib_path_t *path;
2053
2054 path = fib_path_get(path_index);
2055
2056 return (FIB_PATH_TYPE_DEAG == path->fp_type);
2057}
2058
2059int
2060fib_path_is_resolved (fib_node_index_t path_index)
2061{
2062 fib_path_t *path;
2063
2064 path = fib_path_get(path_index);
2065
2066 return (dpo_id_is_valid(&path->fp_dpo) &&
2067 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2068 !fib_path_is_looped(path_index) &&
2069 !fib_path_is_permanent_drop(path));
2070}
2071
2072int
2073fib_path_is_looped (fib_node_index_t path_index)
2074{
2075 fib_path_t *path;
2076
2077 path = fib_path_get(path_index);
2078
2079 return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2080}
2081
/*
 * fib_path_encode
 *
 * Path-list walk callback: append an API-encodable description of this
 * path to the vector pointed to by ctx (a fib_route_path_encode_t**).
 * Returns 1 to continue the walk, 0 if the path does not exist.
 */
int
fib_path_encode (fib_node_index_t path_list_index,
                 fib_node_index_t path_index,
                 void *ctx)
{
    fib_route_path_encode_t **api_rpaths = ctx;
    fib_route_path_encode_t *api_rpath;
    fib_path_t *path;

    path = fib_path_get(path_index);
    if (!path)
        return (0);
    vec_add2(*api_rpaths, api_rpath, 1);
    api_rpath->rpath.frp_weight = path->fp_weight;
    api_rpath->rpath.frp_proto = path->fp_nh_proto;
    api_rpath->rpath.frp_sw_if_index = ~0;
    /* NOTE(review): fp_ex_dpo is read unconditionally through the union,
     * so for non-exclusive path types this copies whichever union member
     * is active - confirm consumers only use 'dpo' for exclusive paths. */
    api_rpath->dpo = path->exclusive.fp_ex_dpo;
    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECEIVE:
        api_rpath->rpath.frp_addr = path->receive.fp_addr;
        api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
        break;
    case FIB_PATH_TYPE_ATTACHED:
        api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
        break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
        api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
        break;
    case FIB_PATH_TYPE_SPECIAL:
        break;
    case FIB_PATH_TYPE_DEAG:
        break;
    case FIB_PATH_TYPE_RECURSIVE:
        api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
        break;
    default:
        break;
    }
    return (1);
}
2124
Neale Rannsad422ed2016-11-02 14:20:04 +00002125fib_protocol_t
2126fib_path_get_proto (fib_node_index_t path_index)
2127{
2128 fib_path_t *path;
2129
2130 path = fib_path_get(path_index);
2131
2132 return (path->fp_nh_proto);
2133}
2134
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002135void
2136fib_path_module_init (void)
2137{
2138 fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2139}
2140
2141static clib_error_t *
2142show_fib_path_command (vlib_main_t * vm,
2143 unformat_input_t * input,
2144 vlib_cli_command_t * cmd)
2145{
Neale Ranns33a7dd52016-10-07 15:14:33 +01002146 fib_node_index_t pi;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002147 fib_path_t *path;
2148
Neale Ranns33a7dd52016-10-07 15:14:33 +01002149 if (unformat (input, "%d", &pi))
2150 {
2151 /*
2152 * show one in detail
2153 */
2154 if (!pool_is_free_index(fib_path_pool, pi))
2155 {
2156 path = fib_path_get(pi);
2157 u8 *s = fib_path_format(pi, NULL);
2158 s = format(s, "children:");
2159 s = fib_node_children_format(path->fp_node.fn_children, s);
2160 vlib_cli_output (vm, "%s", s);
2161 vec_free(s);
2162 }
2163 else
2164 {
2165 vlib_cli_output (vm, "path %d invalid", pi);
2166 }
2167 }
2168 else
2169 {
2170 vlib_cli_output (vm, "FIB Paths");
2171 pool_foreach(path, fib_path_pool,
2172 ({
2173 vlib_cli_output (vm, "%U", format_fib_path, path);
2174 }));
2175 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002176
2177 return (NULL);
2178}
2179
/**
 * @brief CLI command registration: "show fib paths" dumps FIB path
 *        objects (optionally one in detail, given a numeric index).
 */
VLIB_CLI_COMMAND (show_fib_path, static) = {
  .path = "show fib paths",
  .function = show_fib_path_command,
  .short_help = "show fib paths",
};