Neale Ranns0bfe5d82016-08-25 15:29:12 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include <vlib/vlib.h>
17#include <vnet/vnet.h>
18#include <vnet/ip/format.h>
19#include <vnet/ip/ip.h>
20#include <vnet/dpo/drop_dpo.h>
21#include <vnet/dpo/receive_dpo.h>
22#include <vnet/dpo/load_balance_map.h>
23#include <vnet/dpo/lookup_dpo.h>
Neale Ranns43161a82017-08-12 02:12:00 -070024#include <vnet/dpo/interface_rx_dpo.h>
Neale Ranns0f26c5a2017-03-01 15:12:11 -080025#include <vnet/dpo/mpls_disposition.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010026
27#include <vnet/adj/adj.h>
Neale Ranns32e1c012016-11-22 17:07:28 +000028#include <vnet/adj/adj_mcast.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010029
Neale Ranns3ee44042016-10-03 13:05:48 +010030#include <vnet/fib/fib_path.h>
31#include <vnet/fib/fib_node.h>
32#include <vnet/fib/fib_table.h>
33#include <vnet/fib/fib_entry.h>
34#include <vnet/fib/fib_path_list.h>
35#include <vnet/fib/fib_internal.h>
36#include <vnet/fib/fib_urpf_list.h>
Neale Rannsa3af3372017-03-28 03:49:52 -070037#include <vnet/fib/mpls_fib.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010038
39/**
 40 * Enumeration of path types
41 */
42typedef enum fib_path_type_t_ {
43 /**
44 * Marker. Add new types after this one.
45 */
46 FIB_PATH_TYPE_FIRST = 0,
47 /**
48 * Attached-nexthop. An interface and a nexthop are known.
49 */
50 FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
51 /**
52 * attached. Only the interface is known.
53 */
54 FIB_PATH_TYPE_ATTACHED,
55 /**
56 * recursive. Only the next-hop is known.
57 */
58 FIB_PATH_TYPE_RECURSIVE,
59 /**
60 * special. nothing is known. so we drop.
61 */
62 FIB_PATH_TYPE_SPECIAL,
63 /**
64 * exclusive. user provided adj.
65 */
66 FIB_PATH_TYPE_EXCLUSIVE,
67 /**
68 * deag. Link to a lookup adj in the next table
69 */
70 FIB_PATH_TYPE_DEAG,
71 /**
Neale Ranns0f26c5a2017-03-01 15:12:11 -080072 * interface receive.
73 */
74 FIB_PATH_TYPE_INTF_RX,
75 /**
Neale Ranns0bfe5d82016-08-25 15:29:12 +010076 * receive. it's for-us.
77 */
78 FIB_PATH_TYPE_RECEIVE,
79 /**
80 * Marker. Add new types before this one, then update it.
81 */
82 FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
83} __attribute__ ((packed)) fib_path_type_t;
84
85/**
86 * The maximum number of path_types
87 */
88#define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
89
90#define FIB_PATH_TYPES { \
91 [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop", \
92 [FIB_PATH_TYPE_ATTACHED] = "attached", \
93 [FIB_PATH_TYPE_RECURSIVE] = "recursive", \
94 [FIB_PATH_TYPE_SPECIAL] = "special", \
95 [FIB_PATH_TYPE_EXCLUSIVE] = "exclusive", \
96 [FIB_PATH_TYPE_DEAG] = "deag", \
Neale Ranns0f26c5a2017-03-01 15:12:11 -080097 [FIB_PATH_TYPE_INTF_RX] = "intf-rx", \
Neale Ranns0bfe5d82016-08-25 15:29:12 +010098 [FIB_PATH_TYPE_RECEIVE] = "receive", \
99}
100
101#define FOR_EACH_FIB_PATH_TYPE(_item) \
102 for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
103
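/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the FIB_PATH_TYPES initialiser above is used later in this
 * file to build fib_path_type_names[], and FOR_EACH_FIB_PATH_TYPE walks
 * every value between the FIRST and LAST markers. A caller could dump
 * all the type names like so:
 *
 *   fib_path_type_t pt;
 *
 *   FOR_EACH_FIB_PATH_TYPE(pt)
 *   {
 *       clib_warning("path-type %d = %s", pt, fib_path_type_names[pt]);
 *   }
 */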
104/**
 105 * Enumeration of path operational (i.e. derived) attributes
106 */
107typedef enum fib_path_oper_attribute_t_ {
108 /**
109 * Marker. Add new types after this one.
110 */
111 FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
112 /**
113 * The path forms part of a recursive loop.
114 */
115 FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
116 /**
117 * The path is resolved
118 */
119 FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
120 /**
Neale Ranns4b919a52017-03-11 05:55:21 -0800121 * The path is attached, despite what the next-hop may say.
122 */
123 FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
124 /**
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100125 * The path has become a permanent drop.
126 */
127 FIB_PATH_OPER_ATTRIBUTE_DROP,
128 /**
129 * Marker. Add new types before this one, then update it.
130 */
131 FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
132} __attribute__ ((packed)) fib_path_oper_attribute_t;
133
134/**
135 * The maximum number of path operational attributes
136 */
137#define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
138
139#define FIB_PATH_OPER_ATTRIBUTES { \
140 [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop", \
141 [FIB_PATH_OPER_ATTRIBUTE_RESOLVED] = "resolved", \
142 [FIB_PATH_OPER_ATTRIBUTE_DROP] = "drop", \
143}
144
145#define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
146 for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
147 _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
148 _item++)
149
150/**
151 * Path flags from the attributes
152 */
153typedef enum fib_path_oper_flags_t_ {
154 FIB_PATH_OPER_FLAG_NONE = 0,
155 FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
156 FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
157 FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
Neale Ranns4b919a52017-03-11 05:55:21 -0800158 FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100159} __attribute__ ((packed)) fib_path_oper_flags_t;
160
161/**
162 * A FIB path
163 */
164typedef struct fib_path_t_ {
165 /**
166 * A path is a node in the FIB graph.
167 */
168 fib_node_t fp_node;
169
170 /**
171 * The index of the path-list to which this path belongs
172 */
173 u32 fp_pl_index;
174
175 /**
176 * This marks the start of the memory area used to hash
177 * the path
178 */
179 STRUCT_MARK(path_hash_start);
180
181 /**
182 * Configuration Flags
183 */
184 fib_path_cfg_flags_t fp_cfg_flags;
185
186 /**
187 * The type of the path. This is the selector for the union
188 */
189 fib_path_type_t fp_type;
190
191 /**
192 * The protocol of the next-hop, i.e. the address family of the
193 * next-hop's address. We can't derive this from the address itself
194 * since the address can be all zeros
195 */
Neale Rannsda78f952017-05-24 09:15:43 -0700196 dpo_proto_t fp_nh_proto;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100197
198 /**
Neale Ranns57b58602017-07-15 07:37:25 -0700199 * UCMP [unnormalised] weight
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100200 */
Neale Rannsa0a908f2017-08-01 11:40:03 -0700201 u8 fp_weight;
202
Neale Ranns57b58602017-07-15 07:37:25 -0700203 /**
204 * A path preference. 0 is the best.
205 * Only paths of the best preference, that are 'up', are considered
206 * for forwarding.
207 */
Neale Rannsa0a908f2017-08-01 11:40:03 -0700208 u8 fp_preference;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100209
210 /**
211 * per-type union of the data required to resolve the path
212 */
213 union {
214 struct {
215 /**
216 * The next-hop
217 */
218 ip46_address_t fp_nh;
219 /**
220 * The interface
221 */
222 u32 fp_interface;
223 } attached_next_hop;
224 struct {
225 /**
226 * The interface
227 */
228 u32 fp_interface;
229 } attached;
230 struct {
Neale Rannsad422ed2016-11-02 14:20:04 +0000231 union
232 {
233 /**
234 * The next-hop
235 */
236 ip46_address_t fp_ip;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800237 struct {
238 /**
239 * The local label to resolve through.
240 */
241 mpls_label_t fp_local_label;
242 /**
243 * The EOS bit of the resolving label
244 */
245 mpls_eos_bit_t fp_eos;
246 };
Neale Rannsad422ed2016-11-02 14:20:04 +0000247 } fp_nh;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100248 /**
249 * The FIB table index in which to find the next-hop.
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100250 */
251 fib_node_index_t fp_tbl_id;
252 } recursive;
253 struct {
254 /**
Neale Rannsad422ed2016-11-02 14:20:04 +0000255 * The FIB index in which to perform the next lookup
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100256 */
257 fib_node_index_t fp_tbl_id;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800258 /**
259 * The RPF-ID to tag the packets with
260 */
261 fib_rpf_id_t fp_rpf_id;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100262 } deag;
263 struct {
264 } special;
265 struct {
266 /**
267 * The user provided 'exclusive' DPO
268 */
269 dpo_id_t fp_ex_dpo;
270 } exclusive;
271 struct {
272 /**
273 * The interface on which the local address is configured
274 */
275 u32 fp_interface;
276 /**
277 * The next-hop
278 */
279 ip46_address_t fp_addr;
280 } receive;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800281 struct {
282 /**
283 * The interface on which the packets will be input.
284 */
285 u32 fp_interface;
286 } intf_rx;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100287 };
288 STRUCT_MARK(path_hash_end);
289
290 /**
 291 * Members in this last section represent information that is
 292 * derived during resolution. It should not be copied to new paths
293 * nor compared.
294 */
295
296 /**
297 * Operational Flags
298 */
299 fib_path_oper_flags_t fp_oper_flags;
300
301 /**
 302 * the resolving via fib. not part of the union, since it is not part
303 * of the path's hash.
304 */
305 fib_node_index_t fp_via_fib;
306
307 /**
308 * The Data-path objects through which this path resolves for IP.
309 */
310 dpo_id_t fp_dpo;
311
312 /**
313 * the index of this path in the parent's child list.
314 */
315 u32 fp_sibling;
316} fib_path_t;
317
318/*
319 * Array of strings/names for the path types and attributes
320 */
321static const char *fib_path_type_names[] = FIB_PATH_TYPES;
322static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
323static const char *fib_path_cfg_attribute_names[] = FIB_PATH_CFG_ATTRIBUTES;
324
325/*
326 * The memory pool from which we allocate all the paths
327 */
328static fib_path_t *fib_path_pool;
329
330/*
331 * Debug macro
332 */
333#ifdef FIB_DEBUG
334#define FIB_PATH_DBG(_p, _fmt, _args...) \
335{ \
336 u8 *_tmp = NULL; \
337 _tmp = fib_path_format(fib_path_get_index(_p), _tmp); \
338 clib_warning("path:[%d:%s]:" _fmt, \
339 fib_path_get_index(_p), _tmp, \
340 ##_args); \
341 vec_free(_tmp); \
342}
343#else
344#define FIB_PATH_DBG(_p, _fmt, _args...)
345#endif
346
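/*
 * Usage sketch (added for illustration): with FIB_DEBUG defined the macro
 * above formats the path and logs it via clib_warning(), otherwise it
 * compiles away to nothing. A typical call site in this file looks like:
 *
 *   FIB_PATH_DBG(path, "resolve via table:%d", path->recursive.fp_tbl_id);
 *
 * which, when enabled, prints "path:[<index>:<formatted path>]:resolve ...".
 */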
347static fib_path_t *
348fib_path_get (fib_node_index_t index)
349{
350 return (pool_elt_at_index(fib_path_pool, index));
351}
352
353static fib_node_index_t
354fib_path_get_index (fib_path_t *path)
355{
356 return (path - fib_path_pool);
357}
358
359static fib_node_t *
360fib_path_get_node (fib_node_index_t index)
361{
362 return ((fib_node_t*)fib_path_get(index));
363}
364
365static fib_path_t*
366fib_path_from_fib_node (fib_node_t *node)
367{
368#if CLIB_DEBUG > 0
369 ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
370#endif
371 return ((fib_path_t*)node);
372}
373
374u8 *
375format_fib_path (u8 * s, va_list * args)
376{
377 fib_path_t *path = va_arg (*args, fib_path_t *);
378 vnet_main_t * vnm = vnet_get_main();
379 fib_path_oper_attribute_t oattr;
380 fib_path_cfg_attribute_t cattr;
381
382 s = format (s, " index:%d ", fib_path_get_index(path));
383 s = format (s, "pl-index:%d ", path->fp_pl_index);
Neale Rannsda78f952017-05-24 09:15:43 -0700384 s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100385 s = format (s, "weight=%d ", path->fp_weight);
Neale Ranns57b58602017-07-15 07:37:25 -0700386 s = format (s, "pref=%d ", path->fp_preference);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100387 s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
388 if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
389 s = format(s, " oper-flags:");
390 FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
391 if ((1<<oattr) & path->fp_oper_flags) {
392 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
393 }
394 }
395 }
396 if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
397 s = format(s, " cfg-flags:");
398 FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
399 if ((1<<cattr) & path->fp_cfg_flags) {
400 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
401 }
402 }
403 }
404 s = format(s, "\n ");
405
406 switch (path->fp_type)
407 {
408 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
409 s = format (s, "%U", format_ip46_address,
410 &path->attached_next_hop.fp_nh,
411 IP46_TYPE_ANY);
412 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
413 {
414 s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
415 }
416 else
417 {
418 s = format (s, " %U",
419 format_vnet_sw_interface_name,
420 vnm,
421 vnet_get_sw_interface(
422 vnm,
423 path->attached_next_hop.fp_interface));
424 if (vnet_sw_interface_is_p2p(vnet_get_main(),
425 path->attached_next_hop.fp_interface))
426 {
427 s = format (s, " (p2p)");
428 }
429 }
430 if (!dpo_id_is_valid(&path->fp_dpo))
431 {
432 s = format(s, "\n unresolved");
433 }
434 else
435 {
436 s = format(s, "\n %U",
437 format_dpo_id,
438 &path->fp_dpo, 13);
439 }
440 break;
441 case FIB_PATH_TYPE_ATTACHED:
442 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
443 {
444 s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
445 }
446 else
447 {
448 s = format (s, " %U",
449 format_vnet_sw_interface_name,
450 vnm,
451 vnet_get_sw_interface(
452 vnm,
453 path->attached.fp_interface));
454 }
455 break;
456 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsda78f952017-05-24 09:15:43 -0700457 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Rannsad422ed2016-11-02 14:20:04 +0000458 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800459 s = format (s, "via %U %U",
Neale Rannsad422ed2016-11-02 14:20:04 +0000460 format_mpls_unicast_label,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800461 path->recursive.fp_nh.fp_local_label,
462 format_mpls_eos_bit,
463 path->recursive.fp_nh.fp_eos);
Neale Rannsad422ed2016-11-02 14:20:04 +0000464 }
465 else
466 {
467 s = format (s, "via %U",
468 format_ip46_address,
469 &path->recursive.fp_nh.fp_ip,
470 IP46_TYPE_ANY);
471 }
472 s = format (s, " in fib:%d",
473 path->recursive.fp_tbl_id,
474 path->fp_via_fib);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100475 s = format (s, " via-fib:%d", path->fp_via_fib);
476 s = format (s, " via-dpo:[%U:%d]",
477 format_dpo_type, path->fp_dpo.dpoi_type,
478 path->fp_dpo.dpoi_index);
479
480 break;
481 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800482 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100483 case FIB_PATH_TYPE_SPECIAL:
484 case FIB_PATH_TYPE_DEAG:
485 case FIB_PATH_TYPE_EXCLUSIVE:
486 if (dpo_id_is_valid(&path->fp_dpo))
487 {
488 s = format(s, "%U", format_dpo_id,
489 &path->fp_dpo, 2);
490 }
491 break;
492 }
493 return (s);
494}
495
496u8 *
497fib_path_format (fib_node_index_t pi, u8 *s)
498{
499 fib_path_t *path;
500
501 path = fib_path_get(pi);
502 ASSERT(NULL != path);
503
504 return (format (s, "%U", format_fib_path, path));
505}
506
507u8 *
508fib_path_adj_format (fib_node_index_t pi,
509 u32 indent,
510 u8 *s)
511{
512 fib_path_t *path;
513
514 path = fib_path_get(pi);
515 ASSERT(NULL != path);
516
517 if (!dpo_id_is_valid(&path->fp_dpo))
518 {
519 s = format(s, " unresolved");
520 }
521 else
522 {
523 s = format(s, "%U", format_dpo_id,
524 &path->fp_dpo, 2);
525 }
526
527 return (s);
528}
529
530/*
531 * fib_path_last_lock_gone
532 *
533 * We don't share paths, we share path lists, so the [un]lock functions
534 * are no-ops
535 */
536static void
537fib_path_last_lock_gone (fib_node_t *node)
538{
539 ASSERT(0);
540}
541
542static const adj_index_t
543fib_path_attached_next_hop_get_adj (fib_path_t *path,
Neale Ranns924d03a2016-10-19 08:25:46 +0100544 vnet_link_t link)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100545{
546 if (vnet_sw_interface_is_p2p(vnet_get_main(),
547 path->attached_next_hop.fp_interface))
548 {
549 /*
550 * if the interface is p2p then the adj for the specific
551 * neighbour on that link will never exist. on p2p links
552 * the subnet address (the attached route) links to the
553 * auto-adj (see below), we want that adj here too.
554 */
Neale Rannsda78f952017-05-24 09:15:43 -0700555 return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100556 link,
557 &zero_addr,
558 path->attached_next_hop.fp_interface));
559 }
560 else
561 {
Neale Rannsda78f952017-05-24 09:15:43 -0700562 return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100563 link,
564 &path->attached_next_hop.fp_nh,
565 path->attached_next_hop.fp_interface));
566 }
567}
568
569static void
570fib_path_attached_next_hop_set (fib_path_t *path)
571{
572 /*
 573 * resolve directly via the adjacency described by the
574 * interface and next-hop
575 */
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100576 dpo_set(&path->fp_dpo,
577 DPO_ADJACENCY,
Neale Rannsda78f952017-05-24 09:15:43 -0700578 path->fp_nh_proto,
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100579 fib_path_attached_next_hop_get_adj(
580 path,
Neale Rannsda78f952017-05-24 09:15:43 -0700581 dpo_proto_to_link(path->fp_nh_proto)));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100582
583 /*
584 * become a child of the adjacency so we receive updates
585 * when its rewrite changes
586 */
587 path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
588 FIB_NODE_TYPE_PATH,
589 fib_path_get_index(path));
Neale Ranns88fc83e2017-04-05 08:11:14 -0700590
591 if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
592 path->attached_next_hop.fp_interface) ||
593 !adj_is_up(path->fp_dpo.dpoi_index))
594 {
595 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
596 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100597}
598
Neale Ranns8c4611b2017-05-23 03:43:47 -0700599static const adj_index_t
600fib_path_attached_get_adj (fib_path_t *path,
601 vnet_link_t link)
602{
603 if (vnet_sw_interface_is_p2p(vnet_get_main(),
604 path->attached.fp_interface))
605 {
606 /*
607 * point-2-point interfaces do not require a glean, since
608 * there is nothing to ARP. Install a rewrite/nbr adj instead
609 */
Neale Rannsda78f952017-05-24 09:15:43 -0700610 return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns8c4611b2017-05-23 03:43:47 -0700611 link,
612 &zero_addr,
613 path->attached.fp_interface));
614 }
615 else
616 {
Neale Rannsda78f952017-05-24 09:15:43 -0700617 return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns8c4611b2017-05-23 03:43:47 -0700618 path->attached.fp_interface,
619 NULL));
620 }
621}
622
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100623/*
 624 * create or update the path's recursive adj
625 */
626static void
627fib_path_recursive_adj_update (fib_path_t *path,
628 fib_forward_chain_type_t fct,
629 dpo_id_t *dpo)
630{
Neale Ranns948e00f2016-10-20 13:39:34 +0100631 dpo_id_t via_dpo = DPO_INVALID;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100632
633 /*
634 * get the DPO to resolve through from the via-entry
635 */
636 fib_entry_contribute_forwarding(path->fp_via_fib,
637 fct,
638 &via_dpo);
639
640
641 /*
642 * hope for the best - clear if restrictions apply.
643 */
644 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
645
646 /*
647 * Validate any recursion constraints and over-ride the via
648 * adj if not met
649 */
650 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
651 {
652 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
Neale Rannsda78f952017-05-24 09:15:43 -0700653 dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100654 }
655 else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
656 {
657 /*
658 * the via FIB must be a host route.
659 * note the via FIB just added will always be a host route
660 * since it is an RR source added host route. So what we need to
661 * check is whether the route has other sources. If it does then
662 * some other source has added it as a host route. If it doesn't
663 * then it was added only here and inherits forwarding from a cover.
664 * the cover is not a host route.
665 * The RR source is the lowest priority source, so we check if it
666 * is the best. if it is there are no other sources.
667 */
668 if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
669 {
670 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
Neale Rannsda78f952017-05-24 09:15:43 -0700671 dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100672
673 /*
674 * PIC edge trigger. let the load-balance maps know
675 */
676 load_balance_map_path_state_change(fib_path_get_index(path));
677 }
678 }
679 else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
680 {
681 /*
682 * RR source entries inherit the flags from the cover, so
683 * we can check the via directly
684 */
685 if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
686 {
687 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
Neale Rannsda78f952017-05-24 09:15:43 -0700688 dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100689
690 /*
691 * PIC edge trigger. let the load-balance maps know
692 */
693 load_balance_map_path_state_change(fib_path_get_index(path));
694 }
695 }
Neale Ranns88fc83e2017-04-05 08:11:14 -0700696 /*
697 * check for over-riding factors on the FIB entry itself
698 */
699 if (!fib_entry_is_resolved(path->fp_via_fib))
700 {
701 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
Neale Rannsda78f952017-05-24 09:15:43 -0700702 dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns88fc83e2017-04-05 08:11:14 -0700703
704 /*
705 * PIC edge trigger. let the load-balance maps know
706 */
707 load_balance_map_path_state_change(fib_path_get_index(path));
708 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100709
710 /*
Neale Ranns57b58602017-07-15 07:37:25 -0700711 * If this path is contributing a drop, then it's not resolved
712 */
713 if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
714 {
715 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
716 }
717
718 /*
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100719 * update the path's contributed DPO
720 */
721 dpo_copy(dpo, &via_dpo);
722
Neale Rannsda78f952017-05-24 09:15:43 -0700723 FIB_PATH_DBG(path, "recursive update:");
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100724
725 dpo_reset(&via_dpo);
726}
727
728/*
729 * fib_path_is_permanent_drop
730 *
731 * Return !0 if the path is configured to permanently drop,
732 * despite other attributes.
733 */
734static int
735fib_path_is_permanent_drop (fib_path_t *path)
736{
737 return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
738 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
739}
740
741/*
742 * fib_path_unresolve
743 *
744 * Remove our dependency on the resolution target
745 */
746static void
747fib_path_unresolve (fib_path_t *path)
748{
749 /*
750 * the forced drop path does not need unresolving
751 */
752 if (fib_path_is_permanent_drop(path))
753 {
754 return;
755 }
756
757 switch (path->fp_type)
758 {
759 case FIB_PATH_TYPE_RECURSIVE:
760 if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
761 {
762 fib_prefix_t pfx;
763
Neale Rannsad422ed2016-11-02 14:20:04 +0000764 fib_entry_get_prefix(path->fp_via_fib, &pfx);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100765 fib_entry_child_remove(path->fp_via_fib,
766 path->fp_sibling);
767 fib_table_entry_special_remove(path->recursive.fp_tbl_id,
768 &pfx,
769 FIB_SOURCE_RR);
770 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
771 }
772 break;
773 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
774 case FIB_PATH_TYPE_ATTACHED:
775 adj_child_remove(path->fp_dpo.dpoi_index,
776 path->fp_sibling);
777 adj_unlock(path->fp_dpo.dpoi_index);
778 break;
779 case FIB_PATH_TYPE_EXCLUSIVE:
780 dpo_reset(&path->exclusive.fp_ex_dpo);
781 break;
782 case FIB_PATH_TYPE_SPECIAL:
783 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800784 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100785 case FIB_PATH_TYPE_DEAG:
786 /*
787 * these hold only the path's DPO, which is reset below.
788 */
789 break;
790 }
791
792 /*
793 * release the adj we were holding and pick up the
794 * drop just in case.
795 */
796 dpo_reset(&path->fp_dpo);
797 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
798
799 return;
800}
801
802static fib_forward_chain_type_t
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800803fib_path_to_chain_type (const fib_path_t *path)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100804{
Neale Rannsda78f952017-05-24 09:15:43 -0700805 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100806 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800807 if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
808 MPLS_EOS == path->recursive.fp_nh.fp_eos)
809 {
810 return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
811 }
812 else
813 {
Neale Ranns9f171f52017-04-11 08:56:53 -0700814 return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800815 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100816 }
Neale Rannsda78f952017-05-24 09:15:43 -0700817 else
818 {
819 return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
820 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100821}
822
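/*
 * Worked example (sketch, added for exposition), following the function
 * above:
 *
 *   path->fp_nh_proto == DPO_PROTO_IP4
 *       => fib_forw_chain_type_from_dpo_proto() => FIB_FORW_CHAIN_TYPE_UNICAST_IP4
 *   path->fp_nh_proto == DPO_PROTO_MPLS, recursive via an EOS label
 *       => FIB_FORW_CHAIN_TYPE_MPLS_EOS
 *   path->fp_nh_proto == DPO_PROTO_MPLS, any other case
 *       => FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS
 *
 * This is the chain type requested from the via-entry when the recursive
 * path (re)stacks its DPO.
 */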
823/*
824 * fib_path_back_walk_notify
825 *
 826 * A back walk has reached this path.
827 */
828static fib_node_back_walk_rc_t
829fib_path_back_walk_notify (fib_node_t *node,
830 fib_node_back_walk_ctx_t *ctx)
831{
832 fib_path_t *path;
833
834 path = fib_path_from_fib_node(node);
835
836 switch (path->fp_type)
837 {
838 case FIB_PATH_TYPE_RECURSIVE:
839 if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
840 {
841 /*
842 * modify the recursive adjacency to use the new forwarding
843 * of the via-fib.
844 * this update is visible to packets in flight in the DP.
845 */
846 fib_path_recursive_adj_update(
847 path,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800848 fib_path_to_chain_type(path),
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100849 &path->fp_dpo);
850 }
Neale Rannsad95b5d2016-11-10 20:35:14 +0000851 if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
852 (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason))
Neale Rannsb80c5362016-10-08 13:03:40 +0100853 {
854 /*
855 * ADJ updates (complete<->incomplete) do not need to propagate to
856 * recursive entries.
857 * The only reason its needed as far back as here, is that the adj
858 * and the incomplete adj are a different DPO type, so the LBs need
859 * to re-stack.
860 * If this walk was quashed in the fib_entry, then any non-fib_path
861 * children (like tunnels that collapse out the LB when they stack)
862 * would not see the update.
863 */
864 return (FIB_NODE_BACK_WALK_CONTINUE);
865 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100866 break;
867 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
868 /*
869FIXME comment
 870 * ADJ_UPDATE backwalks pass silently through here and up to
 871 * the path-list when the multipath adj collapse occurs.
 872 * The reason we do this is that the assumption is that VPP
 873 * runs in an environment where the Control-Plane is remote
 874 * and hence reacts slowly to link up/down. In order to remove
875 * this down link from the ECMP set quickly, we back-walk.
876 * VPP also has dedicated CPUs, so we are not stealing resources
877 * from the CP to do so.
878 */
879 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
880 {
Neale Ranns8b37b872016-11-21 12:25:22 +0000881 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
882 {
883 /*
 884 * already resolved. no need to walk back again
885 */
886 return (FIB_NODE_BACK_WALK_CONTINUE);
887 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100888 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
889 }
890 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
891 {
Neale Ranns8b37b872016-11-21 12:25:22 +0000892 if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
893 {
894 /*
 895 * already unresolved. no need to walk back again
896 */
897 return (FIB_NODE_BACK_WALK_CONTINUE);
898 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100899 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
900 }
901 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
902 {
903 /*
904 * The interface this path resolves through has been deleted.
905 * This will leave the path in a permanent drop state. The route
906 * needs to be removed and readded (and hence the path-list deleted)
907 * before it can forward again.
908 */
909 fib_path_unresolve(path);
910 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
911 }
912 if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
913 {
914 /*
915 * restack the DPO to pick up the correct DPO sub-type
916 */
Neale Ranns8b37b872016-11-21 12:25:22 +0000917 uword if_is_up;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100918 adj_index_t ai;
919
Neale Ranns8b37b872016-11-21 12:25:22 +0000920 if_is_up = vnet_sw_interface_is_admin_up(
921 vnet_get_main(),
922 path->attached_next_hop.fp_interface);
923
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100924 ai = fib_path_attached_next_hop_get_adj(
925 path,
Neale Rannsda78f952017-05-24 09:15:43 -0700926 dpo_proto_to_link(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100927
Neale Ranns88fc83e2017-04-05 08:11:14 -0700928 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
929 if (if_is_up && adj_is_up(ai))
930 {
931 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
932 }
933
Neale Rannsda78f952017-05-24 09:15:43 -0700934 dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100935 adj_unlock(ai);
Neale Ranns8b37b872016-11-21 12:25:22 +0000936
937 if (!if_is_up)
938 {
939 /*
940 * If the interface is not up there is no reason to walk
 941 * back to children. if we did they would only evaluate
942 * that this path is unresolved and hence it would
943 * not contribute the adjacency - so it would be wasted
944 * CPU time.
945 */
946 return (FIB_NODE_BACK_WALK_CONTINUE);
947 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100948 }
Neale Rannsad95b5d2016-11-10 20:35:14 +0000949 if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
950 {
Neale Ranns8b37b872016-11-21 12:25:22 +0000951 if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
952 {
953 /*
 954 * already unresolved. no need to walk back again
955 */
956 return (FIB_NODE_BACK_WALK_CONTINUE);
957 }
Neale Rannsad95b5d2016-11-10 20:35:14 +0000958 /*
959 * the adj has gone down. the path is no longer resolved.
960 */
961 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
962 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100963 break;
964 case FIB_PATH_TYPE_ATTACHED:
965 /*
966 * FIXME; this could schedule a lower priority walk, since attached
967 * routes are not usually in ECMP configurations so the backwalk to
968 * the FIB entry does not need to be high priority
969 */
970 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
971 {
972 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
973 }
974 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
975 {
976 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
977 }
978 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
979 {
980 fib_path_unresolve(path);
981 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
982 }
983 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800984 case FIB_PATH_TYPE_INTF_RX:
985 ASSERT(0);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100986 case FIB_PATH_TYPE_DEAG:
987 /*
988 * FIXME When VRF delete is allowed this will need a poke.
989 */
990 case FIB_PATH_TYPE_SPECIAL:
991 case FIB_PATH_TYPE_RECEIVE:
992 case FIB_PATH_TYPE_EXCLUSIVE:
993 /*
994 * these path types have no parents. so to be
995 * walked from one is unexpected.
996 */
997 ASSERT(0);
998 break;
999 }
1000
1001 /*
1002 * propagate the backwalk further to the path-list
1003 */
1004 fib_path_list_back_walk(path->fp_pl_index, ctx);
1005
1006 return (FIB_NODE_BACK_WALK_CONTINUE);
1007}
1008
Neale Ranns6c3ebcc2016-10-02 21:20:15 +01001009static void
1010fib_path_memory_show (void)
1011{
1012 fib_show_memory_usage("Path",
1013 pool_elts(fib_path_pool),
1014 pool_len(fib_path_pool),
1015 sizeof(fib_path_t));
1016}
1017
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001018/*
1019 * The FIB path's graph node virtual function table
1020 */
1021static const fib_node_vft_t fib_path_vft = {
1022 .fnv_get = fib_path_get_node,
1023 .fnv_last_lock = fib_path_last_lock_gone,
1024 .fnv_back_walk = fib_path_back_walk_notify,
Neale Ranns6c3ebcc2016-10-02 21:20:15 +01001025 .fnv_mem_show = fib_path_memory_show,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001026};
1027
1028static fib_path_cfg_flags_t
1029fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1030{
Neale Ranns450cd302016-11-09 17:49:42 +00001031 fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001032
1033 if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1034 cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1035 if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1036 cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
Neale Ranns32e1c012016-11-22 17:07:28 +00001037 if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1038 cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
Neale Ranns4b919a52017-03-11 05:55:21 -08001039 if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1040 cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001041 if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1042 cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1043 if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1044 cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1045 if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1046 cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1047 if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1048 cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001049
1050 return (cfg_flags);
1051}
1052
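/*
 * Illustrative sketch (an assumption for exposition, not part of the
 * original source): a caller that wants a next-hop to resolve only via a
 * host route sets the corresponding FIB_ROUTE_PATH_* flag, which the
 * function above translates into the path's config flags:
 *
 *   fib_route_path_t rpath = {
 *       .frp_proto = DPO_PROTO_IP4,
 *       .frp_addr = nh_addr,            // hypothetical next-hop address
 *       .frp_sw_if_index = ~0,
 *       .frp_fib_index = 0,
 *       .frp_weight = 1,
 *       .frp_flags = FIB_ROUTE_PATH_RESOLVE_VIA_HOST,
 *   };
 *   // => fib_path_route_flags_to_cfg_flags(&rpath) includes
 *   //    FIB_PATH_CFG_FLAG_RESOLVE_HOST
 */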
1053/*
1054 * fib_path_create
1055 *
1056 * Create and initialise a new path object.
1057 * return the index of the path.
1058 */
1059fib_node_index_t
1060fib_path_create (fib_node_index_t pl_index,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001061 const fib_route_path_t *rpath)
1062{
1063 fib_path_t *path;
1064
1065 pool_get(fib_path_pool, path);
1066 memset(path, 0, sizeof(*path));
1067
1068 fib_node_init(&path->fp_node,
1069 FIB_NODE_TYPE_PATH);
1070
1071 dpo_reset(&path->fp_dpo);
1072 path->fp_pl_index = pl_index;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001073 path->fp_nh_proto = rpath->frp_proto;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001074 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1075 path->fp_weight = rpath->frp_weight;
Neale Ranns0bd36ea2016-11-16 11:47:44 +00001076 if (0 == path->fp_weight)
1077 {
1078 /*
1079 * a weight of 0 is a meaningless value. We could either reject it, and thus force
 1080 * clients to always use 1, or we can accept it and fix it up appropriately.
1081 */
1082 path->fp_weight = 1;
1083 }
Neale Ranns57b58602017-07-15 07:37:25 -07001084 path->fp_preference = rpath->frp_preference;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001085 path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001086
1087 /*
 1088 * deduce the path's type from the parameters and save what is needed.
1089 */
Neale Ranns32e1c012016-11-22 17:07:28 +00001090 if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001091 {
Neale Ranns32e1c012016-11-22 17:07:28 +00001092 path->fp_type = FIB_PATH_TYPE_RECEIVE;
1093 path->receive.fp_interface = rpath->frp_sw_if_index;
1094 path->receive.fp_addr = rpath->frp_addr;
1095 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001096 else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1097 {
1098 path->fp_type = FIB_PATH_TYPE_INTF_RX;
1099 path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1100 }
1101 else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1102 {
1103 path->fp_type = FIB_PATH_TYPE_DEAG;
1104 path->deag.fp_tbl_id = rpath->frp_fib_index;
1105 path->deag.fp_rpf_id = rpath->frp_rpf_id;
1106 }
Neale Ranns32e1c012016-11-22 17:07:28 +00001107 else if (~0 != rpath->frp_sw_if_index)
1108 {
1109 if (ip46_address_is_zero(&rpath->frp_addr))
1110 {
1111 path->fp_type = FIB_PATH_TYPE_ATTACHED;
1112 path->attached.fp_interface = rpath->frp_sw_if_index;
1113 }
1114 else
1115 {
1116 path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1117 path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1118 path->attached_next_hop.fp_nh = rpath->frp_addr;
1119 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001120 }
1121 else
1122 {
1123 if (ip46_address_is_zero(&rpath->frp_addr))
1124 {
1125 if (~0 == rpath->frp_fib_index)
1126 {
1127 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1128 }
1129 else
1130 {
1131 path->fp_type = FIB_PATH_TYPE_DEAG;
1132 path->deag.fp_tbl_id = rpath->frp_fib_index;
1133 }
1134 }
1135 else
1136 {
1137 path->fp_type = FIB_PATH_TYPE_RECURSIVE;
Neale Rannsda78f952017-05-24 09:15:43 -07001138 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Rannsad422ed2016-11-02 14:20:04 +00001139 {
1140 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001141 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
Neale Rannsad422ed2016-11-02 14:20:04 +00001142 }
1143 else
1144 {
1145 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1146 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001147 path->recursive.fp_tbl_id = rpath->frp_fib_index;
1148 }
1149 }
1150
1151 FIB_PATH_DBG(path, "create");
1152
1153 return (fib_path_get_index(path));
1154}
1155
1156/*
1157 * fib_path_create_special
1158 *
1159 * Create and initialise a new path object.
1160 * return the index of the path.
1161 */
1162fib_node_index_t
1163fib_path_create_special (fib_node_index_t pl_index,
Neale Rannsda78f952017-05-24 09:15:43 -07001164 dpo_proto_t nh_proto,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001165 fib_path_cfg_flags_t flags,
1166 const dpo_id_t *dpo)
1167{
1168 fib_path_t *path;
1169
1170 pool_get(fib_path_pool, path);
1171 memset(path, 0, sizeof(*path));
1172
1173 fib_node_init(&path->fp_node,
1174 FIB_NODE_TYPE_PATH);
1175 dpo_reset(&path->fp_dpo);
1176
1177 path->fp_pl_index = pl_index;
1178 path->fp_weight = 1;
Neale Ranns57b58602017-07-15 07:37:25 -07001179 path->fp_preference = 0;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001180 path->fp_nh_proto = nh_proto;
1181 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1182 path->fp_cfg_flags = flags;
1183
1184 if (FIB_PATH_CFG_FLAG_DROP & flags)
1185 {
1186 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1187 }
1188 else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1189 {
1190 path->fp_type = FIB_PATH_TYPE_RECEIVE;
1191 path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1192 }
1193 else
1194 {
1195 path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1196 ASSERT(NULL != dpo);
1197 dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1198 }
1199
1200 return (fib_path_get_index(path));
1201}
1202
1203/*
1204 * fib_path_copy
1205 *
1206 * Copy a path. return index of new path.
1207 */
1208fib_node_index_t
1209fib_path_copy (fib_node_index_t path_index,
1210 fib_node_index_t path_list_index)
1211{
1212 fib_path_t *path, *orig_path;
1213
1214 pool_get(fib_path_pool, path);
1215
1216 orig_path = fib_path_get(path_index);
1217 ASSERT(NULL != orig_path);
1218
1219 memcpy(path, orig_path, sizeof(*path));
1220
1221 FIB_PATH_DBG(path, "create-copy:%d", path_index);
1222
1223 /*
1224 * reset the dynamic section
1225 */
1226 fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1227 path->fp_oper_flags = FIB_PATH_OPER_FLAG_NONE;
1228 path->fp_pl_index = path_list_index;
1229 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1230 memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1231 dpo_reset(&path->fp_dpo);
1232
1233 return (fib_path_get_index(path));
1234}
1235
1236/*
1237 * fib_path_destroy
1238 *
1239 * destroy a path that is no longer required
1240 */
1241void
1242fib_path_destroy (fib_node_index_t path_index)
1243{
1244 fib_path_t *path;
1245
1246 path = fib_path_get(path_index);
1247
1248 ASSERT(NULL != path);
1249 FIB_PATH_DBG(path, "destroy");
1250
1251 fib_path_unresolve(path);
1252
1253 fib_node_deinit(&path->fp_node);
1254 pool_put(fib_path_pool, path);
1255}
1256
1257/*
 1258 * fib_path_hash
 1259 *
 1260 * compute the hash of a path over its configuration-dependent region
1261 */
1262uword
1263fib_path_hash (fib_node_index_t path_index)
1264{
1265 fib_path_t *path;
1266
1267 path = fib_path_get(path_index);
1268
1269 return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1270 (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1271 STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1272 0));
1273}
1274
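/*
 * Note with a small sketch (added for exposition): only the fields between
 * the STRUCT_MARKs path_hash_start and path_hash_end are hashed, i.e. the
 * configuration-dependent part of the path (cfg flags, type, proto and the
 * per-type union). The derived state that follows (oper flags, fp_via_fib,
 * fp_dpo, fp_sibling) is deliberately excluded, so two identically
 * configured paths hash equal regardless of their current resolution state:
 *
 *   uword h1 = fib_path_hash(path_index1);
 *   uword h2 = fib_path_hash(path_index2);
 *   // same configuration => h1 == h2, even if one path is unresolved
 */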
1275/*
1276 * fib_path_cmp_i
1277 *
1278 * Compare two paths for equivalence.
1279 */
1280static int
1281fib_path_cmp_i (const fib_path_t *path1,
1282 const fib_path_t *path2)
1283{
1284 int res;
1285
1286 res = 1;
1287
1288 /*
1289 * paths of different types and protocol are not equal.
Neale Ranns57b58602017-07-15 07:37:25 -07001290 * paths that differ only in weight and/or preference are still equal.
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001291 */
1292 if (path1->fp_type != path2->fp_type)
1293 {
1294 res = (path1->fp_type - path2->fp_type);
1295 }
Neale Ranns32e1c012016-11-22 17:07:28 +00001296 else if (path1->fp_nh_proto != path2->fp_nh_proto)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001297 {
1298 res = (path1->fp_nh_proto - path2->fp_nh_proto);
1299 }
1300 else
1301 {
1302 /*
1303 * both paths are of the same type.
1304 * consider each type and its attributes in turn.
1305 */
1306 switch (path1->fp_type)
1307 {
1308 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1309 res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1310 &path2->attached_next_hop.fp_nh);
1311 if (0 == res) {
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001312 res = (path1->attached_next_hop.fp_interface -
1313 path2->attached_next_hop.fp_interface);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001314 }
1315 break;
1316 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001317 res = (path1->attached.fp_interface -
1318 path2->attached.fp_interface);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001319 break;
1320 case FIB_PATH_TYPE_RECURSIVE:
1321 res = ip46_address_cmp(&path1->recursive.fp_nh,
1322 &path2->recursive.fp_nh);
1323
1324 if (0 == res)
1325 {
1326 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1327 }
1328 break;
1329 case FIB_PATH_TYPE_DEAG:
1330 res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001331 if (0 == res)
1332 {
1333 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1334 }
1335 break;
1336 case FIB_PATH_TYPE_INTF_RX:
1337 res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001338 break;
1339 case FIB_PATH_TYPE_SPECIAL:
1340 case FIB_PATH_TYPE_RECEIVE:
1341 case FIB_PATH_TYPE_EXCLUSIVE:
1342 res = 0;
1343 break;
1344 }
1345 }
1346 return (res);
1347}
1348
1349/*
1350 * fib_path_cmp_for_sort
1351 *
1352 * Compare two paths for equivalence. Used during path sorting.
1353 * As usual 0 means equal.
1354 */
1355int
1356fib_path_cmp_for_sort (void * v1,
1357 void * v2)
1358{
1359 fib_node_index_t *pi1 = v1, *pi2 = v2;
1360 fib_path_t *path1, *path2;
1361
1362 path1 = fib_path_get(*pi1);
1363 path2 = fib_path_get(*pi2);
1364
Neale Ranns57b58602017-07-15 07:37:25 -07001365 /*
1366 * when sorting paths we want the highest preference paths
 1367 * first, so that the set of choices built is in preference order
1368 */
1369 if (path1->fp_preference != path2->fp_preference)
1370 {
1371 return (path1->fp_preference - path2->fp_preference);
1372 }
1373
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001374 return (fib_path_cmp_i(path1, path2));
1375}
1376
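/*
 * Usage sketch (an assumption about the call site, for illustration): the
 * path-list sorts its vector of path indices with this comparator so that
 * the numerically lowest preference (i.e. the most preferred paths) comes
 * first, e.g.:
 *
 *   vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 */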
1377/*
1378 * fib_path_cmp
1379 *
1380 * Compare two paths for equivalence.
1381 */
1382int
1383fib_path_cmp (fib_node_index_t pi1,
1384 fib_node_index_t pi2)
1385{
1386 fib_path_t *path1, *path2;
1387
1388 path1 = fib_path_get(pi1);
1389 path2 = fib_path_get(pi2);
1390
1391 return (fib_path_cmp_i(path1, path2));
1392}
1393
1394int
1395fib_path_cmp_w_route_path (fib_node_index_t path_index,
1396 const fib_route_path_t *rpath)
1397{
1398 fib_path_t *path;
1399 int res;
1400
1401 path = fib_path_get(path_index);
1402
1403 res = 1;
1404
1405 if (path->fp_weight != rpath->frp_weight)
1406 {
1407 res = (path->fp_weight - rpath->frp_weight);
1408 }
1409 else
1410 {
1411 /*
1412 * both paths are of the same type.
1413 * consider each type and its attributes in turn.
1414 */
1415 switch (path->fp_type)
1416 {
1417 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1418 res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1419 &rpath->frp_addr);
1420 if (0 == res)
1421 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001422 res = (path->attached_next_hop.fp_interface -
1423 rpath->frp_sw_if_index);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001424 }
1425 break;
1426 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001427 res = (path->attached.fp_interface - rpath->frp_sw_if_index);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001428 break;
1429 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsda78f952017-05-24 09:15:43 -07001430 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Rannsad422ed2016-11-02 14:20:04 +00001431 {
1432 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001433
1434 if (res == 0)
1435 {
1436 res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1437 }
Neale Rannsad422ed2016-11-02 14:20:04 +00001438 }
1439 else
1440 {
1441 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1442 &rpath->frp_addr);
1443 }
1444
1445 if (0 == res)
1446 {
1447 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1448 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001449 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001450 case FIB_PATH_TYPE_INTF_RX:
1451 res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1452 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001453 case FIB_PATH_TYPE_DEAG:
1454 res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001455 if (0 == res)
1456 {
1457 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1458 }
1459 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001460 case FIB_PATH_TYPE_SPECIAL:
1461 case FIB_PATH_TYPE_RECEIVE:
1462 case FIB_PATH_TYPE_EXCLUSIVE:
1463 res = 0;
1464 break;
1465 }
1466 }
1467 return (res);
1468}
1469
1470/*
1471 * fib_path_recursive_loop_detect
1472 *
 1473 * A forward walk of the FIB object graph to detect a cycle/loop. This
1474 * walk is initiated when an entry is linking to a new path list or from an old.
 1475 * The entry vector passed contains all the FIB entries that are children of this
1476 * path (it is all the entries encountered on the walk so far). If this vector
1477 * contains the entry this path resolve via, then a loop is about to form.
1478 * The loop must be allowed to form, since we need the dependencies in place
1479 * so that we can track when the loop breaks.
1480 * However, we MUST not produce a loop in the forwarding graph (else packets
1481 * would loop around the switch path until the loop breaks), so we mark recursive
1482 * paths as looped so that they do not contribute forwarding information.
 1483 * By marking the path as looped, an entry such as:
1484 * X/Y
1485 * via a.a.a.a (looped)
1486 * via b.b.b.b (not looped)
1487 * can still forward using the info provided by b.b.b.b only
1488 */
1489int
1490fib_path_recursive_loop_detect (fib_node_index_t path_index,
1491 fib_node_index_t **entry_indicies)
1492{
1493 fib_path_t *path;
1494
1495 path = fib_path_get(path_index);
1496
1497 /*
1498 * the forced drop path is never looped, cos it is never resolved.
1499 */
1500 if (fib_path_is_permanent_drop(path))
1501 {
1502 return (0);
1503 }
1504
1505 switch (path->fp_type)
1506 {
1507 case FIB_PATH_TYPE_RECURSIVE:
1508 {
1509 fib_node_index_t *entry_index, *entries;
1510 int looped = 0;
1511 entries = *entry_indicies;
1512
1513 vec_foreach(entry_index, entries) {
1514 if (*entry_index == path->fp_via_fib)
1515 {
1516 /*
1517 * the entry that is about to link to this path-list (or
1518 * one of this path-list's children) is the same entry that
1519 * this recursive path resolves through. this is a cycle.
1520 * abort the walk.
1521 */
1522 looped = 1;
1523 break;
1524 }
1525 }
1526
1527 if (looped)
1528 {
1529 FIB_PATH_DBG(path, "recursive loop formed");
1530 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1531
Neale Rannsda78f952017-05-24 09:15:43 -07001532 dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001533 }
1534 else
1535 {
1536 /*
1537 * no loop here yet. keep forward walking the graph.
1538 */
1539 if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1540 {
1541 FIB_PATH_DBG(path, "recursive loop formed");
1542 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1543 }
1544 else
1545 {
1546 FIB_PATH_DBG(path, "recursive loop cleared");
1547 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1548 }
1549 }
1550 break;
1551 }
1552 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1553 case FIB_PATH_TYPE_ATTACHED:
1554 case FIB_PATH_TYPE_SPECIAL:
1555 case FIB_PATH_TYPE_DEAG:
1556 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001557 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001558 case FIB_PATH_TYPE_EXCLUSIVE:
1559 /*
1560 * these path types cannot be part of a loop, since they are the leaves
1561 * of the graph.
1562 */
1563 break;
1564 }
1565
1566 return (fib_path_is_looped(path_index));
1567}
1568
1569int
1570fib_path_resolve (fib_node_index_t path_index)
1571{
1572 fib_path_t *path;
1573
1574 path = fib_path_get(path_index);
1575
1576 /*
1577 * hope for the best.
1578 */
1579 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1580
1581 /*
1582 * the forced drop path resolves via the drop adj
1583 */
1584 if (fib_path_is_permanent_drop(path))
1585 {
Neale Rannsda78f952017-05-24 09:15:43 -07001586 dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001587 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1588 return (fib_path_is_resolved(path_index));
1589 }
1590
1591 switch (path->fp_type)
1592 {
1593 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1594 fib_path_attached_next_hop_set(path);
1595 break;
1596 case FIB_PATH_TYPE_ATTACHED:
1597 /*
1598 * path->attached.fp_interface
1599 */
1600 if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1601 path->attached.fp_interface))
1602 {
1603 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1604 }
Neale Ranns8c4611b2017-05-23 03:43:47 -07001605 dpo_set(&path->fp_dpo,
1606 DPO_ADJACENCY,
Neale Rannsda78f952017-05-24 09:15:43 -07001607 path->fp_nh_proto,
Neale Ranns8c4611b2017-05-23 03:43:47 -07001608 fib_path_attached_get_adj(path,
Neale Rannsda78f952017-05-24 09:15:43 -07001609 dpo_proto_to_link(path->fp_nh_proto)));
Neale Ranns8c4611b2017-05-23 03:43:47 -07001610
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001611 /*
1612 * become a child of the adjacency so we receive updates
1613 * when the interface state changes
1614 */
1615 path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1616 FIB_NODE_TYPE_PATH,
1617 fib_path_get_index(path));
1618
1619 break;
1620 case FIB_PATH_TYPE_RECURSIVE:
1621 {
1622 /*
1623 * Create a RR source entry in the table for the address
1624 * that this path recurses through.
1625 * This resolve action is recursive, hence we may create
1626 * more paths in the process. more creates mean maybe realloc
1627 * of this path.
1628 */
1629 fib_node_index_t fei;
1630 fib_prefix_t pfx;
1631
1632 ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1633
Neale Rannsda78f952017-05-24 09:15:43 -07001634 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Rannsad422ed2016-11-02 14:20:04 +00001635 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001636 fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1637 path->recursive.fp_nh.fp_eos,
1638 &pfx);
Neale Rannsad422ed2016-11-02 14:20:04 +00001639 }
1640 else
1641 {
1642 fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1643 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001644
1645 fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1646 &pfx,
1647 FIB_SOURCE_RR,
Neale Rannsa0558302017-04-13 00:44:52 -07001648 FIB_ENTRY_FLAG_NONE);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001649
1650 path = fib_path_get(path_index);
1651 path->fp_via_fib = fei;
1652
1653 /*
1654 * become a dependent child of the entry so the path is
1655 * informed when the forwarding for the entry changes.
1656 */
1657 path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1658 FIB_NODE_TYPE_PATH,
1659 fib_path_get_index(path));
1660
1661 /*
1662 * create and configure the IP DPO
1663 */
1664 fib_path_recursive_adj_update(
1665 path,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001666 fib_path_to_chain_type(path),
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001667 &path->fp_dpo);
1668
1669 break;
1670 }
1671 case FIB_PATH_TYPE_SPECIAL:
1672 /*
1673 * Resolve via the drop
1674 */
Neale Rannsda78f952017-05-24 09:15:43 -07001675 dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001676 break;
1677 case FIB_PATH_TYPE_DEAG:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001678 {
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001679 /*
1680 * Resolve via a lookup DPO.
1681 * FIXME. control plane should add routes with a table ID
1682 */
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001683 lookup_cast_t cast;
1684
1685 cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
1686 LOOKUP_MULTICAST :
1687 LOOKUP_UNICAST);
1688
1689 lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
Neale Rannsda78f952017-05-24 09:15:43 -07001690 path->fp_nh_proto,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001691 cast,
1692 LOOKUP_INPUT_DST_ADDR,
1693 LOOKUP_TABLE_FROM_CONFIG,
1694 &path->fp_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001695 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001696 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001697 case FIB_PATH_TYPE_RECEIVE:
1698 /*
1699 * Resolve via a receive DPO.
1700 */
Neale Rannsda78f952017-05-24 09:15:43 -07001701 receive_dpo_add_or_lock(path->fp_nh_proto,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001702 path->receive.fp_interface,
1703 &path->receive.fp_addr,
1704 &path->fp_dpo);
1705 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001706 case FIB_PATH_TYPE_INTF_RX: {
1707 /*
1708 * Resolve via a receive DPO.
1709 */
Neale Ranns43161a82017-08-12 02:12:00 -07001710 interface_rx_dpo_add_or_lock(path->fp_nh_proto,
1711 path->intf_rx.fp_interface,
1712 &path->fp_dpo);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001713 break;
1714 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001715 case FIB_PATH_TYPE_EXCLUSIVE:
1716 /*
1717 * Resolve via the user provided DPO
1718 */
1719 dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
1720 break;
1721 }
1722
1723 return (fib_path_is_resolved(path_index));
1724}
1725
1726u32
1727fib_path_get_resolving_interface (fib_node_index_t path_index)
1728{
1729 fib_path_t *path;
1730
1731 path = fib_path_get(path_index);
1732
1733 switch (path->fp_type)
1734 {
1735 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1736 return (path->attached_next_hop.fp_interface);
1737 case FIB_PATH_TYPE_ATTACHED:
1738 return (path->attached.fp_interface);
1739 case FIB_PATH_TYPE_RECEIVE:
1740 return (path->receive.fp_interface);
1741 case FIB_PATH_TYPE_RECURSIVE:
Neale Ranns08b16482017-05-13 05:52:58 -07001742 if (fib_path_is_resolved(path_index))
1743 {
1744 return (fib_entry_get_resolving_interface(path->fp_via_fib));
1745 }
1746 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001747 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001748 case FIB_PATH_TYPE_SPECIAL:
1749 case FIB_PATH_TYPE_DEAG:
1750 case FIB_PATH_TYPE_EXCLUSIVE:
1751 break;
1752 }
1753 return (~0);
1754}
1755
1756adj_index_t
1757fib_path_get_adj (fib_node_index_t path_index)
1758{
1759 fib_path_t *path;
1760
1761 path = fib_path_get(path_index);
1762
1763 ASSERT(dpo_is_adj(&path->fp_dpo));
1764 if (dpo_is_adj(&path->fp_dpo))
1765 {
1766 return (path->fp_dpo.dpoi_index);
1767 }
1768 return (ADJ_INDEX_INVALID);
1769}
1770
Neale Ranns57b58602017-07-15 07:37:25 -07001771u16
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001772fib_path_get_weight (fib_node_index_t path_index)
1773{
1774 fib_path_t *path;
1775
1776 path = fib_path_get(path_index);
1777
1778 ASSERT(path);
1779
1780 return (path->fp_weight);
1781}
1782
Neale Ranns57b58602017-07-15 07:37:25 -07001783u16
1784fib_path_get_preference (fib_node_index_t path_index)
1785{
1786 fib_path_t *path;
1787
1788 path = fib_path_get(path_index);
1789
1790 ASSERT(path);
1791
1792 return (path->fp_preference);
1793}
1794
Neale Ranns3ee44042016-10-03 13:05:48 +01001795/**
1796 * @brief Contribute the path's adjacency to the list passed.
1797 * By calling this function over all paths, recursively, a child
1798 * can construct its full set of forwarding adjacencies, and hence its
1799 * uRPF list.
1800 */
1801void
1802fib_path_contribute_urpf (fib_node_index_t path_index,
1803 index_t urpf)
1804{
1805 fib_path_t *path;
1806
Neale Ranns3ee44042016-10-03 13:05:48 +01001807 path = fib_path_get(path_index);
1808
Neale Ranns88fc83e2017-04-05 08:11:14 -07001809 /*
1810 * resolved and unresolved paths contribute to the RPF list.
1811 */
Neale Ranns3ee44042016-10-03 13:05:48 +01001812 switch (path->fp_type)
1813 {
1814 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1815 fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
1816 break;
1817
1818 case FIB_PATH_TYPE_ATTACHED:
1819 fib_urpf_list_append(urpf, path->attached.fp_interface);
1820 break;
1821
1822 case FIB_PATH_TYPE_RECURSIVE:
Neale Ranns08b16482017-05-13 05:52:58 -07001823 if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
1824 !fib_path_is_looped(path_index))
Neale Ranns88fc83e2017-04-05 08:11:14 -07001825 {
1826 /*
 1827	     * a path can be unresolved due to resolution constraints, or unresolved
Neale Ranns08b16482017-05-13 05:52:58 -07001828	     * because it has no via entry; without a via there is nothing to contribute.
Neale Ranns88fc83e2017-04-05 08:11:14 -07001829 */
1830 fib_entry_contribute_urpf(path->fp_via_fib, urpf);
1831 }
Neale Ranns3ee44042016-10-03 13:05:48 +01001832 break;
1833
1834 case FIB_PATH_TYPE_EXCLUSIVE:
1835 case FIB_PATH_TYPE_SPECIAL:
1836 /*
1837 * these path types may link to an adj, if that's what
 1838	 * the client gave
1839 */
1840 if (dpo_is_adj(&path->fp_dpo))
1841 {
1842 ip_adjacency_t *adj;
1843
1844 adj = adj_get(path->fp_dpo.dpoi_index);
1845
1846 fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
1847 }
1848 break;
1849
1850 case FIB_PATH_TYPE_DEAG:
1851 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001852 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns3ee44042016-10-03 13:05:48 +01001853 /*
1854 * these path types don't link to an adj
1855 */
1856 break;
1857 }
1858}
1859
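/**
 * @brief For a deag path, stack an MPLS disposition DPO (carrying the
 * payload protocol and the path's RPF-ID) on top of the DPO passed in.
 * All other path types leave the DPO unchanged.
 */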
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001860void
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001861fib_path_stack_mpls_disp (fib_node_index_t path_index,
1862 dpo_proto_t payload_proto,
1863 dpo_id_t *dpo)
1864{
1865 fib_path_t *path;
1866
1867 path = fib_path_get(path_index);
1868
1869 ASSERT(path);
1870
1871 switch (path->fp_type)
1872 {
1873 case FIB_PATH_TYPE_DEAG:
1874 {
1875 dpo_id_t tmp = DPO_INVALID;
1876
1877 dpo_copy(&tmp, dpo);
1878 dpo_set(dpo,
1879 DPO_MPLS_DISPOSITION,
1880 payload_proto,
1881 mpls_disp_dpo_create(payload_proto,
1882 path->deag.fp_rpf_id,
1883 &tmp));
1884 dpo_reset(&tmp);
1885 break;
1886 }
1887 case FIB_PATH_TYPE_RECEIVE:
1888 case FIB_PATH_TYPE_ATTACHED:
1889 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1890 case FIB_PATH_TYPE_RECURSIVE:
1891 case FIB_PATH_TYPE_INTF_RX:
1892 case FIB_PATH_TYPE_EXCLUSIVE:
1893 case FIB_PATH_TYPE_SPECIAL:
1894 break;
1895 }
1896}
1897
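/**
 * @brief Contribute a DPO of the requested forwarding chain type.
 * If the requested type matches the path's native type, the path's own
 * DPO is copied; otherwise an object appropriate to the chain type is
 * constructed (e.g. an adjacency of the corresponding link type, an MPLS
 * lookup, or an interface-rx DPO) and returned in *dpo.
 */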
1898void
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001899fib_path_contribute_forwarding (fib_node_index_t path_index,
1900 fib_forward_chain_type_t fct,
1901 dpo_id_t *dpo)
1902{
1903 fib_path_t *path;
1904
1905 path = fib_path_get(path_index);
1906
1907 ASSERT(path);
1908 ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
1909
1910 FIB_PATH_DBG(path, "contribute");
1911
1912 /*
1913 * The DPO stored in the path was created when the path was resolved.
 1914     * This then represents the path's 'native' protocol, i.e. IP.
 1915     * For all other chain types we will need to go find something else.
1916 */
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001917 if (fib_path_to_chain_type(path) == fct)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001918 {
1919 dpo_copy(dpo, &path->fp_dpo);
1920 }
Neale Ranns5e575b12016-10-03 09:40:25 +01001921 else
1922 {
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001923 switch (path->fp_type)
1924 {
1925 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1926 switch (fct)
1927 {
1928 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1929 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1930 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1931 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
Neale Ranns5e575b12016-10-03 09:40:25 +01001932 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001933 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001934 {
1935 adj_index_t ai;
1936
1937 /*
Neale Rannsad422ed2016-11-02 14:20:04 +00001938		 * get an appropriate link type adj.
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001939 */
1940 ai = fib_path_attached_next_hop_get_adj(
1941 path,
1942 fib_forw_chain_type_to_link_type(fct));
1943 dpo_set(dpo, DPO_ADJACENCY,
1944 fib_forw_chain_type_to_dpo_proto(fct), ai);
1945 adj_unlock(ai);
1946
1947 break;
1948 }
Neale Ranns32e1c012016-11-22 17:07:28 +00001949 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1950 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001951 break;
Neale Ranns32e1c012016-11-22 17:07:28 +00001952 }
1953 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001954 case FIB_PATH_TYPE_RECURSIVE:
1955 switch (fct)
1956 {
1957 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1958 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1959 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001960 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
Neale Ranns32e1c012016-11-22 17:07:28 +00001961 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1962 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001963 fib_path_recursive_adj_update(path, fct, dpo);
1964 break;
Neale Ranns5e575b12016-10-03 09:40:25 +01001965 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001966 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns5e575b12016-10-03 09:40:25 +01001967 ASSERT(0);
1968 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001969 }
1970 break;
1971 case FIB_PATH_TYPE_DEAG:
Neale Ranns32e1c012016-11-22 17:07:28 +00001972 switch (fct)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001973 {
1974 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1975 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
1976 DPO_PROTO_MPLS,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001977 LOOKUP_UNICAST,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001978 LOOKUP_INPUT_DST_ADDR,
1979 LOOKUP_TABLE_FROM_CONFIG,
1980 dpo);
1981 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001982 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001983 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1984 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001985 dpo_copy(dpo, &path->fp_dpo);
Neale Ranns32e1c012016-11-22 17:07:28 +00001986 break;
1987 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1988 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns5e575b12016-10-03 09:40:25 +01001989 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001990 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns5e575b12016-10-03 09:40:25 +01001991 ASSERT(0);
1992 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001993 }
1994 break;
1995 case FIB_PATH_TYPE_EXCLUSIVE:
1996 dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
1997 break;
1998 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns32e1c012016-11-22 17:07:28 +00001999 switch (fct)
2000 {
2001 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2002 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2003 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2004 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2005 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08002006 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns8c4611b2017-05-23 03:43:47 -07002007 {
2008 adj_index_t ai;
2009
2010 /*
 2011		 * get an appropriate link type adj.
2012 */
2013 ai = fib_path_attached_get_adj(
2014 path,
2015 fib_forw_chain_type_to_link_type(fct));
2016 dpo_set(dpo, DPO_ADJACENCY,
2017 fib_forw_chain_type_to_dpo_proto(fct), ai);
2018 adj_unlock(ai);
2019 break;
2020 }
Neale Ranns32e1c012016-11-22 17:07:28 +00002021 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2022 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2023 {
2024 adj_index_t ai;
2025
2026 /*
2027 * Create the adj needed for sending IP multicast traffic
2028 */
Neale Rannsda78f952017-05-24 09:15:43 -07002029 ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns32e1c012016-11-22 17:07:28 +00002030 fib_forw_chain_type_to_link_type(fct),
2031 path->attached.fp_interface);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002032 dpo_set(dpo, DPO_ADJACENCY,
Neale Ranns32e1c012016-11-22 17:07:28 +00002033 fib_forw_chain_type_to_dpo_proto(fct),
2034 ai);
2035 adj_unlock(ai);
2036 }
2037 break;
2038 }
2039 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002040 case FIB_PATH_TYPE_INTF_RX:
2041 /*
 2042             * Resolve via an interface receive DPO.
2043 */
Neale Ranns43161a82017-08-12 02:12:00 -07002044 interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2045 path->attached.fp_interface,
2046 dpo);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002047 break;
Neale Ranns32e1c012016-11-22 17:07:28 +00002048 case FIB_PATH_TYPE_RECEIVE:
2049 case FIB_PATH_TYPE_SPECIAL:
2050 dpo_copy(dpo, &path->fp_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002051 break;
2052 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002053 }
2054}
2055
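/**
 * @brief If the path is resolved, append one load-balance bucket
 * (weight, path index and the DPO contributed for the given chain type)
 * to the vector passed and return the (possibly reallocated) vector.
 */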
2056load_balance_path_t *
2057fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2058 fib_forward_chain_type_t fct,
2059 load_balance_path_t *hash_key)
2060{
2061 load_balance_path_t *mnh;
2062 fib_path_t *path;
2063
2064 path = fib_path_get(path_index);
2065
2066 ASSERT(path);
2067
2068 if (fib_path_is_resolved(path_index))
2069 {
2070 vec_add2(hash_key, mnh, 1);
2071
2072 mnh->path_weight = path->fp_weight;
2073 mnh->path_index = path_index;
Neale Ranns5e575b12016-10-03 09:40:25 +01002074 fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002075 }
2076
2077 return (hash_key);
2078}
2079
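/**
 * @brief Return non-zero if the path is recursive and its resolution is
 * constrained, i.e. resolve-via-attached or resolve-via-host is set.
 */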
2080int
Neale Rannsf12a83f2017-04-18 09:09:40 -07002081fib_path_is_recursive_constrained (fib_node_index_t path_index)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002082{
2083 fib_path_t *path;
2084
2085 path = fib_path_get(path_index);
2086
Neale Rannsf12a83f2017-04-18 09:09:40 -07002087 return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2088 ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2089 (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002090}
2091
2092int
2093fib_path_is_exclusive (fib_node_index_t path_index)
2094{
2095 fib_path_t *path;
2096
2097 path = fib_path_get(path_index);
2098
2099 return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2100}
2101
2102int
2103fib_path_is_deag (fib_node_index_t path_index)
2104{
2105 fib_path_t *path;
2106
2107 path = fib_path_get(path_index);
2108
2109 return (FIB_PATH_TYPE_DEAG == path->fp_type);
2110}
2111
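/**
 * @brief A path is resolved when it has a valid DPO, its RESOLVED
 * operational flag is set, it is not part of a recursive loop and it is
 * not a permanent drop.
 */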
2112int
2113fib_path_is_resolved (fib_node_index_t path_index)
2114{
2115 fib_path_t *path;
2116
2117 path = fib_path_get(path_index);
2118
2119 return (dpo_id_is_valid(&path->fp_dpo) &&
2120 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2121 !fib_path_is_looped(path_index) &&
2122 !fib_path_is_permanent_drop(path));
2123}
2124
2125int
2126fib_path_is_looped (fib_node_index_t path_index)
2127{
2128 fib_path_t *path;
2129
2130 path = fib_path_get(path_index);
2131
2132 return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2133}
2134
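/**
 * @brief Path-list walk callback that encodes each path into the vector
 * of fib_route_path_encode_t passed in the walk context.
 */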
Neale Ranns81424992017-05-18 03:03:22 -07002135fib_path_list_walk_rc_t
Steven01b07122016-11-02 10:40:09 -07002136fib_path_encode (fib_node_index_t path_list_index,
2137 fib_node_index_t path_index,
2138 void *ctx)
2139{
2140 fib_route_path_encode_t **api_rpaths = ctx;
2141 fib_route_path_encode_t *api_rpath;
2142 fib_path_t *path;
2143
2144 path = fib_path_get(path_index);
2145 if (!path)
Neale Ranns81424992017-05-18 03:03:22 -07002146 return (FIB_PATH_LIST_WALK_CONTINUE);
Steven01b07122016-11-02 10:40:09 -07002147 vec_add2(*api_rpaths, api_rpath, 1);
2148 api_rpath->rpath.frp_weight = path->fp_weight;
Neale Ranns57b58602017-07-15 07:37:25 -07002149 api_rpath->rpath.frp_preference = path->fp_preference;
Steven01b07122016-11-02 10:40:09 -07002150 api_rpath->rpath.frp_proto = path->fp_nh_proto;
2151 api_rpath->rpath.frp_sw_if_index = ~0;
2152 api_rpath->dpo = path->exclusive.fp_ex_dpo;
2153 switch (path->fp_type)
2154 {
2155 case FIB_PATH_TYPE_RECEIVE:
2156 api_rpath->rpath.frp_addr = path->receive.fp_addr;
2157 api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
Florin Coras91d341c2017-07-29 11:50:31 -07002158 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002159 break;
2160 case FIB_PATH_TYPE_ATTACHED:
2161 api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
Florin Coras91d341c2017-07-29 11:50:31 -07002162 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002163 break;
2164 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2165 api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2166 api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2167 break;
2168 case FIB_PATH_TYPE_SPECIAL:
2169 break;
2170 case FIB_PATH_TYPE_DEAG:
Neale Ranns7b7ba572017-10-01 12:08:10 -07002171 api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2172 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002173 break;
2174 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsad422ed2016-11-02 14:20:04 +00002175 api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
Steven01b07122016-11-02 10:40:09 -07002176 break;
2177 default:
2178 break;
2179 }
Neale Ranns81424992017-05-18 03:03:22 -07002180 return (FIB_PATH_LIST_WALK_CONTINUE);
Steven01b07122016-11-02 10:40:09 -07002181}
2182
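/**
 * @brief Return the DPO protocol of the path's next-hop.
 */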
Neale Rannsda78f952017-05-24 09:15:43 -07002183dpo_proto_t
Neale Rannsad422ed2016-11-02 14:20:04 +00002184fib_path_get_proto (fib_node_index_t path_index)
2185{
2186 fib_path_t *path;
2187
2188 path = fib_path_get(path_index);
2189
2190 return (path->fp_nh_proto);
2191}
2192
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002193void
2194fib_path_module_init (void)
2195{
2196 fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2197}
2198
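/*
 * CLI handler for "show fib paths"; with no argument each path in the
 * pool is shown in summary, with a numeric argument that single path is
 * shown in detail, e.g. (illustrative): "show fib paths 12".
 */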
2199static clib_error_t *
2200show_fib_path_command (vlib_main_t * vm,
2201 unformat_input_t * input,
2202 vlib_cli_command_t * cmd)
2203{
Neale Ranns33a7dd52016-10-07 15:14:33 +01002204 fib_node_index_t pi;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002205 fib_path_t *path;
2206
Neale Ranns33a7dd52016-10-07 15:14:33 +01002207 if (unformat (input, "%d", &pi))
2208 {
2209 /*
2210 * show one in detail
2211 */
2212 if (!pool_is_free_index(fib_path_pool, pi))
2213 {
2214 path = fib_path_get(pi);
2215 u8 *s = fib_path_format(pi, NULL);
2216 s = format(s, "children:");
2217 s = fib_node_children_format(path->fp_node.fn_children, s);
2218 vlib_cli_output (vm, "%s", s);
2219 vec_free(s);
2220 }
2221 else
2222 {
2223 vlib_cli_output (vm, "path %d invalid", pi);
2224 }
2225 }
2226 else
2227 {
2228 vlib_cli_output (vm, "FIB Paths");
2229 pool_foreach(path, fib_path_pool,
2230 ({
2231 vlib_cli_output (vm, "%U", format_fib_path, path);
2232 }));
2233 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002234
2235 return (NULL);
2236}
2237
2238VLIB_CLI_COMMAND (show_fib_path, static) = {
2239 .path = "show fib paths",
2240 .function = show_fib_path_command,
2241 .short_help = "show fib paths",
2242};