Neale Ranns0bfe5d82016-08-25 15:29:12 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include <vlib/vlib.h>
17#include <vnet/vnet.h>
18#include <vnet/ip/format.h>
19#include <vnet/ip/ip.h>
20#include <vnet/dpo/drop_dpo.h>
21#include <vnet/dpo/receive_dpo.h>
22#include <vnet/dpo/load_balance_map.h>
23#include <vnet/dpo/lookup_dpo.h>
Neale Ranns43161a82017-08-12 02:12:00 -070024#include <vnet/dpo/interface_rx_dpo.h>
Neale Ranns0f26c5a2017-03-01 15:12:11 -080025#include <vnet/dpo/mpls_disposition.h>
Neale Ranns6f631152017-10-03 08:20:21 -070026#include <vnet/dpo/l2_bridge_dpo.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010027
28#include <vnet/adj/adj.h>
Neale Ranns32e1c012016-11-22 17:07:28 +000029#include <vnet/adj/adj_mcast.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010030
Neale Ranns3ee44042016-10-03 13:05:48 +010031#include <vnet/fib/fib_path.h>
32#include <vnet/fib/fib_node.h>
33#include <vnet/fib/fib_table.h>
34#include <vnet/fib/fib_entry.h>
35#include <vnet/fib/fib_path_list.h>
36#include <vnet/fib/fib_internal.h>
37#include <vnet/fib/fib_urpf_list.h>
Neale Rannsa3af3372017-03-28 03:49:52 -070038#include <vnet/fib/mpls_fib.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010039
40/**
 41 * Enumeration of path types
42 */
43typedef enum fib_path_type_t_ {
44 /**
45 * Marker. Add new types after this one.
46 */
47 FIB_PATH_TYPE_FIRST = 0,
48 /**
49 * Attached-nexthop. An interface and a nexthop are known.
50 */
51 FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
52 /**
53 * attached. Only the interface is known.
54 */
55 FIB_PATH_TYPE_ATTACHED,
56 /**
57 * recursive. Only the next-hop is known.
58 */
59 FIB_PATH_TYPE_RECURSIVE,
60 /**
61 * special. nothing is known. so we drop.
62 */
63 FIB_PATH_TYPE_SPECIAL,
64 /**
65 * exclusive. user provided adj.
66 */
67 FIB_PATH_TYPE_EXCLUSIVE,
68 /**
69 * deag. Link to a lookup adj in the next table
70 */
71 FIB_PATH_TYPE_DEAG,
72 /**
Neale Ranns0f26c5a2017-03-01 15:12:11 -080073 * interface receive.
74 */
75 FIB_PATH_TYPE_INTF_RX,
76 /**
Neale Ranns0bfe5d82016-08-25 15:29:12 +010077 * receive. it's for-us.
78 */
79 FIB_PATH_TYPE_RECEIVE,
80 /**
81 * Marker. Add new types before this one, then update it.
82 */
83 FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
84} __attribute__ ((packed)) fib_path_type_t;
85
86/**
87 * The maximum number of path_types
88 */
89#define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)
90
91#define FIB_PATH_TYPES { \
92 [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop", \
93 [FIB_PATH_TYPE_ATTACHED] = "attached", \
94 [FIB_PATH_TYPE_RECURSIVE] = "recursive", \
95 [FIB_PATH_TYPE_SPECIAL] = "special", \
96 [FIB_PATH_TYPE_EXCLUSIVE] = "exclusive", \
97 [FIB_PATH_TYPE_DEAG] = "deag", \
Neale Ranns0f26c5a2017-03-01 15:12:11 -080098 [FIB_PATH_TYPE_INTF_RX] = "intf-rx", \
Neale Ranns0bfe5d82016-08-25 15:29:12 +010099 [FIB_PATH_TYPE_RECEIVE] = "receive", \
100}
101
102#define FOR_EACH_FIB_PATH_TYPE(_item) \
103 for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
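/*
 * Illustrative usage (a sketch added for clarity, not part of the original
 * file): the iterator above can be combined with the fib_path_type_names
 * table (defined from FIB_PATH_TYPES further down) to walk and print every
 * path type, in the same way format_fib_path walks the attribute names:
 *
 *   u8 *s = NULL;
 *   fib_path_type_t pt;
 *
 *   FOR_EACH_FIB_PATH_TYPE(pt) {
 *       s = format(s, "%s ", fib_path_type_names[pt]);
 *   }
 */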
104
105/**
 106 * Enumeration of path operational (i.e. derived) attributes
107 */
108typedef enum fib_path_oper_attribute_t_ {
109 /**
110 * Marker. Add new types after this one.
111 */
112 FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
113 /**
114 * The path forms part of a recursive loop.
115 */
116 FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
117 /**
118 * The path is resolved
119 */
120 FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
121 /**
Neale Ranns4b919a52017-03-11 05:55:21 -0800122 * The path is attached, despite what the next-hop may say.
123 */
124 FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
125 /**
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100126 * The path has become a permanent drop.
127 */
128 FIB_PATH_OPER_ATTRIBUTE_DROP,
129 /**
130 * Marker. Add new types before this one, then update it.
131 */
132 FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
133} __attribute__ ((packed)) fib_path_oper_attribute_t;
134
135/**
136 * The maximum number of path operational attributes
137 */
138#define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
139
140#define FIB_PATH_OPER_ATTRIBUTES { \
141 [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop", \
142 [FIB_PATH_OPER_ATTRIBUTE_RESOLVED] = "resolved", \
143 [FIB_PATH_OPER_ATTRIBUTE_DROP] = "drop", \
144}
145
146#define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
147 for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
148 _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
149 _item++)
150
151/**
152 * Path flags from the attributes
153 */
154typedef enum fib_path_oper_flags_t_ {
155 FIB_PATH_OPER_FLAG_NONE = 0,
156 FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
157 FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
158 FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
Neale Ranns4b919a52017-03-11 05:55:21 -0800159 FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100160} __attribute__ ((packed)) fib_path_oper_flags_t;
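/*
 * Note (added for clarity): each operational flag is simply
 * (1 << attribute), so a path's flags can be decoded by walking the
 * attribute enum, exactly as format_fib_path does below:
 *
 *   fib_path_oper_attribute_t oattr;
 *
 *   FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
 *       if ((1 << oattr) & path->fp_oper_flags) {
 *           ... the path has this operational attribute set ...
 *       }
 *   }
 */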
161
162/**
163 * A FIB path
164 */
165typedef struct fib_path_t_ {
166 /**
167 * A path is a node in the FIB graph.
168 */
169 fib_node_t fp_node;
170
171 /**
172 * The index of the path-list to which this path belongs
173 */
174 u32 fp_pl_index;
175
176 /**
177 * This marks the start of the memory area used to hash
178 * the path
179 */
180 STRUCT_MARK(path_hash_start);
181
182 /**
183 * Configuration Flags
184 */
185 fib_path_cfg_flags_t fp_cfg_flags;
186
187 /**
188 * The type of the path. This is the selector for the union
189 */
190 fib_path_type_t fp_type;
191
192 /**
193 * The protocol of the next-hop, i.e. the address family of the
194 * next-hop's address. We can't derive this from the address itself
195 * since the address can be all zeros
196 */
Neale Rannsda78f952017-05-24 09:15:43 -0700197 dpo_proto_t fp_nh_proto;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100198
199 /**
Neale Ranns57b58602017-07-15 07:37:25 -0700200 * UCMP [unnormalised] weight
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100201 */
Neale Rannsa0a908f2017-08-01 11:40:03 -0700202 u8 fp_weight;
203
Neale Ranns57b58602017-07-15 07:37:25 -0700204 /**
205 * A path preference. 0 is the best.
206 * Only paths of the best preference, that are 'up', are considered
207 * for forwarding.
208 */
Neale Rannsa0a908f2017-08-01 11:40:03 -0700209 u8 fp_preference;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100210
211 /**
212 * per-type union of the data required to resolve the path
213 */
214 union {
215 struct {
216 /**
217 * The next-hop
218 */
219 ip46_address_t fp_nh;
220 /**
221 * The interface
222 */
223 u32 fp_interface;
224 } attached_next_hop;
225 struct {
226 /**
227 * The interface
228 */
229 u32 fp_interface;
230 } attached;
231 struct {
Neale Rannsad422ed2016-11-02 14:20:04 +0000232 union
233 {
234 /**
235 * The next-hop
236 */
237 ip46_address_t fp_ip;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800238 struct {
239 /**
240 * The local label to resolve through.
241 */
242 mpls_label_t fp_local_label;
243 /**
244 * The EOS bit of the resolving label
245 */
246 mpls_eos_bit_t fp_eos;
247 };
Neale Rannsad422ed2016-11-02 14:20:04 +0000248 } fp_nh;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100249 /**
250 * The FIB table index in which to find the next-hop.
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100251 */
252 fib_node_index_t fp_tbl_id;
253 } recursive;
254 struct {
255 /**
Neale Rannsad422ed2016-11-02 14:20:04 +0000256 * The FIB index in which to perform the next lookup
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100257 */
258 fib_node_index_t fp_tbl_id;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800259 /**
260 * The RPF-ID to tag the packets with
261 */
262 fib_rpf_id_t fp_rpf_id;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100263 } deag;
264 struct {
265 } special;
266 struct {
267 /**
268 * The user provided 'exclusive' DPO
269 */
270 dpo_id_t fp_ex_dpo;
271 } exclusive;
272 struct {
273 /**
274 * The interface on which the local address is configured
275 */
276 u32 fp_interface;
277 /**
278 * The next-hop
279 */
280 ip46_address_t fp_addr;
281 } receive;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800282 struct {
283 /**
284 * The interface on which the packets will be input.
285 */
286 u32 fp_interface;
287 } intf_rx;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100288 };
289 STRUCT_MARK(path_hash_end);
290
291 /**
 292 * Members in this last section represent information that is
 293 * derived during resolution. It should not be copied to new paths
294 * nor compared.
295 */
296
297 /**
298 * Operational Flags
299 */
300 fib_path_oper_flags_t fp_oper_flags;
301
302 /**
 303 * the resolving via fib. not part of the union, since it is not part
304 * of the path's hash.
305 */
306 fib_node_index_t fp_via_fib;
307
308 /**
309 * The Data-path objects through which this path resolves for IP.
310 */
311 dpo_id_t fp_dpo;
312
313 /**
314 * the index of this path in the parent's child list.
315 */
316 u32 fp_sibling;
317} fib_path_t;
318
319/*
320 * Array of strings/names for the path types and attributes
321 */
322static const char *fib_path_type_names[] = FIB_PATH_TYPES;
323static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
324static const char *fib_path_cfg_attribute_names[] = FIB_PATH_CFG_ATTRIBUTES;
325
326/*
327 * The memory pool from which we allocate all the paths
328 */
329static fib_path_t *fib_path_pool;
330
331/*
332 * Debug macro
333 */
334#ifdef FIB_DEBUG
335#define FIB_PATH_DBG(_p, _fmt, _args...) \
336{ \
337 u8 *_tmp = NULL; \
338 _tmp = fib_path_format(fib_path_get_index(_p), _tmp); \
339 clib_warning("path:[%d:%s]:" _fmt, \
340 fib_path_get_index(_p), _tmp, \
341 ##_args); \
342 vec_free(_tmp); \
343}
344#else
345#define FIB_PATH_DBG(_p, _fmt, _args...)
346#endif
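/*
 * Example (illustrative, added for clarity): with FIB_DEBUG defined the
 * macro logs the path's index and its formatted state, e.g.
 *
 *   FIB_PATH_DBG(path, "create");
 *
 * without FIB_DEBUG it compiles away to nothing.
 */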
347
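/*
 * Get a path object from its index in the path pool.
 */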
348static fib_path_t *
349fib_path_get (fib_node_index_t index)
350{
351 return (pool_elt_at_index(fib_path_pool, index));
352}
353
354static fib_node_index_t
355fib_path_get_index (fib_path_t *path)
356{
357 return (path - fib_path_pool);
358}
359
360static fib_node_t *
361fib_path_get_node (fib_node_index_t index)
362{
363 return ((fib_node_t*)fib_path_get(index));
364}
365
366static fib_path_t*
367fib_path_from_fib_node (fib_node_t *node)
368{
369#if CLIB_DEBUG > 0
370 ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
371#endif
372 return ((fib_path_t*)node);
373}
374
375u8 *
376format_fib_path (u8 * s, va_list * args)
377{
378 fib_path_t *path = va_arg (*args, fib_path_t *);
379 vnet_main_t * vnm = vnet_get_main();
380 fib_path_oper_attribute_t oattr;
381 fib_path_cfg_attribute_t cattr;
382
383 s = format (s, " index:%d ", fib_path_get_index(path));
384 s = format (s, "pl-index:%d ", path->fp_pl_index);
Neale Rannsda78f952017-05-24 09:15:43 -0700385 s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100386 s = format (s, "weight=%d ", path->fp_weight);
Neale Ranns57b58602017-07-15 07:37:25 -0700387 s = format (s, "pref=%d ", path->fp_preference);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100388 s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
389 if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
390 s = format(s, " oper-flags:");
391 FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
392 if ((1<<oattr) & path->fp_oper_flags) {
393 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
394 }
395 }
396 }
397 if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
398 s = format(s, " cfg-flags:");
399 FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
400 if ((1<<cattr) & path->fp_cfg_flags) {
401 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
402 }
403 }
404 }
405 s = format(s, "\n ");
406
407 switch (path->fp_type)
408 {
409 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
410 s = format (s, "%U", format_ip46_address,
411 &path->attached_next_hop.fp_nh,
412 IP46_TYPE_ANY);
413 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
414 {
415 s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
416 }
417 else
418 {
419 s = format (s, " %U",
420 format_vnet_sw_interface_name,
421 vnm,
422 vnet_get_sw_interface(
423 vnm,
424 path->attached_next_hop.fp_interface));
425 if (vnet_sw_interface_is_p2p(vnet_get_main(),
426 path->attached_next_hop.fp_interface))
427 {
428 s = format (s, " (p2p)");
429 }
430 }
431 if (!dpo_id_is_valid(&path->fp_dpo))
432 {
433 s = format(s, "\n unresolved");
434 }
435 else
436 {
437 s = format(s, "\n %U",
438 format_dpo_id,
439 &path->fp_dpo, 13);
440 }
441 break;
442 case FIB_PATH_TYPE_ATTACHED:
443 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
444 {
445 s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
446 }
447 else
448 {
449 s = format (s, " %U",
450 format_vnet_sw_interface_name,
451 vnm,
452 vnet_get_sw_interface(
453 vnm,
454 path->attached.fp_interface));
455 }
456 break;
457 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsda78f952017-05-24 09:15:43 -0700458 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Rannsad422ed2016-11-02 14:20:04 +0000459 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800460 s = format (s, "via %U %U",
Neale Rannsad422ed2016-11-02 14:20:04 +0000461 format_mpls_unicast_label,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800462 path->recursive.fp_nh.fp_local_label,
463 format_mpls_eos_bit,
464 path->recursive.fp_nh.fp_eos);
Neale Rannsad422ed2016-11-02 14:20:04 +0000465 }
466 else
467 {
468 s = format (s, "via %U",
469 format_ip46_address,
470 &path->recursive.fp_nh.fp_ip,
471 IP46_TYPE_ANY);
472 }
473 s = format (s, " in fib:%d",
474 path->recursive.fp_tbl_id,
475 path->fp_via_fib);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100476 s = format (s, " via-fib:%d", path->fp_via_fib);
477 s = format (s, " via-dpo:[%U:%d]",
478 format_dpo_type, path->fp_dpo.dpoi_type,
479 path->fp_dpo.dpoi_index);
480
481 break;
482 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800483 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100484 case FIB_PATH_TYPE_SPECIAL:
485 case FIB_PATH_TYPE_DEAG:
486 case FIB_PATH_TYPE_EXCLUSIVE:
487 if (dpo_id_is_valid(&path->fp_dpo))
488 {
489 s = format(s, "%U", format_dpo_id,
490 &path->fp_dpo, 2);
491 }
492 break;
493 }
494 return (s);
495}
496
497u8 *
498fib_path_format (fib_node_index_t pi, u8 *s)
499{
500 fib_path_t *path;
501
502 path = fib_path_get(pi);
503 ASSERT(NULL != path);
504
505 return (format (s, "%U", format_fib_path, path));
506}
507
508u8 *
509fib_path_adj_format (fib_node_index_t pi,
510 u32 indent,
511 u8 *s)
512{
513 fib_path_t *path;
514
515 path = fib_path_get(pi);
516 ASSERT(NULL != path);
517
518 if (!dpo_id_is_valid(&path->fp_dpo))
519 {
520 s = format(s, " unresolved");
521 }
522 else
523 {
524 s = format(s, "%U", format_dpo_id,
525 &path->fp_dpo, 2);
526 }
527
528 return (s);
529}
530
531/*
532 * fib_path_last_lock_gone
533 *
534 * We don't share paths, we share path lists, so the [un]lock functions
535 * are no-ops
536 */
537static void
538fib_path_last_lock_gone (fib_node_t *node)
539{
540 ASSERT(0);
541}
542
543static const adj_index_t
544fib_path_attached_next_hop_get_adj (fib_path_t *path,
Neale Ranns924d03a2016-10-19 08:25:46 +0100545 vnet_link_t link)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100546{
547 if (vnet_sw_interface_is_p2p(vnet_get_main(),
548 path->attached_next_hop.fp_interface))
549 {
550 /*
551 * if the interface is p2p then the adj for the specific
552 * neighbour on that link will never exist. on p2p links
553 * the subnet address (the attached route) links to the
554 * auto-adj (see below), we want that adj here too.
555 */
Neale Rannsda78f952017-05-24 09:15:43 -0700556 return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100557 link,
558 &zero_addr,
559 path->attached_next_hop.fp_interface));
560 }
561 else
562 {
Neale Rannsda78f952017-05-24 09:15:43 -0700563 return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100564 link,
565 &path->attached_next_hop.fp_nh,
566 path->attached_next_hop.fp_interface));
567 }
568}
569
570static void
571fib_path_attached_next_hop_set (fib_path_t *path)
572{
573 /*
 574 * resolve directly via the adjacency described by the
575 * interface and next-hop
576 */
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100577 dpo_set(&path->fp_dpo,
578 DPO_ADJACENCY,
Neale Rannsda78f952017-05-24 09:15:43 -0700579 path->fp_nh_proto,
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100580 fib_path_attached_next_hop_get_adj(
581 path,
Neale Rannsda78f952017-05-24 09:15:43 -0700582 dpo_proto_to_link(path->fp_nh_proto)));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100583
584 /*
585 * become a child of the adjacency so we receive updates
586 * when its rewrite changes
587 */
588 path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
589 FIB_NODE_TYPE_PATH,
590 fib_path_get_index(path));
Neale Ranns88fc83e2017-04-05 08:11:14 -0700591
592 if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
593 path->attached_next_hop.fp_interface) ||
594 !adj_is_up(path->fp_dpo.dpoi_index))
595 {
596 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
597 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100598}
599
Neale Ranns8c4611b2017-05-23 03:43:47 -0700600static const adj_index_t
601fib_path_attached_get_adj (fib_path_t *path,
602 vnet_link_t link)
603{
604 if (vnet_sw_interface_is_p2p(vnet_get_main(),
605 path->attached.fp_interface))
606 {
607 /*
608 * point-2-point interfaces do not require a glean, since
609 * there is nothing to ARP. Install a rewrite/nbr adj instead
610 */
Neale Rannsda78f952017-05-24 09:15:43 -0700611 return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns8c4611b2017-05-23 03:43:47 -0700612 link,
613 &zero_addr,
614 path->attached.fp_interface));
615 }
616 else
617 {
Neale Rannsda78f952017-05-24 09:15:43 -0700618 return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns8c4611b2017-05-23 03:43:47 -0700619 path->attached.fp_interface,
620 NULL));
621 }
622}
623
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100624/*
 625 * create or update the path's recursive adj
626 */
627static void
628fib_path_recursive_adj_update (fib_path_t *path,
629 fib_forward_chain_type_t fct,
630 dpo_id_t *dpo)
631{
Neale Ranns948e00f2016-10-20 13:39:34 +0100632 dpo_id_t via_dpo = DPO_INVALID;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100633
634 /*
635 * get the DPO to resolve through from the via-entry
636 */
637 fib_entry_contribute_forwarding(path->fp_via_fib,
638 fct,
639 &via_dpo);
640
641
642 /*
643 * hope for the best - clear if restrictions apply.
644 */
645 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
646
647 /*
648 * Validate any recursion constraints and over-ride the via
649 * adj if not met
650 */
651 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
652 {
653 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
Neale Rannsda78f952017-05-24 09:15:43 -0700654 dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100655 }
656 else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
657 {
658 /*
659 * the via FIB must be a host route.
660 * note the via FIB just added will always be a host route
661 * since it is an RR source added host route. So what we need to
662 * check is whether the route has other sources. If it does then
663 * some other source has added it as a host route. If it doesn't
664 * then it was added only here and inherits forwarding from a cover.
665 * the cover is not a host route.
666 * The RR source is the lowest priority source, so we check if it
667 * is the best. if it is there are no other sources.
668 */
669 if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
670 {
671 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
Neale Rannsda78f952017-05-24 09:15:43 -0700672 dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100673
674 /*
675 * PIC edge trigger. let the load-balance maps know
676 */
677 load_balance_map_path_state_change(fib_path_get_index(path));
678 }
679 }
680 else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
681 {
682 /*
683 * RR source entries inherit the flags from the cover, so
684 * we can check the via directly
685 */
686 if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
687 {
688 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
Neale Rannsda78f952017-05-24 09:15:43 -0700689 dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100690
691 /*
692 * PIC edge trigger. let the load-balance maps know
693 */
694 load_balance_map_path_state_change(fib_path_get_index(path));
695 }
696 }
Neale Ranns88fc83e2017-04-05 08:11:14 -0700697 /*
698 * check for over-riding factors on the FIB entry itself
699 */
700 if (!fib_entry_is_resolved(path->fp_via_fib))
701 {
702 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
Neale Rannsda78f952017-05-24 09:15:43 -0700703 dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns88fc83e2017-04-05 08:11:14 -0700704
705 /*
706 * PIC edge trigger. let the load-balance maps know
707 */
708 load_balance_map_path_state_change(fib_path_get_index(path));
709 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100710
711 /*
Neale Ranns57b58602017-07-15 07:37:25 -0700712 * If this path is contributing a drop, then it's not resolved
713 */
714 if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
715 {
716 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
717 }
718
719 /*
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100720 * update the path's contributed DPO
721 */
722 dpo_copy(dpo, &via_dpo);
723
Neale Rannsda78f952017-05-24 09:15:43 -0700724 FIB_PATH_DBG(path, "recursive update:");
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100725
726 dpo_reset(&via_dpo);
727}
728
729/*
730 * fib_path_is_permanent_drop
731 *
732 * Return !0 if the path is configured to permanently drop,
733 * despite other attributes.
734 */
735static int
736fib_path_is_permanent_drop (fib_path_t *path)
737{
738 return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
739 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
740}
741
742/*
743 * fib_path_unresolve
744 *
745 * Remove our dependency on the resolution target
746 */
747static void
748fib_path_unresolve (fib_path_t *path)
749{
750 /*
751 * the forced drop path does not need unresolving
752 */
753 if (fib_path_is_permanent_drop(path))
754 {
755 return;
756 }
757
758 switch (path->fp_type)
759 {
760 case FIB_PATH_TYPE_RECURSIVE:
761 if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
762 {
763 fib_prefix_t pfx;
764
Neale Rannsad422ed2016-11-02 14:20:04 +0000765 fib_entry_get_prefix(path->fp_via_fib, &pfx);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100766 fib_entry_child_remove(path->fp_via_fib,
767 path->fp_sibling);
768 fib_table_entry_special_remove(path->recursive.fp_tbl_id,
769 &pfx,
770 FIB_SOURCE_RR);
771 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
772 }
773 break;
774 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100775 adj_child_remove(path->fp_dpo.dpoi_index,
776 path->fp_sibling);
777 adj_unlock(path->fp_dpo.dpoi_index);
778 break;
Neale Ranns6f631152017-10-03 08:20:21 -0700779 case FIB_PATH_TYPE_ATTACHED:
780 if (DPO_PROTO_ETHERNET != path->fp_nh_proto)
781 {
782 adj_child_remove(path->fp_dpo.dpoi_index,
783 path->fp_sibling);
784 adj_unlock(path->fp_dpo.dpoi_index);
785 }
786 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100787 case FIB_PATH_TYPE_EXCLUSIVE:
788 dpo_reset(&path->exclusive.fp_ex_dpo);
789 break;
790 case FIB_PATH_TYPE_SPECIAL:
791 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800792 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100793 case FIB_PATH_TYPE_DEAG:
794 /*
795 * these hold only the path's DPO, which is reset below.
796 */
797 break;
798 }
799
800 /*
801 * release the adj we were holding and pick up the
802 * drop just in case.
803 */
804 dpo_reset(&path->fp_dpo);
805 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
806
807 return;
808}
809
810static fib_forward_chain_type_t
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800811fib_path_to_chain_type (const fib_path_t *path)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100812{
Neale Rannsda78f952017-05-24 09:15:43 -0700813 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100814 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800815 if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
816 MPLS_EOS == path->recursive.fp_nh.fp_eos)
817 {
818 return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
819 }
820 else
821 {
Neale Ranns9f171f52017-04-11 08:56:53 -0700822 return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800823 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100824 }
Neale Rannsda78f952017-05-24 09:15:43 -0700825 else
826 {
827 return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
828 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100829}
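/*
 * Summary of the mapping above (added for clarity): an MPLS path that
 * recurses through an EOS label yields FIB_FORW_CHAIN_TYPE_MPLS_EOS, any
 * other MPLS path yields FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, and IP paths
 * map via fib_forw_chain_type_from_dpo_proto(), e.g. DPO_PROTO_IP4 to
 * FIB_FORW_CHAIN_TYPE_UNICAST_IP4.
 */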
830
831/*
832 * fib_path_back_walk_notify
833 *
 834 * A back walk has reached this path.
835 */
836static fib_node_back_walk_rc_t
837fib_path_back_walk_notify (fib_node_t *node,
838 fib_node_back_walk_ctx_t *ctx)
839{
840 fib_path_t *path;
841
842 path = fib_path_from_fib_node(node);
843
844 switch (path->fp_type)
845 {
846 case FIB_PATH_TYPE_RECURSIVE:
847 if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
848 {
849 /*
850 * modify the recursive adjacency to use the new forwarding
851 * of the via-fib.
852 * this update is visible to packets in flight in the DP.
853 */
854 fib_path_recursive_adj_update(
855 path,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800856 fib_path_to_chain_type(path),
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100857 &path->fp_dpo);
858 }
Neale Rannsad95b5d2016-11-10 20:35:14 +0000859 if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
860 (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason))
Neale Rannsb80c5362016-10-08 13:03:40 +0100861 {
862 /*
863 * ADJ updates (complete<->incomplete) do not need to propagate to
864 * recursive entries.
865 * The only reason its needed as far back as here, is that the adj
866 * and the incomplete adj are a different DPO type, so the LBs need
867 * to re-stack.
868 * If this walk was quashed in the fib_entry, then any non-fib_path
869 * children (like tunnels that collapse out the LB when they stack)
870 * would not see the update.
871 */
872 return (FIB_NODE_BACK_WALK_CONTINUE);
873 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100874 break;
875 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
876 /*
877FIXME comment
 878 * ADJ_UPDATE backwalks pass silently through here and up to
 879 * the path-list when the multipath adj collapse occurs.
 880 * The reason we do this is that the assumption is that VPP
881 * runs in an environment where the Control-Plane is remote
882 * and hence reacts slowly to link up down. In order to remove
883 * this down link from the ECMP set quickly, we back-walk.
884 * VPP also has dedicated CPUs, so we are not stealing resources
885 * from the CP to do so.
886 */
887 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
888 {
Neale Ranns8b37b872016-11-21 12:25:22 +0000889 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
890 {
891 /*
 892 * already resolved. no need to walk back again
893 */
894 return (FIB_NODE_BACK_WALK_CONTINUE);
895 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100896 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
897 }
898 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
899 {
Neale Ranns8b37b872016-11-21 12:25:22 +0000900 if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
901 {
902 /*
 903 * already unresolved. no need to walk back again
904 */
905 return (FIB_NODE_BACK_WALK_CONTINUE);
906 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100907 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
908 }
909 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
910 {
911 /*
912 * The interface this path resolves through has been deleted.
913 * This will leave the path in a permanent drop state. The route
914 * needs to be removed and readded (and hence the path-list deleted)
915 * before it can forward again.
916 */
917 fib_path_unresolve(path);
918 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
919 }
920 if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
921 {
922 /*
923 * restack the DPO to pick up the correct DPO sub-type
924 */
Neale Ranns8b37b872016-11-21 12:25:22 +0000925 uword if_is_up;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100926 adj_index_t ai;
927
Neale Ranns8b37b872016-11-21 12:25:22 +0000928 if_is_up = vnet_sw_interface_is_admin_up(
929 vnet_get_main(),
930 path->attached_next_hop.fp_interface);
931
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100932 ai = fib_path_attached_next_hop_get_adj(
933 path,
Neale Rannsda78f952017-05-24 09:15:43 -0700934 dpo_proto_to_link(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100935
Neale Ranns88fc83e2017-04-05 08:11:14 -0700936 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
937 if (if_is_up && adj_is_up(ai))
938 {
939 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
940 }
941
Neale Rannsda78f952017-05-24 09:15:43 -0700942 dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100943 adj_unlock(ai);
Neale Ranns8b37b872016-11-21 12:25:22 +0000944
945 if (!if_is_up)
946 {
947 /*
948 * If the interface is not up there is no reason to walk
 949 * back to children. if we did they would only evaluate
950 * that this path is unresolved and hence it would
951 * not contribute the adjacency - so it would be wasted
952 * CPU time.
953 */
954 return (FIB_NODE_BACK_WALK_CONTINUE);
955 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100956 }
Neale Rannsad95b5d2016-11-10 20:35:14 +0000957 if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
958 {
Neale Ranns8b37b872016-11-21 12:25:22 +0000959 if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
960 {
961 /*
 962 * already unresolved. no need to walk back again
963 */
964 return (FIB_NODE_BACK_WALK_CONTINUE);
965 }
Neale Rannsad95b5d2016-11-10 20:35:14 +0000966 /*
967 * the adj has gone down. the path is no longer resolved.
968 */
969 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
970 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100971 break;
972 case FIB_PATH_TYPE_ATTACHED:
973 /*
974 * FIXME; this could schedule a lower priority walk, since attached
975 * routes are not usually in ECMP configurations so the backwalk to
976 * the FIB entry does not need to be high priority
977 */
978 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
979 {
980 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
981 }
982 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
983 {
984 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
985 }
986 if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
987 {
988 fib_path_unresolve(path);
989 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
990 }
991 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800992 case FIB_PATH_TYPE_INTF_RX:
993 ASSERT(0);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100994 case FIB_PATH_TYPE_DEAG:
995 /*
996 * FIXME When VRF delete is allowed this will need a poke.
997 */
998 case FIB_PATH_TYPE_SPECIAL:
999 case FIB_PATH_TYPE_RECEIVE:
1000 case FIB_PATH_TYPE_EXCLUSIVE:
1001 /*
1002 * these path types have no parents. so to be
1003 * walked from one is unexpected.
1004 */
1005 ASSERT(0);
1006 break;
1007 }
1008
1009 /*
1010 * propagate the backwalk further to the path-list
1011 */
1012 fib_path_list_back_walk(path->fp_pl_index, ctx);
1013
1014 return (FIB_NODE_BACK_WALK_CONTINUE);
1015}
1016
Neale Ranns6c3ebcc2016-10-02 21:20:15 +01001017static void
1018fib_path_memory_show (void)
1019{
1020 fib_show_memory_usage("Path",
1021 pool_elts(fib_path_pool),
1022 pool_len(fib_path_pool),
1023 sizeof(fib_path_t));
1024}
1025
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001026/*
1027 * The FIB path's graph node virtual function table
1028 */
1029static const fib_node_vft_t fib_path_vft = {
1030 .fnv_get = fib_path_get_node,
1031 .fnv_last_lock = fib_path_last_lock_gone,
1032 .fnv_back_walk = fib_path_back_walk_notify,
Neale Ranns6c3ebcc2016-10-02 21:20:15 +01001033 .fnv_mem_show = fib_path_memory_show,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001034};
1035
1036static fib_path_cfg_flags_t
1037fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1038{
Neale Ranns450cd302016-11-09 17:49:42 +00001039 fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001040
1041 if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1042 cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1043 if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1044 cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
Neale Ranns32e1c012016-11-22 17:07:28 +00001045 if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1046 cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
Neale Ranns4b919a52017-03-11 05:55:21 -08001047 if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1048 cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001049 if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1050 cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1051 if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1052 cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1053 if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1054 cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1055 if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1056 cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001057
1058 return (cfg_flags);
1059}
1060
1061/*
1062 * fib_path_create
1063 *
1064 * Create and initialise a new path object.
1065 * return the index of the path.
1066 */
1067fib_node_index_t
1068fib_path_create (fib_node_index_t pl_index,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001069 const fib_route_path_t *rpath)
1070{
1071 fib_path_t *path;
1072
1073 pool_get(fib_path_pool, path);
1074 memset(path, 0, sizeof(*path));
1075
1076 fib_node_init(&path->fp_node,
1077 FIB_NODE_TYPE_PATH);
1078
1079 dpo_reset(&path->fp_dpo);
1080 path->fp_pl_index = pl_index;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001081 path->fp_nh_proto = rpath->frp_proto;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001082 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1083 path->fp_weight = rpath->frp_weight;
Neale Ranns0bd36ea2016-11-16 11:47:44 +00001084 if (0 == path->fp_weight)
1085 {
1086 /*
1087 * a weight of 0 is a meaningless value. We could either reject it, and thus force
 1088 * clients to always use 1, or we can accept it and fix it up appropriately.
1089 */
1090 path->fp_weight = 1;
1091 }
Neale Ranns57b58602017-07-15 07:37:25 -07001092 path->fp_preference = rpath->frp_preference;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001093 path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001094
1095 /*
 1096 * deduce the path's type from the parameters and save what is needed.
1097 */
Neale Ranns32e1c012016-11-22 17:07:28 +00001098 if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001099 {
Neale Ranns32e1c012016-11-22 17:07:28 +00001100 path->fp_type = FIB_PATH_TYPE_RECEIVE;
1101 path->receive.fp_interface = rpath->frp_sw_if_index;
1102 path->receive.fp_addr = rpath->frp_addr;
1103 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001104 else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
1105 {
1106 path->fp_type = FIB_PATH_TYPE_INTF_RX;
1107 path->intf_rx.fp_interface = rpath->frp_sw_if_index;
1108 }
1109 else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
1110 {
1111 path->fp_type = FIB_PATH_TYPE_DEAG;
1112 path->deag.fp_tbl_id = rpath->frp_fib_index;
1113 path->deag.fp_rpf_id = rpath->frp_rpf_id;
1114 }
Neale Ranns32e1c012016-11-22 17:07:28 +00001115 else if (~0 != rpath->frp_sw_if_index)
1116 {
1117 if (ip46_address_is_zero(&rpath->frp_addr))
1118 {
1119 path->fp_type = FIB_PATH_TYPE_ATTACHED;
1120 path->attached.fp_interface = rpath->frp_sw_if_index;
1121 }
1122 else
1123 {
1124 path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
1125 path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
1126 path->attached_next_hop.fp_nh = rpath->frp_addr;
1127 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001128 }
1129 else
1130 {
1131 if (ip46_address_is_zero(&rpath->frp_addr))
1132 {
1133 if (~0 == rpath->frp_fib_index)
1134 {
1135 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1136 }
1137 else
1138 {
1139 path->fp_type = FIB_PATH_TYPE_DEAG;
1140 path->deag.fp_tbl_id = rpath->frp_fib_index;
1141 }
1142 }
1143 else
1144 {
1145 path->fp_type = FIB_PATH_TYPE_RECURSIVE;
Neale Rannsda78f952017-05-24 09:15:43 -07001146 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Rannsad422ed2016-11-02 14:20:04 +00001147 {
1148 path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001149 path->recursive.fp_nh.fp_eos = rpath->frp_eos;
Neale Rannsad422ed2016-11-02 14:20:04 +00001150 }
1151 else
1152 {
1153 path->recursive.fp_nh.fp_ip = rpath->frp_addr;
1154 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001155 path->recursive.fp_tbl_id = rpath->frp_fib_index;
1156 }
1157 }
1158
1159 FIB_PATH_DBG(path, "create");
1160
1161 return (fib_path_get_index(path));
1162}
1163
1164/*
1165 * fib_path_create_special
1166 *
1167 * Create and initialise a new path object.
1168 * return the index of the path.
1169 */
1170fib_node_index_t
1171fib_path_create_special (fib_node_index_t pl_index,
Neale Rannsda78f952017-05-24 09:15:43 -07001172 dpo_proto_t nh_proto,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001173 fib_path_cfg_flags_t flags,
1174 const dpo_id_t *dpo)
1175{
1176 fib_path_t *path;
1177
1178 pool_get(fib_path_pool, path);
1179 memset(path, 0, sizeof(*path));
1180
1181 fib_node_init(&path->fp_node,
1182 FIB_NODE_TYPE_PATH);
1183 dpo_reset(&path->fp_dpo);
1184
1185 path->fp_pl_index = pl_index;
1186 path->fp_weight = 1;
Neale Ranns57b58602017-07-15 07:37:25 -07001187 path->fp_preference = 0;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001188 path->fp_nh_proto = nh_proto;
1189 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1190 path->fp_cfg_flags = flags;
1191
1192 if (FIB_PATH_CFG_FLAG_DROP & flags)
1193 {
1194 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1195 }
1196 else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1197 {
1198 path->fp_type = FIB_PATH_TYPE_RECEIVE;
1199 path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1200 }
1201 else
1202 {
1203 path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1204 ASSERT(NULL != dpo);
1205 dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1206 }
1207
1208 return (fib_path_get_index(path));
1209}
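/*
 * Illustrative usage (a sketch, not from the original file; pl_index is
 * assumed to be a valid path-list index): a drop-only special path needs
 * no DPO, the EXCLUSIVE case is the only one that requires one:
 *
 *   fib_node_index_t pi;
 *
 *   pi = fib_path_create_special(pl_index,
 *                                DPO_PROTO_IP4,
 *                                FIB_PATH_CFG_FLAG_DROP,
 *                                NULL);
 */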
1210
1211/*
1212 * fib_path_copy
1213 *
1214 * Copy a path. return index of new path.
1215 */
1216fib_node_index_t
1217fib_path_copy (fib_node_index_t path_index,
1218 fib_node_index_t path_list_index)
1219{
1220 fib_path_t *path, *orig_path;
1221
1222 pool_get(fib_path_pool, path);
1223
1224 orig_path = fib_path_get(path_index);
1225 ASSERT(NULL != orig_path);
1226
1227 memcpy(path, orig_path, sizeof(*path));
1228
1229 FIB_PATH_DBG(path, "create-copy:%d", path_index);
1230
1231 /*
1232 * reset the dynamic section
1233 */
1234 fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
1235 path->fp_oper_flags = FIB_PATH_OPER_FLAG_NONE;
1236 path->fp_pl_index = path_list_index;
1237 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1238 memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
1239 dpo_reset(&path->fp_dpo);
1240
1241 return (fib_path_get_index(path));
1242}
1243
1244/*
1245 * fib_path_destroy
1246 *
1247 * destroy a path that is no longer required
1248 */
1249void
1250fib_path_destroy (fib_node_index_t path_index)
1251{
1252 fib_path_t *path;
1253
1254 path = fib_path_get(path_index);
1255
1256 ASSERT(NULL != path);
1257 FIB_PATH_DBG(path, "destroy");
1258
1259 fib_path_unresolve(path);
1260
1261 fib_node_deinit(&path->fp_node);
1262 pool_put(fib_path_pool, path);
1263}
1264
1265/*
 1266 * fib_path_hash
 1267 *
 1268 * compute a hash over the path's configuration (the region between the hash markers)
1269 */
1270uword
1271fib_path_hash (fib_node_index_t path_index)
1272{
1273 fib_path_t *path;
1274
1275 path = fib_path_get(path_index);
1276
1277 return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1278 (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1279 STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1280 0));
1281}
1282
1283/*
1284 * fib_path_cmp_i
1285 *
1286 * Compare two paths for equivalence.
1287 */
1288static int
1289fib_path_cmp_i (const fib_path_t *path1,
1290 const fib_path_t *path2)
1291{
1292 int res;
1293
1294 res = 1;
1295
1296 /*
1297 * paths of different types and protocol are not equal.
Neale Ranns57b58602017-07-15 07:37:25 -07001298 * paths that differ only in weight and/or preference are considered equal.
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001299 */
1300 if (path1->fp_type != path2->fp_type)
1301 {
1302 res = (path1->fp_type - path2->fp_type);
1303 }
Neale Ranns32e1c012016-11-22 17:07:28 +00001304 else if (path1->fp_nh_proto != path2->fp_nh_proto)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001305 {
1306 res = (path1->fp_nh_proto - path2->fp_nh_proto);
1307 }
1308 else
1309 {
1310 /*
1311 * both paths are of the same type.
1312 * consider each type and its attributes in turn.
1313 */
1314 switch (path1->fp_type)
1315 {
1316 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1317 res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
1318 &path2->attached_next_hop.fp_nh);
1319 if (0 == res) {
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001320 res = (path1->attached_next_hop.fp_interface -
1321 path2->attached_next_hop.fp_interface);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001322 }
1323 break;
1324 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001325 res = (path1->attached.fp_interface -
1326 path2->attached.fp_interface);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001327 break;
1328 case FIB_PATH_TYPE_RECURSIVE:
1329 res = ip46_address_cmp(&path1->recursive.fp_nh,
1330 &path2->recursive.fp_nh);
1331
1332 if (0 == res)
1333 {
1334 res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
1335 }
1336 break;
1337 case FIB_PATH_TYPE_DEAG:
1338 res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001339 if (0 == res)
1340 {
1341 res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
1342 }
1343 break;
1344 case FIB_PATH_TYPE_INTF_RX:
1345 res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001346 break;
1347 case FIB_PATH_TYPE_SPECIAL:
1348 case FIB_PATH_TYPE_RECEIVE:
1349 case FIB_PATH_TYPE_EXCLUSIVE:
1350 res = 0;
1351 break;
1352 }
1353 }
1354 return (res);
1355}
1356
1357/*
1358 * fib_path_cmp_for_sort
1359 *
1360 * Compare two paths for equivalence. Used during path sorting.
1361 * As usual 0 means equal.
1362 */
1363int
1364fib_path_cmp_for_sort (void * v1,
1365 void * v2)
1366{
1367 fib_node_index_t *pi1 = v1, *pi2 = v2;
1368 fib_path_t *path1, *path2;
1369
1370 path1 = fib_path_get(*pi1);
1371 path2 = fib_path_get(*pi2);
1372
Neale Ranns57b58602017-07-15 07:37:25 -07001373 /*
1374 * when sorting paths we want the highest preference paths
 1375 * first, so that the set of choices built is in preference order
1376 */
1377 if (path1->fp_preference != path2->fp_preference)
1378 {
1379 return (path1->fp_preference - path2->fp_preference);
1380 }
1381
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001382 return (fib_path_cmp_i(path1, path2));
1383}
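/*
 * Illustrative usage (assumption: the caller is the path-list and
 * vec_sort_with_function() from vppinfra is used): sorting a vector of
 * path indices so the most preferred (numerically lowest fp_preference)
 * paths come first:
 *
 *   vec_sort_with_function(path_indices, fib_path_cmp_for_sort);
 */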
1384
1385/*
1386 * fib_path_cmp
1387 *
1388 * Compare two paths for equivalence.
1389 */
1390int
1391fib_path_cmp (fib_node_index_t pi1,
1392 fib_node_index_t pi2)
1393{
1394 fib_path_t *path1, *path2;
1395
1396 path1 = fib_path_get(pi1);
1397 path2 = fib_path_get(pi2);
1398
1399 return (fib_path_cmp_i(path1, path2));
1400}
1401
1402int
1403fib_path_cmp_w_route_path (fib_node_index_t path_index,
1404 const fib_route_path_t *rpath)
1405{
1406 fib_path_t *path;
1407 int res;
1408
1409 path = fib_path_get(path_index);
1410
1411 res = 1;
1412
1413 if (path->fp_weight != rpath->frp_weight)
1414 {
1415 res = (path->fp_weight - rpath->frp_weight);
1416 }
1417 else
1418 {
1419 /*
1420 * both paths are of the same type.
1421 * consider each type and its attributes in turn.
1422 */
1423 switch (path->fp_type)
1424 {
1425 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1426 res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
1427 &rpath->frp_addr);
1428 if (0 == res)
1429 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001430 res = (path->attached_next_hop.fp_interface -
1431 rpath->frp_sw_if_index);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001432 }
1433 break;
1434 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001435 res = (path->attached.fp_interface - rpath->frp_sw_if_index);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001436 break;
1437 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsda78f952017-05-24 09:15:43 -07001438 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Rannsad422ed2016-11-02 14:20:04 +00001439 {
1440 res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001441
1442 if (res == 0)
1443 {
1444 res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
1445 }
Neale Rannsad422ed2016-11-02 14:20:04 +00001446 }
1447 else
1448 {
1449 res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
1450 &rpath->frp_addr);
1451 }
1452
1453 if (0 == res)
1454 {
1455 res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
1456 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001457 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001458 case FIB_PATH_TYPE_INTF_RX:
1459 res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
1460 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001461 case FIB_PATH_TYPE_DEAG:
1462 res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001463 if (0 == res)
1464 {
1465 res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
1466 }
1467 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001468 case FIB_PATH_TYPE_SPECIAL:
1469 case FIB_PATH_TYPE_RECEIVE:
1470 case FIB_PATH_TYPE_EXCLUSIVE:
1471 res = 0;
1472 break;
1473 }
1474 }
1475 return (res);
1476}
1477
1478/*
1479 * fib_path_recursive_loop_detect
1480 *
 1481 * A forward walk of the FIB object graph to detect a cycle/loop. This
 1482 * walk is initiated when an entry is linked to a new path-list, or unlinked from an old one.
 1483 * The entry vector passed contains all the FIB entries that are children of this
1484 * path (it is all the entries encountered on the walk so far). If this vector
1485 * contains the entry this path resolve via, then a loop is about to form.
1486 * The loop must be allowed to form, since we need the dependencies in place
1487 * so that we can track when the loop breaks.
1488 * However, we MUST not produce a loop in the forwarding graph (else packets
1489 * would loop around the switch path until the loop breaks), so we mark recursive
1490 * paths as looped so that they do not contribute forwarding information.
 1491 * By marking the path as looped, an entry such as:
1492 * X/Y
1493 * via a.a.a.a (looped)
1494 * via b.b.b.b (not looped)
1495 * can still forward using the info provided by b.b.b.b only
1496 */
1497int
1498fib_path_recursive_loop_detect (fib_node_index_t path_index,
1499 fib_node_index_t **entry_indicies)
1500{
1501 fib_path_t *path;
1502
1503 path = fib_path_get(path_index);
1504
1505 /*
1506 * the forced drop path is never looped, cos it is never resolved.
1507 */
1508 if (fib_path_is_permanent_drop(path))
1509 {
1510 return (0);
1511 }
1512
1513 switch (path->fp_type)
1514 {
1515 case FIB_PATH_TYPE_RECURSIVE:
1516 {
1517 fib_node_index_t *entry_index, *entries;
1518 int looped = 0;
1519 entries = *entry_indicies;
1520
1521 vec_foreach(entry_index, entries) {
1522 if (*entry_index == path->fp_via_fib)
1523 {
1524 /*
1525 * the entry that is about to link to this path-list (or
1526 * one of this path-list's children) is the same entry that
1527 * this recursive path resolves through. this is a cycle.
1528 * abort the walk.
1529 */
1530 looped = 1;
1531 break;
1532 }
1533 }
1534
1535 if (looped)
1536 {
1537 FIB_PATH_DBG(path, "recursive loop formed");
1538 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1539
Neale Rannsda78f952017-05-24 09:15:43 -07001540 dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001541 }
1542 else
1543 {
1544 /*
1545 * no loop here yet. keep forward walking the graph.
1546 */
1547 if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
1548 {
1549 FIB_PATH_DBG(path, "recursive loop formed");
1550 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1551 }
1552 else
1553 {
1554 FIB_PATH_DBG(path, "recursive loop cleared");
1555 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
1556 }
1557 }
1558 break;
1559 }
1560 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1561 case FIB_PATH_TYPE_ATTACHED:
1562 case FIB_PATH_TYPE_SPECIAL:
1563 case FIB_PATH_TYPE_DEAG:
1564 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001565 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001566 case FIB_PATH_TYPE_EXCLUSIVE:
1567 /*
1568 * these path types cannot be part of a loop, since they are the leaves
1569 * of the graph.
1570 */
1571 break;
1572 }
1573
1574 return (fib_path_is_looped(path_index));
1575}
1576
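/*
 * fib_path_resolve
 *
 * Make the path ready to forward; construct the DPO through which the
 * path resolves and set/clear FIB_PATH_OPER_FLAG_RESOLVED accordingly.
 */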
1577int
1578fib_path_resolve (fib_node_index_t path_index)
1579{
1580 fib_path_t *path;
1581
1582 path = fib_path_get(path_index);
1583
1584 /*
1585 * hope for the best.
1586 */
1587 path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
1588
1589 /*
1590 * the forced drop path resolves via the drop adj
1591 */
1592 if (fib_path_is_permanent_drop(path))
1593 {
Neale Rannsda78f952017-05-24 09:15:43 -07001594 dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001595 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1596 return (fib_path_is_resolved(path_index));
1597 }
1598
1599 switch (path->fp_type)
1600 {
1601 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1602 fib_path_attached_next_hop_set(path);
1603 break;
1604 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns6f631152017-10-03 08:20:21 -07001605 if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
1606 {
1607 l2_bridge_dpo_add_or_lock(path->attached.fp_interface,
1608 &path->fp_dpo);
1609 }
1610 else
1611 {
1612 /*
1613 * path->attached.fp_interface
1614 */
1615 if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
1616 path->attached.fp_interface))
1617 {
1618 path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
1619 }
1620 dpo_set(&path->fp_dpo,
1621 DPO_ADJACENCY,
1622 path->fp_nh_proto,
1623 fib_path_attached_get_adj(path,
1624 dpo_proto_to_link(path->fp_nh_proto)));
Neale Ranns8c4611b2017-05-23 03:43:47 -07001625
Neale Ranns6f631152017-10-03 08:20:21 -07001626 /*
1627 * become a child of the adjacency so we receive updates
1628 * when the interface state changes
1629 */
1630 path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
1631 FIB_NODE_TYPE_PATH,
1632 fib_path_get_index(path));
1633 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001634 break;
1635 case FIB_PATH_TYPE_RECURSIVE:
1636 {
1637 /*
1638 * Create a RR source entry in the table for the address
1639 * that this path recurses through.
1640 * This resolve action is recursive, hence we may create
 1641 * more paths in the process. more creates may mean a realloc
1642 * of this path.
1643 */
1644 fib_node_index_t fei;
1645 fib_prefix_t pfx;
1646
1647 ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);
1648
Neale Rannsda78f952017-05-24 09:15:43 -07001649 if (DPO_PROTO_MPLS == path->fp_nh_proto)
Neale Rannsad422ed2016-11-02 14:20:04 +00001650 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001651 fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
1652 path->recursive.fp_nh.fp_eos,
1653 &pfx);
Neale Rannsad422ed2016-11-02 14:20:04 +00001654 }
1655 else
1656 {
1657 fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
1658 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001659
1660 fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
1661 &pfx,
1662 FIB_SOURCE_RR,
Neale Rannsa0558302017-04-13 00:44:52 -07001663 FIB_ENTRY_FLAG_NONE);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001664
1665 path = fib_path_get(path_index);
1666 path->fp_via_fib = fei;
1667
1668 /*
1669 * become a dependent child of the entry so the path is
1670 * informed when the forwarding for the entry changes.
1671 */
1672 path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
1673 FIB_NODE_TYPE_PATH,
1674 fib_path_get_index(path));
1675
1676 /*
1677 * create and configure the IP DPO
1678 */
1679 fib_path_recursive_adj_update(
1680 path,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001681 fib_path_to_chain_type(path),
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001682 &path->fp_dpo);
1683
1684 break;
1685 }
1686 case FIB_PATH_TYPE_SPECIAL:
1687 /*
1688 * Resolve via the drop
1689 */
Neale Rannsda78f952017-05-24 09:15:43 -07001690 dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001691 break;
1692 case FIB_PATH_TYPE_DEAG:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001693 {
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001694 /*
1695 * Resolve via a lookup DPO.
1696 * FIXME. control plane should add routes with a table ID
1697 */
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001698 lookup_cast_t cast;
1699
1700 cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
1701 LOOKUP_MULTICAST :
1702 LOOKUP_UNICAST);
1703
1704 lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
Neale Rannsda78f952017-05-24 09:15:43 -07001705 path->fp_nh_proto,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001706 cast,
1707 LOOKUP_INPUT_DST_ADDR,
1708 LOOKUP_TABLE_FROM_CONFIG,
1709 &path->fp_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001710 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001711 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001712 case FIB_PATH_TYPE_RECEIVE:
1713 /*
1714 * Resolve via a receive DPO.
1715 */
Neale Rannsda78f952017-05-24 09:15:43 -07001716 receive_dpo_add_or_lock(path->fp_nh_proto,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001717 path->receive.fp_interface,
1718 &path->receive.fp_addr,
1719 &path->fp_dpo);
1720 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001721 case FIB_PATH_TYPE_INTF_RX: {
1722 /*
1723 * Resolve via a receive DPO.
1724 */
Neale Ranns43161a82017-08-12 02:12:00 -07001725 interface_rx_dpo_add_or_lock(path->fp_nh_proto,
1726 path->intf_rx.fp_interface,
1727 &path->fp_dpo);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001728 break;
1729 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001730 case FIB_PATH_TYPE_EXCLUSIVE:
1731 /*
1732 * Resolve via the user provided DPO
1733 */
1734 dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
1735 break;
1736 }
1737
1738 return (fib_path_is_resolved(path_index));
1739}
1740
1741u32
1742fib_path_get_resolving_interface (fib_node_index_t path_index)
1743{
1744 fib_path_t *path;
1745
1746 path = fib_path_get(path_index);
1747
1748 switch (path->fp_type)
1749 {
1750 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1751 return (path->attached_next_hop.fp_interface);
1752 case FIB_PATH_TYPE_ATTACHED:
1753 return (path->attached.fp_interface);
1754 case FIB_PATH_TYPE_RECEIVE:
1755 return (path->receive.fp_interface);
1756 case FIB_PATH_TYPE_RECURSIVE:
Neale Ranns08b16482017-05-13 05:52:58 -07001757 if (fib_path_is_resolved(path_index))
1758 {
1759 return (fib_entry_get_resolving_interface(path->fp_via_fib));
1760 }
1761 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001762 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001763 case FIB_PATH_TYPE_SPECIAL:
1764 case FIB_PATH_TYPE_DEAG:
1765 case FIB_PATH_TYPE_EXCLUSIVE:
1766 break;
1767 }
1768 return (~0);
1769}
1770
1771adj_index_t
1772fib_path_get_adj (fib_node_index_t path_index)
1773{
1774 fib_path_t *path;
1775
1776 path = fib_path_get(path_index);
1777
1778 ASSERT(dpo_is_adj(&path->fp_dpo));
1779 if (dpo_is_adj(&path->fp_dpo))
1780 {
1781 return (path->fp_dpo.dpoi_index);
1782 }
1783 return (ADJ_INDEX_INVALID);
1784}
1785
Neale Ranns57b58602017-07-15 07:37:25 -07001786u16
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001787fib_path_get_weight (fib_node_index_t path_index)
1788{
1789 fib_path_t *path;
1790
1791 path = fib_path_get(path_index);
1792
1793 ASSERT(path);
1794
1795 return (path->fp_weight);
1796}
1797
Neale Ranns57b58602017-07-15 07:37:25 -07001798u16
1799fib_path_get_preference (fib_node_index_t path_index)
1800{
1801 fib_path_t *path;
1802
1803 path = fib_path_get(path_index);
1804
1805 ASSERT(path);
1806
1807 return (path->fp_preference);
1808}
1809
Neale Ranns3ee44042016-10-03 13:05:48 +01001810/**
1811 * @brief Contribute the path's adjacency to the list passed.
1812 * By calling this function over all paths, recursively, a child
1813 * can construct its full set of forwarding adjacencies, and hence its
1814 * uRPF list.
1815 */
1816void
1817fib_path_contribute_urpf (fib_node_index_t path_index,
1818 index_t urpf)
1819{
1820 fib_path_t *path;
1821
Neale Ranns3ee44042016-10-03 13:05:48 +01001822 path = fib_path_get(path_index);
1823
Neale Ranns88fc83e2017-04-05 08:11:14 -07001824 /*
1825 * resolved and unresolved paths contribute to the RPF list.
1826 */
Neale Ranns3ee44042016-10-03 13:05:48 +01001827 switch (path->fp_type)
1828 {
1829 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1830 fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
1831 break;
1832
1833 case FIB_PATH_TYPE_ATTACHED:
1834 fib_urpf_list_append(urpf, path->attached.fp_interface);
1835 break;
1836
1837 case FIB_PATH_TYPE_RECURSIVE:
Neale Ranns08b16482017-05-13 05:52:58 -07001838 if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
1839 !fib_path_is_looped(path_index))
Neale Ranns88fc83e2017-04-05 08:11:14 -07001840 {
1841 /*
 1842                  * a path may be unresolved due to constraints, or unresolved
Neale Ranns08b16482017-05-13 05:52:58 -07001843                  * because it has no via entry; without a via there is nothing to contribute.
Neale Ranns88fc83e2017-04-05 08:11:14 -07001844 */
1845 fib_entry_contribute_urpf(path->fp_via_fib, urpf);
1846 }
Neale Ranns3ee44042016-10-03 13:05:48 +01001847 break;
1848
1849 case FIB_PATH_TYPE_EXCLUSIVE:
1850 case FIB_PATH_TYPE_SPECIAL:
1851 /*
1852 * these path types may link to an adj, if that's what
 1853      * the client gave
1854 */
1855 if (dpo_is_adj(&path->fp_dpo))
1856 {
1857 ip_adjacency_t *adj;
1858
1859 adj = adj_get(path->fp_dpo.dpoi_index);
1860
1861 fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
1862 }
1863 break;
1864
1865 case FIB_PATH_TYPE_DEAG:
1866 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001867 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns3ee44042016-10-03 13:05:48 +01001868 /*
1869 * these path types don't link to an adj
1870 */
1871 break;
1872 }
1873}
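/*
 * Usage sketch for fib_path_contribute_urpf() (illustrative only; the
 * names 'urpf' and 'path_indices' are hypothetical, and the helpers
 * fib_urpf_list_alloc_and_lock()/fib_urpf_list_unlock() are assumed to
 * come from fib_urpf_list.h): a child walks each of its paths and lets
 * every path append its resolving interface(s) to a single uRPF list.
 *
 *   index_t urpf = fib_urpf_list_alloc_and_lock();
 *   fib_node_index_t *path_index;
 *
 *   vec_foreach (path_index, path_indices)
 *   {
 *       fib_path_contribute_urpf(*path_index, urpf);
 *   }
 *   ... attach 'urpf' to the child, then ...
 *   fib_urpf_list_unlock(urpf);
 *
 * In practice this walk is driven for a child by its path-list.
 */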
1874
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001875void
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001876fib_path_stack_mpls_disp (fib_node_index_t path_index,
1877 dpo_proto_t payload_proto,
1878 dpo_id_t *dpo)
1879{
1880 fib_path_t *path;
1881
1882 path = fib_path_get(path_index);
1883
1884 ASSERT(path);
1885
1886 switch (path->fp_type)
1887 {
1888 case FIB_PATH_TYPE_DEAG:
1889 {
1890 dpo_id_t tmp = DPO_INVALID;
1891
1892 dpo_copy(&tmp, dpo);
1893 dpo_set(dpo,
1894 DPO_MPLS_DISPOSITION,
1895 payload_proto,
1896 mpls_disp_dpo_create(payload_proto,
1897 path->deag.fp_rpf_id,
1898 &tmp));
1899 dpo_reset(&tmp);
1900 break;
1901 }
1902 case FIB_PATH_TYPE_RECEIVE:
1903 case FIB_PATH_TYPE_ATTACHED:
1904 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1905 case FIB_PATH_TYPE_RECURSIVE:
1906 case FIB_PATH_TYPE_INTF_RX:
1907 case FIB_PATH_TYPE_EXCLUSIVE:
1908 case FIB_PATH_TYPE_SPECIAL:
1909 break;
1910 }
1911}
1912
1913void
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001914fib_path_contribute_forwarding (fib_node_index_t path_index,
1915 fib_forward_chain_type_t fct,
1916 dpo_id_t *dpo)
1917{
1918 fib_path_t *path;
1919
1920 path = fib_path_get(path_index);
1921
1922 ASSERT(path);
1923 ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
1924
1925 FIB_PATH_DBG(path, "contribute");
1926
1927 /*
1928 * The DPO stored in the path was created when the path was resolved.
 1929      * This then represents the path's 'native' protocol, e.g. IP.
 1930      * For all other chain types we need to go find something else.
1931 */
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001932 if (fib_path_to_chain_type(path) == fct)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001933 {
1934 dpo_copy(dpo, &path->fp_dpo);
1935 }
Neale Ranns5e575b12016-10-03 09:40:25 +01001936 else
1937 {
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001938 switch (path->fp_type)
1939 {
1940 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1941 switch (fct)
1942 {
1943 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1944 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1945 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1946 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
Neale Ranns5e575b12016-10-03 09:40:25 +01001947 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001948 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001949 {
1950 adj_index_t ai;
1951
1952 /*
Neale Rannsad422ed2016-11-02 14:20:04 +00001953	     * get an appropriate link type adj.
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001954 */
1955 ai = fib_path_attached_next_hop_get_adj(
1956 path,
1957 fib_forw_chain_type_to_link_type(fct));
1958 dpo_set(dpo, DPO_ADJACENCY,
1959 fib_forw_chain_type_to_dpo_proto(fct), ai);
1960 adj_unlock(ai);
1961
1962 break;
1963 }
Neale Ranns32e1c012016-11-22 17:07:28 +00001964 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1965 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001966 break;
Neale Ranns32e1c012016-11-22 17:07:28 +00001967 }
1968 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001969 case FIB_PATH_TYPE_RECURSIVE:
1970 switch (fct)
1971 {
1972 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1973 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1974 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001975 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
Neale Ranns32e1c012016-11-22 17:07:28 +00001976 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1977 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001978 fib_path_recursive_adj_update(path, fct, dpo);
1979 break;
Neale Ranns5e575b12016-10-03 09:40:25 +01001980 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001981 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns5e575b12016-10-03 09:40:25 +01001982 ASSERT(0);
1983 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001984 }
1985 break;
1986 case FIB_PATH_TYPE_DEAG:
Neale Ranns32e1c012016-11-22 17:07:28 +00001987 switch (fct)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001988 {
1989 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1990 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
1991 DPO_PROTO_MPLS,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001992 LOOKUP_UNICAST,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001993 LOOKUP_INPUT_DST_ADDR,
1994 LOOKUP_TABLE_FROM_CONFIG,
1995 dpo);
1996 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001997 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001998 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1999 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002000 dpo_copy(dpo, &path->fp_dpo);
Neale Ranns32e1c012016-11-22 17:07:28 +00002001 break;
2002 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2003 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns5e575b12016-10-03 09:40:25 +01002004 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08002005 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns5e575b12016-10-03 09:40:25 +01002006 ASSERT(0);
2007 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002008 }
2009 break;
2010 case FIB_PATH_TYPE_EXCLUSIVE:
2011 dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2012 break;
2013 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns6f631152017-10-03 08:20:21 -07002014 if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
2015 {
2016 dpo_copy(dpo, &path->fp_dpo);
2017 break;
2018 }
Neale Ranns32e1c012016-11-22 17:07:28 +00002019 switch (fct)
2020 {
2021 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2022 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2023 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2024 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2025 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08002026 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns8c4611b2017-05-23 03:43:47 -07002027 {
2028 adj_index_t ai;
2029
2030 /*
 2031		 * get an appropriate link type adj.
2032 */
2033 ai = fib_path_attached_get_adj(
2034 path,
2035 fib_forw_chain_type_to_link_type(fct));
2036 dpo_set(dpo, DPO_ADJACENCY,
2037 fib_forw_chain_type_to_dpo_proto(fct), ai);
2038 adj_unlock(ai);
2039 break;
2040 }
Neale Ranns32e1c012016-11-22 17:07:28 +00002041 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2042 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2043 {
2044 adj_index_t ai;
2045
2046 /*
2047 * Create the adj needed for sending IP multicast traffic
2048 */
Neale Rannsda78f952017-05-24 09:15:43 -07002049 ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns32e1c012016-11-22 17:07:28 +00002050 fib_forw_chain_type_to_link_type(fct),
2051 path->attached.fp_interface);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002052 dpo_set(dpo, DPO_ADJACENCY,
Neale Ranns32e1c012016-11-22 17:07:28 +00002053 fib_forw_chain_type_to_dpo_proto(fct),
2054 ai);
2055 adj_unlock(ai);
2056 }
2057 break;
2058 }
2059 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002060 case FIB_PATH_TYPE_INTF_RX:
2061 /*
 2062	     * Create the interface-rx DPO that will receive the packets on the path's interface
2063 */
Neale Ranns43161a82017-08-12 02:12:00 -07002064 interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2065 path->attached.fp_interface,
2066 dpo);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002067 break;
Neale Ranns32e1c012016-11-22 17:07:28 +00002068 case FIB_PATH_TYPE_RECEIVE:
2069 case FIB_PATH_TYPE_SPECIAL:
2070 dpo_copy(dpo, &path->fp_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002071 break;
2072 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002073 }
2074}
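/*
 * Usage sketch for fib_path_contribute_forwarding() (illustrative only;
 * 'path_index', 'child_type' and 'child_dpo' are hypothetical names):
 * a child asks the path for a DPO of the forwarding-chain type it needs
 * and then stacks its own DPO on the result.
 *
 *   dpo_id_t via_dpo = DPO_INVALID;
 *
 *   fib_path_contribute_forwarding(path_index,
 *                                  FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                  &via_dpo);
 *   dpo_stack(child_type, DPO_PROTO_IP4, &child_dpo, &via_dpo);
 *   dpo_reset(&via_dpo);
 */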
2075
2076load_balance_path_t *
2077fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2078 fib_forward_chain_type_t fct,
2079 load_balance_path_t *hash_key)
2080{
2081 load_balance_path_t *mnh;
2082 fib_path_t *path;
2083
2084 path = fib_path_get(path_index);
2085
2086 ASSERT(path);
2087
2088 if (fib_path_is_resolved(path_index))
2089 {
2090 vec_add2(hash_key, mnh, 1);
2091
2092 mnh->path_weight = path->fp_weight;
2093 mnh->path_index = path_index;
Neale Ranns5e575b12016-10-03 09:40:25 +01002094 fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002095 }
2096
2097 return (hash_key);
2098}
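/*
 * Usage sketch for fib_path_append_nh_for_multipath_hash() (illustrative
 * only; 'path_indices', 'fct' and 'lb_dpo' are hypothetical names, and
 * load_balance_multipath_update() is assumed from vnet/dpo/load_balance.h):
 * collect one bucket per resolved path, then hand the vector to the
 * load-balance object.
 *
 *   load_balance_path_t *nhs = NULL;
 *   fib_node_index_t *path_index;
 *
 *   vec_foreach (path_index, path_indices)
 *   {
 *       nhs = fib_path_append_nh_for_multipath_hash(*path_index, fct, nhs);
 *   }
 *   load_balance_multipath_update(lb_dpo, nhs, LOAD_BALANCE_FLAG_NONE);
 *   vec_free(nhs);
 */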
2099
2100int
Neale Rannsf12a83f2017-04-18 09:09:40 -07002101fib_path_is_recursive_constrained (fib_node_index_t path_index)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002102{
2103 fib_path_t *path;
2104
2105 path = fib_path_get(path_index);
2106
Neale Rannsf12a83f2017-04-18 09:09:40 -07002107 return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2108 ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2109 (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002110}
2111
2112int
2113fib_path_is_exclusive (fib_node_index_t path_index)
2114{
2115 fib_path_t *path;
2116
2117 path = fib_path_get(path_index);
2118
2119 return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2120}
2121
2122int
2123fib_path_is_deag (fib_node_index_t path_index)
2124{
2125 fib_path_t *path;
2126
2127 path = fib_path_get(path_index);
2128
2129 return (FIB_PATH_TYPE_DEAG == path->fp_type);
2130}
2131
2132int
2133fib_path_is_resolved (fib_node_index_t path_index)
2134{
2135 fib_path_t *path;
2136
2137 path = fib_path_get(path_index);
2138
2139 return (dpo_id_is_valid(&path->fp_dpo) &&
2140 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2141 !fib_path_is_looped(path_index) &&
2142 !fib_path_is_permanent_drop(path));
2143}
2144
2145int
2146fib_path_is_looped (fib_node_index_t path_index)
2147{
2148 fib_path_t *path;
2149
2150 path = fib_path_get(path_index);
2151
2152 return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2153}
2154
Neale Ranns81424992017-05-18 03:03:22 -07002155fib_path_list_walk_rc_t
Steven01b07122016-11-02 10:40:09 -07002156fib_path_encode (fib_node_index_t path_list_index,
2157 fib_node_index_t path_index,
2158 void *ctx)
2159{
2160 fib_route_path_encode_t **api_rpaths = ctx;
2161 fib_route_path_encode_t *api_rpath;
2162 fib_path_t *path;
2163
2164 path = fib_path_get(path_index);
2165 if (!path)
Neale Ranns81424992017-05-18 03:03:22 -07002166 return (FIB_PATH_LIST_WALK_CONTINUE);
Steven01b07122016-11-02 10:40:09 -07002167 vec_add2(*api_rpaths, api_rpath, 1);
2168 api_rpath->rpath.frp_weight = path->fp_weight;
Neale Ranns57b58602017-07-15 07:37:25 -07002169 api_rpath->rpath.frp_preference = path->fp_preference;
Steven01b07122016-11-02 10:40:09 -07002170 api_rpath->rpath.frp_proto = path->fp_nh_proto;
2171 api_rpath->rpath.frp_sw_if_index = ~0;
2172 api_rpath->dpo = path->exclusive.fp_ex_dpo;
2173 switch (path->fp_type)
2174 {
2175 case FIB_PATH_TYPE_RECEIVE:
2176 api_rpath->rpath.frp_addr = path->receive.fp_addr;
2177 api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
Florin Coras91d341c2017-07-29 11:50:31 -07002178 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002179 break;
2180 case FIB_PATH_TYPE_ATTACHED:
2181 api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
Florin Coras91d341c2017-07-29 11:50:31 -07002182 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002183 break;
2184 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2185 api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2186 api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2187 break;
2188 case FIB_PATH_TYPE_SPECIAL:
2189 break;
2190 case FIB_PATH_TYPE_DEAG:
Neale Ranns7b7ba572017-10-01 12:08:10 -07002191 api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2192 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002193 break;
2194 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsad422ed2016-11-02 14:20:04 +00002195 api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
Steven01b07122016-11-02 10:40:09 -07002196 break;
2197 default:
2198 break;
2199 }
Neale Ranns81424992017-05-18 03:03:22 -07002200 return (FIB_PATH_LIST_WALK_CONTINUE);
Steven01b07122016-11-02 10:40:09 -07002201}
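/*
 * Usage sketch for fib_path_encode() (illustrative only; assumes the
 * path-list walker fib_path_list_walk() from fib_path_list.h): the
 * API/CLI layer walks a path-list and collects one
 * fib_route_path_encode_t per path.
 *
 *   fib_route_path_encode_t *api_rpaths = NULL;
 *
 *   fib_path_list_walk(path_list_index, fib_path_encode, &api_rpaths);
 *   ... marshal api_rpaths into the reply, then ...
 *   vec_free(api_rpaths);
 */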
2202
Neale Rannsda78f952017-05-24 09:15:43 -07002203dpo_proto_t
Neale Rannsad422ed2016-11-02 14:20:04 +00002204fib_path_get_proto (fib_node_index_t path_index)
2205{
2206 fib_path_t *path;
2207
2208 path = fib_path_get(path_index);
2209
2210 return (path->fp_nh_proto);
2211}
2212
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002213void
2214fib_path_module_init (void)
2215{
2216 fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2217}
2218
2219static clib_error_t *
2220show_fib_path_command (vlib_main_t * vm,
2221 unformat_input_t * input,
2222 vlib_cli_command_t * cmd)
2223{
Neale Ranns33a7dd52016-10-07 15:14:33 +01002224 fib_node_index_t pi;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002225 fib_path_t *path;
2226
Neale Ranns33a7dd52016-10-07 15:14:33 +01002227 if (unformat (input, "%d", &pi))
2228 {
2229 /*
2230 * show one in detail
2231 */
2232 if (!pool_is_free_index(fib_path_pool, pi))
2233 {
2234 path = fib_path_get(pi);
2235 u8 *s = fib_path_format(pi, NULL);
2236 s = format(s, "children:");
2237 s = fib_node_children_format(path->fp_node.fn_children, s);
2238 vlib_cli_output (vm, "%s", s);
2239 vec_free(s);
2240 }
2241 else
2242 {
2243 vlib_cli_output (vm, "path %d invalid", pi);
2244 }
2245 }
2246 else
2247 {
2248 vlib_cli_output (vm, "FIB Paths");
2249 pool_foreach(path, fib_path_pool,
2250 ({
2251 vlib_cli_output (vm, "%U", format_fib_path, path);
2252 }));
2253 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002254
2255 return (NULL);
2256}
2257
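/*
 * Example CLI usage (illustrative, output abridged):
 *
 *   vpp# show fib paths        - brief listing of every path in the pool
 *   vpp# show fib paths 12     - detail for path 12, including its children
 */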
2258VLIB_CLI_COMMAND (show_fib_path, static) = {
2259 .path = "show fib paths",
2260 .function = show_fib_path_command,
2261 .short_help = "show fib paths",
2262};