/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/ip/format.h>
#include <vnet/ip/ip.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/dpo/receive_dpo.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/lookup_dpo.h>
#include <vnet/dpo/interface_rx_dpo.h>
#include <vnet/dpo/mpls_disposition.h>
#include <vnet/dpo/l2_bridge_dpo.h>

#include <vnet/adj/adj.h>
#include <vnet/adj/adj_mcast.h>

#include <vnet/fib/fib_path.h>
#include <vnet/fib/fib_node.h>
#include <vnet/fib/fib_table.h>
#include <vnet/fib/fib_entry.h>
#include <vnet/fib/fib_path_list.h>
#include <vnet/fib/fib_internal.h>
#include <vnet/fib/fib_urpf_list.h>
#include <vnet/fib/mpls_fib.h>

/**
 * Enumeration of path types
 */
typedef enum fib_path_type_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_TYPE_FIRST = 0,
    /**
     * Attached-nexthop. An interface and a nexthop are known.
     */
    FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
    /**
     * attached. Only the interface is known.
     */
    FIB_PATH_TYPE_ATTACHED,
    /**
     * recursive. Only the next-hop is known.
     */
    FIB_PATH_TYPE_RECURSIVE,
    /**
     * special. nothing is known. so we drop.
     */
    FIB_PATH_TYPE_SPECIAL,
    /**
     * exclusive. user provided adj.
     */
    FIB_PATH_TYPE_EXCLUSIVE,
    /**
     * deag. Link to a lookup adj in the next table
     */
    FIB_PATH_TYPE_DEAG,
    /**
     * interface receive.
     */
    FIB_PATH_TYPE_INTF_RX,
    /**
     * receive. it's for-us.
     */
    FIB_PATH_TYPE_RECEIVE,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
} __attribute__ ((packed)) fib_path_type_t;

/**
 * The maximum number of path_types
 */
#define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)

#define FIB_PATH_TYPES {                                        \
    [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",     \
    [FIB_PATH_TYPE_ATTACHED]          = "attached",             \
    [FIB_PATH_TYPE_RECURSIVE]         = "recursive",            \
    [FIB_PATH_TYPE_SPECIAL]           = "special",              \
    [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",            \
    [FIB_PATH_TYPE_DEAG]              = "deag",                 \
    [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",              \
    [FIB_PATH_TYPE_RECEIVE]           = "receive",              \
}

#define FOR_EACH_FIB_PATH_TYPE(_item) \
    for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)

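/*
 * Illustrative usage sketch: the name table and the iterator macro above are
 * typically used together; 'print_name' below is a hypothetical consumer.
 *
 *   static const char *names[] = FIB_PATH_TYPES;
 *   fib_path_type_t ptype;
 *
 *   FOR_EACH_FIB_PATH_TYPE(ptype) {
 *       print_name(names[ptype]);
 *   }
 */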
/**
 * Enumeration of path operational (i.e. derived) attributes
 */
typedef enum fib_path_oper_attribute_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
    /**
     * The path forms part of a recursive loop.
     */
    FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
    /**
     * The path is resolved
     */
    FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
    /**
     * The path is attached, despite what the next-hop may say.
     */
    FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
    /**
     * The path has become a permanent drop.
     */
    FIB_PATH_OPER_ATTRIBUTE_DROP,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
} __attribute__ ((packed)) fib_path_oper_attribute_t;

/**
 * The maximum number of path operational attributes
 */
#define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)

#define FIB_PATH_OPER_ATTRIBUTES {                                      \
    [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",        \
    [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",              \
    [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",                  \
}

#define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
    for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
         _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
         _item++)

/**
 * Path flags from the attributes
 */
typedef enum fib_path_oper_flags_t_ {
    FIB_PATH_OPER_FLAG_NONE = 0,
    FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
    FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
    FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
    FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
} __attribute__ ((packed)) fib_path_oper_flags_t;

/**
 * A FIB path
 */
typedef struct fib_path_t_ {
    /**
     * A path is a node in the FIB graph.
     */
    fib_node_t fp_node;

    /**
     * The index of the path-list to which this path belongs
     */
    u32 fp_pl_index;

    /**
     * This marks the start of the memory area used to hash
     * the path
     */
    STRUCT_MARK(path_hash_start);

    /**
     * Configuration Flags
     */
    fib_path_cfg_flags_t fp_cfg_flags;

    /**
     * The type of the path. This is the selector for the union
     */
    fib_path_type_t fp_type;

    /**
     * The protocol of the next-hop, i.e. the address family of the
     * next-hop's address. We can't derive this from the address itself
     * since the address can be all zeros
     */
    dpo_proto_t fp_nh_proto;

    /**
     * UCMP [unnormalised] weight
     */
    u8 fp_weight;

    /**
     * A path preference. 0 is the best.
     * Only paths of the best preference, that are 'up', are considered
     * for forwarding.
     */
    u8 fp_preference;

    /**
     * per-type union of the data required to resolve the path
     */
    union {
        struct {
            /**
             * The next-hop
             */
            ip46_address_t fp_nh;
            /**
             * The interface
             */
            u32 fp_interface;
        } attached_next_hop;
        struct {
            /**
             * The interface
             */
            u32 fp_interface;
        } attached;
        struct {
            union
            {
                /**
                 * The next-hop
                 */
                ip46_address_t fp_ip;
                struct {
                    /**
                     * The local label to resolve through.
                     */
                    mpls_label_t fp_local_label;
                    /**
                     * The EOS bit of the resolving label
                     */
                    mpls_eos_bit_t fp_eos;
                };
            } fp_nh;
            /**
             * The FIB table index in which to find the next-hop.
             */
            fib_node_index_t fp_tbl_id;
        } recursive;
        struct {
            /**
             * The FIB index in which to perform the next lookup
             */
            fib_node_index_t fp_tbl_id;
            /**
             * The RPF-ID to tag the packets with
             */
            fib_rpf_id_t fp_rpf_id;
        } deag;
        struct {
        } special;
        struct {
            /**
             * The user provided 'exclusive' DPO
             */
            dpo_id_t fp_ex_dpo;
        } exclusive;
        struct {
            /**
             * The interface on which the local address is configured
             */
            u32 fp_interface;
            /**
             * The next-hop
             */
            ip46_address_t fp_addr;
        } receive;
        struct {
            /**
             * The interface on which the packets will be input.
             */
            u32 fp_interface;
        } intf_rx;
    };
    STRUCT_MARK(path_hash_end);

    /**
     * Members in this last section represent information that is
     * derived during resolution. It should not be copied to new paths
     * nor compared.
     */

    /**
     * Operational Flags
     */
    fib_path_oper_flags_t fp_oper_flags;

    /**
     * the resolving via fib. not part of the union, since it is not part
     * of the path's hash.
     */
    fib_node_index_t fp_via_fib;

    /**
     * The Data-path objects through which this path resolves for IP.
     */
    dpo_id_t fp_dpo;

    /**
     * the index of this path in the parent's child list.
     */
    u32 fp_sibling;
} fib_path_t;

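/*
 * Note: the fields bracketed by STRUCT_MARK(path_hash_start) and
 * STRUCT_MARK(path_hash_end) above form the key that fib_path_hash()
 * hashes and that fib_path_cmp_i() compares (type by type); the members
 * after the end marker are derived state and are deliberately excluded.
 */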
/*
 * Array of strings/names for the path types and attributes
 */
static const char *fib_path_type_names[] = FIB_PATH_TYPES;
static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
static const char *fib_path_cfg_attribute_names[] = FIB_PATH_CFG_ATTRIBUTES;

/*
 * The memory pool from which we allocate all the paths
 */
static fib_path_t *fib_path_pool;

/*
 * Debug macro
 */
#ifdef FIB_DEBUG
#define FIB_PATH_DBG(_p, _fmt, _args...)                        \
{                                                               \
    u8 *_tmp = NULL;                                            \
    _tmp = fib_path_format(fib_path_get_index(_p), _tmp);       \
    clib_warning("path:[%d:%s]:" _fmt,                          \
                 fib_path_get_index(_p), _tmp,                  \
                 ##_args);                                      \
    vec_free(_tmp);                                             \
}
#else
#define FIB_PATH_DBG(_p, _fmt, _args...)
#endif

static fib_path_t *
fib_path_get (fib_node_index_t index)
{
    return (pool_elt_at_index(fib_path_pool, index));
}

static fib_node_index_t
fib_path_get_index (fib_path_t *path)
{
    return (path - fib_path_pool);
}

static fib_node_t *
fib_path_get_node (fib_node_index_t index)
{
    return ((fib_node_t*)fib_path_get(index));
}

static fib_path_t*
fib_path_from_fib_node (fib_node_t *node)
{
#if CLIB_DEBUG > 0
    ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
#endif
    return ((fib_path_t*)node);
}

u8 *
format_fib_path (u8 * s, va_list * args)
{
    fib_path_t *path = va_arg (*args, fib_path_t *);
    vnet_main_t * vnm = vnet_get_main();
    fib_path_oper_attribute_t oattr;
    fib_path_cfg_attribute_t cattr;

    s = format (s, " index:%d ", fib_path_get_index(path));
    s = format (s, "pl-index:%d ", path->fp_pl_index);
    s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
    s = format (s, "weight=%d ", path->fp_weight);
    s = format (s, "pref=%d ", path->fp_preference);
    s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
    if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
        s = format(s, " oper-flags:");
        FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
            if ((1<<oattr) & path->fp_oper_flags) {
                s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
            }
        }
    }
    if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
        s = format(s, " cfg-flags:");
        FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
            if ((1<<cattr) & path->fp_cfg_flags) {
                s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
            }
        }
    }
    s = format(s, "\n ");

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        s = format (s, "%U", format_ip46_address,
                    &path->attached_next_hop.fp_nh,
                    IP46_TYPE_ANY);
        if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
        {
            s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
        }
        else
        {
            s = format (s, " %U",
                        format_vnet_sw_interface_name,
                        vnm,
                        vnet_get_sw_interface(
                            vnm,
                            path->attached_next_hop.fp_interface));
            if (vnet_sw_interface_is_p2p(vnet_get_main(),
                                         path->attached_next_hop.fp_interface))
            {
                s = format (s, " (p2p)");
            }
        }
        if (!dpo_id_is_valid(&path->fp_dpo))
        {
            s = format(s, "\n unresolved");
        }
        else
        {
            s = format(s, "\n %U",
                       format_dpo_id,
                       &path->fp_dpo, 13);
        }
        break;
    case FIB_PATH_TYPE_ATTACHED:
        if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
        {
            s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
        }
        else
        {
            s = format (s, " %U",
                        format_vnet_sw_interface_name,
                        vnm,
                        vnet_get_sw_interface(
                            vnm,
                            path->attached.fp_interface));
        }
        break;
    case FIB_PATH_TYPE_RECURSIVE:
        if (DPO_PROTO_MPLS == path->fp_nh_proto)
        {
            s = format (s, "via %U %U",
                        format_mpls_unicast_label,
                        path->recursive.fp_nh.fp_local_label,
                        format_mpls_eos_bit,
                        path->recursive.fp_nh.fp_eos);
        }
        else
        {
            s = format (s, "via %U",
                        format_ip46_address,
                        &path->recursive.fp_nh.fp_ip,
                        IP46_TYPE_ANY);
        }
        s = format (s, " in fib:%d",
                    path->recursive.fp_tbl_id,
                    path->fp_via_fib);
        s = format (s, " via-fib:%d", path->fp_via_fib);
        s = format (s, " via-dpo:[%U:%d]",
                    format_dpo_type, path->fp_dpo.dpoi_type,
                    path->fp_dpo.dpoi_index);

        break;
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_EXCLUSIVE:
        if (dpo_id_is_valid(&path->fp_dpo))
        {
            s = format(s, "%U", format_dpo_id,
                       &path->fp_dpo, 2);
        }
        break;
    }
    return (s);
}

u8 *
fib_path_format (fib_node_index_t pi, u8 *s)
{
    fib_path_t *path;

    path = fib_path_get(pi);
    ASSERT(NULL != path);

    return (format (s, "%U", format_fib_path, path));
}

u8 *
fib_path_adj_format (fib_node_index_t pi,
                     u32 indent,
                     u8 *s)
{
    fib_path_t *path;

    path = fib_path_get(pi);
    ASSERT(NULL != path);

    if (!dpo_id_is_valid(&path->fp_dpo))
    {
        s = format(s, " unresolved");
    }
    else
    {
        s = format(s, "%U", format_dpo_id,
                   &path->fp_dpo, 2);
    }

    return (s);
}

/*
 * fib_path_last_lock_gone
 *
 * We don't share paths, we share path lists, so the [un]lock functions
 * are no-ops
 */
static void
fib_path_last_lock_gone (fib_node_t *node)
{
    ASSERT(0);
}

static const adj_index_t
fib_path_attached_next_hop_get_adj (fib_path_t *path,
                                    vnet_link_t link)
{
    if (vnet_sw_interface_is_p2p(vnet_get_main(),
                                 path->attached_next_hop.fp_interface))
    {
        /*
         * if the interface is p2p then the adj for the specific
         * neighbour on that link will never exist. on p2p links
         * the subnet address (the attached route) links to the
         * auto-adj (see below), we want that adj here too.
         */
        return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
                                    link,
                                    &zero_addr,
                                    path->attached_next_hop.fp_interface));
    }
    else
    {
        return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
                                    link,
                                    &path->attached_next_hop.fp_nh,
                                    path->attached_next_hop.fp_interface));
    }
}

static void
fib_path_attached_next_hop_set (fib_path_t *path)
{
    /*
     * resolve directly via the adjacency described by the
     * interface and next-hop
     */
    dpo_set(&path->fp_dpo,
            DPO_ADJACENCY,
            path->fp_nh_proto,
            fib_path_attached_next_hop_get_adj(
                path,
                dpo_proto_to_link(path->fp_nh_proto)));

    /*
     * become a child of the adjacency so we receive updates
     * when its rewrite changes
     */
    path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
                                     FIB_NODE_TYPE_PATH,
                                     fib_path_get_index(path));

    if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
                                       path->attached_next_hop.fp_interface) ||
        !adj_is_up(path->fp_dpo.dpoi_index))
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
    }
}

static const adj_index_t
fib_path_attached_get_adj (fib_path_t *path,
                           vnet_link_t link)
{
    if (vnet_sw_interface_is_p2p(vnet_get_main(),
                                 path->attached.fp_interface))
    {
        /*
         * point-2-point interfaces do not require a glean, since
         * there is nothing to ARP. Install a rewrite/nbr adj instead
         */
        return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
                                    link,
                                    &zero_addr,
                                    path->attached.fp_interface));
    }
    else
    {
        return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
                                      path->attached.fp_interface,
                                      NULL));
    }
}

/*
 * create or update the path's recursive adj
 */
static void
fib_path_recursive_adj_update (fib_path_t *path,
                               fib_forward_chain_type_t fct,
                               dpo_id_t *dpo)
{
    dpo_id_t via_dpo = DPO_INVALID;

    /*
     * get the DPO to resolve through from the via-entry
     */
    fib_entry_contribute_forwarding(path->fp_via_fib,
                                    fct,
                                    &via_dpo);


    /*
     * hope for the best - clear if restrictions apply.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * Validate any recursion constraints and over-ride the via
     * adj if not met
     */
    if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
    {
        /*
         * the via FIB must be a host route.
         * note the via FIB just added will always be a host route
         * since it is an RR source added host route. So what we need to
         * check is whether the route has other sources. If it does then
         * some other source has added it as a host route. If it doesn't
         * then it was added only here and inherits forwarding from a cover.
         * the cover is not a host route.
         * The RR source is the lowest priority source, so we check if it
         * is the best. if it is there are no other sources.
         */
        if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
            dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));

            /*
             * PIC edge trigger. let the load-balance maps know
             */
            load_balance_map_path_state_change(fib_path_get_index(path));
        }
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
    {
        /*
         * RR source entries inherit the flags from the cover, so
         * we can check the via directly
         */
        if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
            dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));

            /*
             * PIC edge trigger. let the load-balance maps know
             */
            load_balance_map_path_state_change(fib_path_get_index(path));
        }
    }
    /*
     * check for over-riding factors on the FIB entry itself
     */
    if (!fib_entry_is_resolved(path->fp_via_fib))
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));

        /*
         * PIC edge trigger. let the load-balance maps know
         */
        load_balance_map_path_state_change(fib_path_get_index(path));
    }

    /*
     * If this path is contributing a drop, then it's not resolved
     */
    if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
    }

    /*
     * update the path's contributed DPO
     */
    dpo_copy(dpo, &via_dpo);

    FIB_PATH_DBG(path, "recursive update:");

    dpo_reset(&via_dpo);
}

/*
 * fib_path_is_permanent_drop
 *
 * Return !0 if the path is configured to permanently drop,
 * despite other attributes.
 */
static int
fib_path_is_permanent_drop (fib_path_t *path)
{
    return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
            (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
}

/*
 * fib_path_unresolve
 *
 * Remove our dependency on the resolution target
 */
static void
fib_path_unresolve (fib_path_t *path)
{
    /*
     * the forced drop path does not need unresolving
     */
    if (fib_path_is_permanent_drop(path))
    {
        return;
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
        if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
        {
            fib_prefix_t pfx;

            fib_entry_get_prefix(path->fp_via_fib, &pfx);
            fib_entry_child_remove(path->fp_via_fib,
                                   path->fp_sibling);
            fib_table_entry_special_remove(path->recursive.fp_tbl_id,
                                           &pfx,
                                           FIB_SOURCE_RR);
            path->fp_via_fib = FIB_NODE_INDEX_INVALID;
        }
        break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        adj_child_remove(path->fp_dpo.dpoi_index,
                         path->fp_sibling);
        adj_unlock(path->fp_dpo.dpoi_index);
        break;
    case FIB_PATH_TYPE_ATTACHED:
        if (DPO_PROTO_ETHERNET != path->fp_nh_proto)
        {
            adj_child_remove(path->fp_dpo.dpoi_index,
                             path->fp_sibling);
            adj_unlock(path->fp_dpo.dpoi_index);
        }
        break;
    case FIB_PATH_TYPE_EXCLUSIVE:
        dpo_reset(&path->exclusive.fp_ex_dpo);
        break;
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_DEAG:
        /*
         * these hold only the path's DPO, which is reset below.
         */
        break;
    }

    /*
     * release the adj we were holding and pick up the
     * drop just in case.
     */
    dpo_reset(&path->fp_dpo);
    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;

    return;
}

static fib_forward_chain_type_t
fib_path_to_chain_type (const fib_path_t *path)
{
    if (DPO_PROTO_MPLS == path->fp_nh_proto)
    {
        if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
            MPLS_EOS == path->recursive.fp_nh.fp_eos)
        {
            return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
        }
        else
        {
            return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
        }
    }
    else
    {
        return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
    }
}

/*
 * fib_path_back_walk_notify
 *
 * A back walk has reached this path.
 */
static fib_node_back_walk_rc_t
fib_path_back_walk_notify (fib_node_t *node,
                           fib_node_back_walk_ctx_t *ctx)
{
    fib_path_t *path;

    path = fib_path_from_fib_node(node);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
        if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
        {
            /*
             * modify the recursive adjacency to use the new forwarding
             * of the via-fib.
             * this update is visible to packets in flight in the DP.
             */
            fib_path_recursive_adj_update(
                path,
                fib_path_to_chain_type(path),
                &path->fp_dpo);
        }
        if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
            (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason))
        {
            /*
             * ADJ updates (complete<->incomplete) do not need to propagate to
             * recursive entries.
             * The only reason it's needed as far back as here, is that the adj
             * and the incomplete adj are a different DPO type, so the LBs need
             * to re-stack.
             * If this walk was quashed in the fib_entry, then any non-fib_path
             * children (like tunnels that collapse out the LB when they stack)
             * would not see the update.
             */
            return (FIB_NODE_BACK_WALK_CONTINUE);
        }
        break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        /*
FIXME comment
         * ADJ_UPDATE backwalks pass silently through here and up to
         * the path-list when the multipath adj collapse occurs.
         * The reason we do this is that the assumption is that VPP
         * runs in an environment where the Control-Plane is remote
         * and hence reacts slowly to link up down. In order to remove
         * this down link from the ECMP set quickly, we back-walk.
         * VPP also has dedicated CPUs, so we are not stealing resources
         * from the CP to do so.
         */
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
        {
            if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
            {
                /*
                 * already resolved. no need to walk back again
                 */
                return (FIB_NODE_BACK_WALK_CONTINUE);
            }
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
        {
            if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
            {
                /*
                 * already unresolved. no need to walk back again
                 */
                return (FIB_NODE_BACK_WALK_CONTINUE);
            }
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
        {
            /*
             * The interface this path resolves through has been deleted.
             * This will leave the path in a permanent drop state. The route
             * needs to be removed and readded (and hence the path-list deleted)
             * before it can forward again.
             */
            fib_path_unresolve(path);
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
        }
        if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
        {
            /*
             * restack the DPO to pick up the correct DPO sub-type
             */
            uword if_is_up;
            adj_index_t ai;

            if_is_up = vnet_sw_interface_is_admin_up(
                           vnet_get_main(),
                           path->attached_next_hop.fp_interface);

            ai = fib_path_attached_next_hop_get_adj(
                     path,
                     dpo_proto_to_link(path->fp_nh_proto));

            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
            if (if_is_up && adj_is_up(ai))
            {
                path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
            }

            dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
            adj_unlock(ai);

            if (!if_is_up)
            {
                /*
                 * If the interface is not up there is no reason to walk
                 * back to children. if we did they would only evaluate
                 * that this path is unresolved and hence it would
                 * not contribute the adjacency - so it would be wasted
                 * CPU time.
                 */
                return (FIB_NODE_BACK_WALK_CONTINUE);
            }
        }
        if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
        {
            if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
            {
                /*
                 * already unresolved. no need to walk back again
                 */
                return (FIB_NODE_BACK_WALK_CONTINUE);
            }
            /*
             * the adj has gone down. the path is no longer resolved.
             */
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        break;
    case FIB_PATH_TYPE_ATTACHED:
        /*
         * FIXME; this could schedule a lower priority walk, since attached
         * routes are not usually in ECMP configurations so the backwalk to
         * the FIB entry does not need to be high priority
         */
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
        {
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        }
        if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
        {
            fib_path_unresolve(path);
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
        }
        break;
    case FIB_PATH_TYPE_INTF_RX:
        ASSERT(0);
    case FIB_PATH_TYPE_DEAG:
        /*
         * FIXME When VRF delete is allowed this will need a poke.
         */
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * these path types have no parents. so to be
         * walked from one is unexpected.
         */
        ASSERT(0);
        break;
    }

    /*
     * propagate the backwalk further to the path-list
     */
    fib_path_list_back_walk(path->fp_pl_index, ctx);

    return (FIB_NODE_BACK_WALK_CONTINUE);
}

static void
fib_path_memory_show (void)
{
    fib_show_memory_usage("Path",
                          pool_elts(fib_path_pool),
                          pool_len(fib_path_pool),
                          sizeof(fib_path_t));
}

/*
 * The FIB path's graph node virtual function table
 */
static const fib_node_vft_t fib_path_vft = {
    .fnv_get = fib_path_get_node,
    .fnv_last_lock = fib_path_last_lock_gone,
    .fnv_back_walk = fib_path_back_walk_notify,
    .fnv_mem_show = fib_path_memory_show,
};

static fib_path_cfg_flags_t
fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
{
    fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;

    if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
        cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
    if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
        cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
    if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
        cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
    if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
        cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
    if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
        cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
    if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
        cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
    if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
        cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
    if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
        cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
    if (rpath->frp_flags & FIB_ROUTE_PATH_SOURCE_LOOKUP)
        cfg_flags |= FIB_PATH_CFG_FLAG_DEAG_SRC;

    return (cfg_flags);
}

/*
 * fib_path_create
 *
 * Create and initialise a new path object.
 * return the index of the path.
 */
fib_node_index_t
fib_path_create (fib_node_index_t pl_index,
                 const fib_route_path_t *rpath)
{
    fib_path_t *path;

    pool_get(fib_path_pool, path);
    memset(path, 0, sizeof(*path));

    fib_node_init(&path->fp_node,
                  FIB_NODE_TYPE_PATH);

    dpo_reset(&path->fp_dpo);
    path->fp_pl_index = pl_index;
    path->fp_nh_proto = rpath->frp_proto;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    path->fp_weight = rpath->frp_weight;
    if (0 == path->fp_weight)
    {
        /*
         * a weight of 0 is a meaningless value. We could either reject it, and thus force
         * clients to always use 1, or we can accept it and fix it up appropriately.
         */
        path->fp_weight = 1;
    }
    path->fp_preference = rpath->frp_preference;
    path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);

    /*
     * deduce the path's type from the parameters and save what is needed.
     */
    if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
    {
        path->fp_type = FIB_PATH_TYPE_RECEIVE;
        path->receive.fp_interface = rpath->frp_sw_if_index;
        path->receive.fp_addr = rpath->frp_addr;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
    {
        path->fp_type = FIB_PATH_TYPE_INTF_RX;
        path->intf_rx.fp_interface = rpath->frp_sw_if_index;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
    {
        path->fp_type = FIB_PATH_TYPE_DEAG;
        path->deag.fp_tbl_id = rpath->frp_fib_index;
        path->deag.fp_rpf_id = rpath->frp_rpf_id;
    }
    else if (~0 != rpath->frp_sw_if_index)
    {
        if (ip46_address_is_zero(&rpath->frp_addr))
        {
            path->fp_type = FIB_PATH_TYPE_ATTACHED;
            path->attached.fp_interface = rpath->frp_sw_if_index;
        }
        else
        {
            path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
            path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
            path->attached_next_hop.fp_nh = rpath->frp_addr;
        }
    }
    else
    {
        if (ip46_address_is_zero(&rpath->frp_addr))
        {
            if (~0 == rpath->frp_fib_index)
            {
                path->fp_type = FIB_PATH_TYPE_SPECIAL;
            }
            else
            {
                path->fp_type = FIB_PATH_TYPE_DEAG;
                path->deag.fp_tbl_id = rpath->frp_fib_index;
            }
        }
        else
        {
            path->fp_type = FIB_PATH_TYPE_RECURSIVE;
            if (DPO_PROTO_MPLS == path->fp_nh_proto)
            {
                path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
                path->recursive.fp_nh.fp_eos = rpath->frp_eos;
            }
            else
            {
                path->recursive.fp_nh.fp_ip = rpath->frp_addr;
            }
            path->recursive.fp_tbl_id = rpath->frp_fib_index;
        }
    }

    FIB_PATH_DBG(path, "create");

    return (fib_path_get_index(path));
}
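
/*
 * A rough map of the type deduction above (illustrative summary only):
 *   FIB_ROUTE_PATH_LOCAL                    -> receive
 *   FIB_ROUTE_PATH_INTF_RX                  -> intf-rx
 *   FIB_ROUTE_PATH_RPF_ID                   -> deag (tagged with the RPF-ID)
 *   sw_if_index set, zero address           -> attached
 *   sw_if_index set, address given          -> attached-nexthop
 *   no sw_if_index, zero address, no fib    -> special
 *   no sw_if_index, zero address, fib given -> deag
 *   no sw_if_index, address or label given  -> recursive
 */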

/*
 * fib_path_create_special
 *
 * Create and initialise a new path object.
 * return the index of the path.
 */
fib_node_index_t
fib_path_create_special (fib_node_index_t pl_index,
                         dpo_proto_t nh_proto,
                         fib_path_cfg_flags_t flags,
                         const dpo_id_t *dpo)
{
    fib_path_t *path;

    pool_get(fib_path_pool, path);
    memset(path, 0, sizeof(*path));

    fib_node_init(&path->fp_node,
                  FIB_NODE_TYPE_PATH);
    dpo_reset(&path->fp_dpo);

    path->fp_pl_index = pl_index;
    path->fp_weight = 1;
    path->fp_preference = 0;
    path->fp_nh_proto = nh_proto;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    path->fp_cfg_flags = flags;

    if (FIB_PATH_CFG_FLAG_DROP & flags)
    {
        path->fp_type = FIB_PATH_TYPE_SPECIAL;
    }
    else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
    {
        path->fp_type = FIB_PATH_TYPE_RECEIVE;
        path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
    }
    else
    {
        path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
        ASSERT(NULL != dpo);
        dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
    }

    return (fib_path_get_index(path));
}

/*
 * fib_path_copy
 *
 * Copy a path. return index of new path.
 */
fib_node_index_t
fib_path_copy (fib_node_index_t path_index,
               fib_node_index_t path_list_index)
{
    fib_path_t *path, *orig_path;

    pool_get(fib_path_pool, path);

    orig_path = fib_path_get(path_index);
    ASSERT(NULL != orig_path);

    memcpy(path, orig_path, sizeof(*path));

    FIB_PATH_DBG(path, "create-copy:%d", path_index);

    /*
     * reset the dynamic section
     */
    fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
    path->fp_oper_flags = FIB_PATH_OPER_FLAG_NONE;
    path->fp_pl_index = path_list_index;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
    dpo_reset(&path->fp_dpo);

    return (fib_path_get_index(path));
}

/*
 * fib_path_destroy
 *
 * destroy a path that is no longer required
 */
void
fib_path_destroy (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(NULL != path);
    FIB_PATH_DBG(path, "destroy");

    fib_path_unresolve(path);

    fib_node_deinit(&path->fp_node);
    pool_put(fib_path_pool, path);
}

/*
 * fib_path_hash
 *
 * compute the hash of a path; only the fields between the hash-start and
 * hash-end markers contribute.
 */
uword
fib_path_hash (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
                        (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
                         STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
                        0));
}

/*
 * fib_path_cmp_i
 *
 * Compare two paths for equivalence.
 */
static int
fib_path_cmp_i (const fib_path_t *path1,
                const fib_path_t *path2)
{
    int res;

    res = 1;

    /*
     * paths of different types and protocol are not equal.
     * paths that differ only in weight and/or preference are considered equal.
     */
    if (path1->fp_type != path2->fp_type)
    {
        res = (path1->fp_type - path2->fp_type);
    }
    else if (path1->fp_nh_proto != path2->fp_nh_proto)
    {
        res = (path1->fp_nh_proto - path2->fp_nh_proto);
    }
    else
    {
        /*
         * both paths are of the same type.
         * consider each type and its attributes in turn.
         */
        switch (path1->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
                                   &path2->attached_next_hop.fp_nh);
            if (0 == res) {
                res = (path1->attached_next_hop.fp_interface -
                       path2->attached_next_hop.fp_interface);
            }
            break;
        case FIB_PATH_TYPE_ATTACHED:
            res = (path1->attached.fp_interface -
                   path2->attached.fp_interface);
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            res = ip46_address_cmp(&path1->recursive.fp_nh,
                                   &path2->recursive.fp_nh);

            if (0 == res)
            {
                res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
            }
            break;
        case FIB_PATH_TYPE_DEAG:
            res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
            if (0 == res)
            {
                res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
            break;
        case FIB_PATH_TYPE_SPECIAL:
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_EXCLUSIVE:
            res = 0;
            break;
        }
    }
    return (res);
}

/*
 * fib_path_cmp_for_sort
 *
 * Compare two paths for equivalence. Used during path sorting.
 * As usual 0 means equal.
 */
int
fib_path_cmp_for_sort (void * v1,
                       void * v2)
{
    fib_node_index_t *pi1 = v1, *pi2 = v2;
    fib_path_t *path1, *path2;

    path1 = fib_path_get(*pi1);
    path2 = fib_path_get(*pi2);

    /*
     * when sorting paths we want the highest preference paths
     * first, so that the choices set built is in preference order
     */
    if (path1->fp_preference != path2->fp_preference)
    {
        return (path1->fp_preference - path2->fp_preference);
    }

    return (fib_path_cmp_i(path1, path2));
}

/*
 * fib_path_cmp
 *
 * Compare two paths for equivalence.
 */
int
fib_path_cmp (fib_node_index_t pi1,
              fib_node_index_t pi2)
{
    fib_path_t *path1, *path2;

    path1 = fib_path_get(pi1);
    path2 = fib_path_get(pi2);

    return (fib_path_cmp_i(path1, path2));
}

int
fib_path_cmp_w_route_path (fib_node_index_t path_index,
                           const fib_route_path_t *rpath)
{
    fib_path_t *path;
    int res;

    path = fib_path_get(path_index);

    res = 1;

    if (path->fp_weight != rpath->frp_weight)
    {
        res = (path->fp_weight - rpath->frp_weight);
    }
    else
    {
        /*
         * both paths are of the same type.
         * consider each type and its attributes in turn.
         */
        switch (path->fp_type)
        {
        case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
            res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
                                   &rpath->frp_addr);
            if (0 == res)
            {
                res = (path->attached_next_hop.fp_interface -
                       rpath->frp_sw_if_index);
            }
            break;
        case FIB_PATH_TYPE_ATTACHED:
            res = (path->attached.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_RECURSIVE:
            if (DPO_PROTO_MPLS == path->fp_nh_proto)
            {
                res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;

                if (res == 0)
                {
                    res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
                }
            }
            else
            {
                res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
                                       &rpath->frp_addr);
            }

            if (0 == res)
            {
                res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
            }
            break;
        case FIB_PATH_TYPE_INTF_RX:
            res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_DEAG:
            res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
            if (0 == res)
            {
                res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
            }
            break;
        case FIB_PATH_TYPE_SPECIAL:
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_EXCLUSIVE:
            res = 0;
            break;
        }
    }
    return (res);
}

/*
 * fib_path_recursive_loop_detect
 *
 * A forward walk of the FIB object graph to detect for a cycle/loop. This
 * walk is initiated when an entry is linking to a new path list or from an old.
 * The entry vector passed contains all the FIB entries that are children of this
 * path (it is all the entries encountered on the walk so far). If this vector
 * contains the entry this path resolves via, then a loop is about to form.
 * The loop must be allowed to form, since we need the dependencies in place
 * so that we can track when the loop breaks.
 * However, we MUST not produce a loop in the forwarding graph (else packets
 * would loop around the switch path until the loop breaks), so we mark recursive
 * paths as looped so that they do not contribute forwarding information.
 * By marking the path as looped, an entry such as;
 *    X/Y
 *     via a.a.a.a (looped)
 *     via b.b.b.b (not looped)
 * can still forward using the info provided by b.b.b.b only
 */
int
fib_path_recursive_loop_detect (fib_node_index_t path_index,
                                fib_node_index_t **entry_indicies)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * the forced drop path is never looped, cos it is never resolved.
     */
    if (fib_path_is_permanent_drop(path))
    {
        return (0);
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
    {
        fib_node_index_t *entry_index, *entries;
        int looped = 0;
        entries = *entry_indicies;

        vec_foreach(entry_index, entries) {
            if (*entry_index == path->fp_via_fib)
            {
                /*
                 * the entry that is about to link to this path-list (or
                 * one of this path-list's children) is the same entry that
                 * this recursive path resolves through. this is a cycle.
                 * abort the walk.
                 */
                looped = 1;
                break;
            }
        }

        if (looped)
        {
            FIB_PATH_DBG(path, "recursive loop formed");
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;

            dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
        }
        else
        {
            /*
             * no loop here yet. keep forward walking the graph.
             */
            if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
            {
                FIB_PATH_DBG(path, "recursive loop formed");
                path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
            }
            else
            {
                FIB_PATH_DBG(path, "recursive loop cleared");
                path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
            }
        }
        break;
    }
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * these path types cannot be part of a loop, since they are the leaves
         * of the graph.
         */
        break;
    }

    return (fib_path_is_looped(path_index));
}

int
fib_path_resolve (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * hope for the best.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * the forced drop path resolves via the drop adj
     */
    if (fib_path_is_permanent_drop(path))
    {
        dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        return (fib_path_is_resolved(path_index));
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        fib_path_attached_next_hop_set(path);
        break;
    case FIB_PATH_TYPE_ATTACHED:
        if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
        {
            l2_bridge_dpo_add_or_lock(path->attached.fp_interface,
                                      &path->fp_dpo);
        }
        else
        {
            /*
             * path->attached.fp_interface
             */
            if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
                                               path->attached.fp_interface))
            {
                path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
            }
            dpo_set(&path->fp_dpo,
                    DPO_ADJACENCY,
                    path->fp_nh_proto,
                    fib_path_attached_get_adj(path,
                                              dpo_proto_to_link(path->fp_nh_proto)));

            /*
             * become a child of the adjacency so we receive updates
             * when the interface state changes
             */
            path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
                                             FIB_NODE_TYPE_PATH,
                                             fib_path_get_index(path));
        }
        break;
    case FIB_PATH_TYPE_RECURSIVE:
    {
        /*
         * Create an RR source entry in the table for the address
         * that this path recurses through.
         * This resolve action is recursive, hence we may create
         * more paths in the process. more creates may mean a realloc
         * of this path.
         */
        fib_node_index_t fei;
        fib_prefix_t pfx;

        ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);

        if (DPO_PROTO_MPLS == path->fp_nh_proto)
        {
            fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
                                       path->recursive.fp_nh.fp_eos,
                                       &pfx);
        }
        else
        {
            fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
        }

        fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
                                          &pfx,
                                          FIB_SOURCE_RR,
                                          FIB_ENTRY_FLAG_NONE);

        path = fib_path_get(path_index);
        path->fp_via_fib = fei;

        /*
         * become a dependent child of the entry so the path is
         * informed when the forwarding for the entry changes.
         */
        path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
                                               FIB_NODE_TYPE_PATH,
                                               fib_path_get_index(path));

        /*
         * create and configure the IP DPO
         */
        fib_path_recursive_adj_update(
            path,
            fib_path_to_chain_type(path),
            &path->fp_dpo);

        break;
    }
    case FIB_PATH_TYPE_SPECIAL:
        /*
         * Resolve via the drop
         */
        dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
        break;
    case FIB_PATH_TYPE_DEAG:
    {
        /*
         * Resolve via a lookup DPO.
         * FIXME. control plane should add routes with a table ID
         */
        lookup_input_t input;
        lookup_cast_t cast;

        cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
                LOOKUP_MULTICAST :
                LOOKUP_UNICAST);
        input = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DEAG_SRC ?
                 LOOKUP_INPUT_SRC_ADDR :
                 LOOKUP_INPUT_DST_ADDR);

        lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
                                           path->fp_nh_proto,
                                           cast,
                                           input,
                                           LOOKUP_TABLE_FROM_CONFIG,
                                           &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_RECEIVE:
        /*
         * Resolve via a receive DPO.
         */
        receive_dpo_add_or_lock(path->fp_nh_proto,
                                path->receive.fp_interface,
                                &path->receive.fp_addr,
                                &path->fp_dpo);
        break;
    case FIB_PATH_TYPE_INTF_RX: {
        /*
         * Resolve via an interface receive DPO.
         */
        interface_rx_dpo_add_or_lock(path->fp_nh_proto,
                                     path->intf_rx.fp_interface,
                                     &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_EXCLUSIVE:
        /*
         * Resolve via the user provided DPO
         */
        dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
        break;
    }

    return (fib_path_is_resolved(path_index));
}

1747u32
1748fib_path_get_resolving_interface (fib_node_index_t path_index)
1749{
1750 fib_path_t *path;
1751
1752 path = fib_path_get(path_index);
1753
1754 switch (path->fp_type)
1755 {
1756 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1757 return (path->attached_next_hop.fp_interface);
1758 case FIB_PATH_TYPE_ATTACHED:
1759 return (path->attached.fp_interface);
1760 case FIB_PATH_TYPE_RECEIVE:
1761 return (path->receive.fp_interface);
1762 case FIB_PATH_TYPE_RECURSIVE:
Neale Ranns08b16482017-05-13 05:52:58 -07001763 if (fib_path_is_resolved(path_index))
1764 {
1765 return (fib_entry_get_resolving_interface(path->fp_via_fib));
1766 }
1767 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001768 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001769 case FIB_PATH_TYPE_SPECIAL:
1770 case FIB_PATH_TYPE_DEAG:
1771 case FIB_PATH_TYPE_EXCLUSIVE:
1772 break;
1773 }
1774 return (~0);
1775}
1776
1777adj_index_t
1778fib_path_get_adj (fib_node_index_t path_index)
1779{
1780 fib_path_t *path;
1781
1782 path = fib_path_get(path_index);
1783
1784 ASSERT(dpo_is_adj(&path->fp_dpo));
1785 if (dpo_is_adj(&path->fp_dpo))
1786 {
1787 return (path->fp_dpo.dpoi_index);
1788 }
1789 return (ADJ_INDEX_INVALID);
1790}
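/**
 * Usage sketch (illustrative, not part of the original source): the result
 * is only valid when the path's DPO is an adjacency, so check for
 * ADJ_INDEX_INVALID before use.
 *
 * @code
 *   adj_index_t ai;
 *
 *   ai = fib_path_get_adj(path_index);
 *   if (ADJ_INDEX_INVALID != ai)
 *   {
 *       ip_adjacency_t *adj = adj_get(ai);
 *       // inspect adj->rewrite_header.sw_if_index, etc.
 *   }
 * @endcode
 */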
1791
Neale Ranns57b58602017-07-15 07:37:25 -07001792u16
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001793fib_path_get_weight (fib_node_index_t path_index)
1794{
1795 fib_path_t *path;
1796
1797 path = fib_path_get(path_index);
1798
1799 ASSERT(path);
1800
1801 return (path->fp_weight);
1802}
1803
Neale Ranns57b58602017-07-15 07:37:25 -07001804u16
1805fib_path_get_preference (fib_node_index_t path_index)
1806{
1807 fib_path_t *path;
1808
1809 path = fib_path_get(path_index);
1810
1811 ASSERT(path);
1812
1813 return (path->fp_preference);
1814}
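/**
 * Usage sketch (illustrative, not part of the original source): weight and
 * preference are per-path u16 attributes, read e.g. when normalising a
 * multi-path set.
 *
 * @code
 *   u16 weight = fib_path_get_weight(path_index);
 *   u16 preference = fib_path_get_preference(path_index);
 * @endcode
 */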
1815
Neale Ranns3ee44042016-10-03 13:05:48 +01001816/**
1817 * @brief Contribute the path's adjacency to the list passed.
1818 * By calling this function over all paths, recursively, a child
1819 * can construct its full set of forwarding adjacencies, and hence its
1820 * uRPF list.
1821 */
1822void
1823fib_path_contribute_urpf (fib_node_index_t path_index,
1824 index_t urpf)
1825{
1826 fib_path_t *path;
1827
Neale Ranns3ee44042016-10-03 13:05:48 +01001828 path = fib_path_get(path_index);
1829
Neale Ranns88fc83e2017-04-05 08:11:14 -07001830 /*
1831 * resolved and unresolved paths contribute to the RPF list.
1832 */
Neale Ranns3ee44042016-10-03 13:05:48 +01001833 switch (path->fp_type)
1834 {
1835 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1836 fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
1837 break;
1838
1839 case FIB_PATH_TYPE_ATTACHED:
1840 fib_urpf_list_append(urpf, path->attached.fp_interface);
1841 break;
1842
1843 case FIB_PATH_TYPE_RECURSIVE:
Neale Ranns08b16482017-05-13 05:52:58 -07001844 if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
1845 !fib_path_is_looped(path_index))
Neale Ranns88fc83e2017-04-05 08:11:14 -07001846 {
1847 /*
 1848 * a path can be unresolved because of resolution constraints, or
Neale Ranns08b16482017-05-13 05:52:58 -07001849 * because it has no via-entry; without a via-entry there is nothing to contribute.
Neale Ranns88fc83e2017-04-05 08:11:14 -07001850 */
1851 fib_entry_contribute_urpf(path->fp_via_fib, urpf);
1852 }
Neale Ranns3ee44042016-10-03 13:05:48 +01001853 break;
1854
1855 case FIB_PATH_TYPE_EXCLUSIVE:
1856 case FIB_PATH_TYPE_SPECIAL:
1857 /*
1858 * these path types may link to an adj, if that's what
 1859 * the client gave
1860 */
1861 if (dpo_is_adj(&path->fp_dpo))
1862 {
1863 ip_adjacency_t *adj;
1864
1865 adj = adj_get(path->fp_dpo.dpoi_index);
1866
1867 fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
1868 }
1869 break;
1870
1871 case FIB_PATH_TYPE_DEAG:
1872 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001873 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns3ee44042016-10-03 13:05:48 +01001874 /*
1875 * these path types don't link to an adj
1876 */
1877 break;
1878 }
1879}
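/**
 * Usage sketch (illustrative, not part of the original source): the owner
 * of a uRPF list calls this per-path so the list accumulates every
 * resolving interface. fib_urpf_list_alloc_and_lock() is assumed to be the
 * allocator exposed by fib_urpf_list.h.
 *
 * @code
 *   index_t urpf = fib_urpf_list_alloc_and_lock();
 *
 *   // for each path_index owned by the entry's path-list:
 *   fib_path_contribute_urpf(path_index, urpf);
 * @endcode
 */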
1880
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001881void
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001882fib_path_stack_mpls_disp (fib_node_index_t path_index,
1883 dpo_proto_t payload_proto,
1884 dpo_id_t *dpo)
1885{
1886 fib_path_t *path;
1887
1888 path = fib_path_get(path_index);
1889
1890 ASSERT(path);
1891
1892 switch (path->fp_type)
1893 {
1894 case FIB_PATH_TYPE_DEAG:
1895 {
1896 dpo_id_t tmp = DPO_INVALID;
1897
1898 dpo_copy(&tmp, dpo);
1899 dpo_set(dpo,
1900 DPO_MPLS_DISPOSITION,
1901 payload_proto,
1902 mpls_disp_dpo_create(payload_proto,
1903 path->deag.fp_rpf_id,
1904 &tmp));
1905 dpo_reset(&tmp);
1906 break;
1907 }
1908 case FIB_PATH_TYPE_RECEIVE:
1909 case FIB_PATH_TYPE_ATTACHED:
1910 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1911 case FIB_PATH_TYPE_RECURSIVE:
1912 case FIB_PATH_TYPE_INTF_RX:
1913 case FIB_PATH_TYPE_EXCLUSIVE:
1914 case FIB_PATH_TYPE_SPECIAL:
1915 break;
1916 }
1917}
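/**
 * Usage sketch (illustrative, not part of the original source): when
 * building an MPLS EOS chain, a caller first collects the path's
 * forwarding for the payload's chain type and then stacks the MPLS
 * disposition on top; for non-deag path types the call is a no-op.
 *
 * @code
 *   dpo_id_t dpo = DPO_INVALID;
 *
 *   fib_path_contribute_forwarding(path_index,
 *                                  FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                  &dpo);
 *   fib_path_stack_mpls_disp(path_index, DPO_PROTO_IP4, &dpo);
 *   // ... consume dpo, then dpo_reset(&dpo)
 * @endcode
 */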
1918
1919void
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001920fib_path_contribute_forwarding (fib_node_index_t path_index,
1921 fib_forward_chain_type_t fct,
1922 dpo_id_t *dpo)
1923{
1924 fib_path_t *path;
1925
1926 path = fib_path_get(path_index);
1927
1928 ASSERT(path);
1929 ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
1930
1931 FIB_PATH_DBG(path, "contribute");
1932
1933 /*
1934 * The DPO stored in the path was created when the path was resolved.
 1935 * This then represents the path's 'native' protocol, e.g. IP.
 1936 * For all other chain types we will need to find something else.
1937 */
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001938 if (fib_path_to_chain_type(path) == fct)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001939 {
1940 dpo_copy(dpo, &path->fp_dpo);
1941 }
Neale Ranns5e575b12016-10-03 09:40:25 +01001942 else
1943 {
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001944 switch (path->fp_type)
1945 {
1946 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1947 switch (fct)
1948 {
1949 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1950 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1951 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1952 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
Neale Ranns5e575b12016-10-03 09:40:25 +01001953 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001954 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001955 {
1956 adj_index_t ai;
1957
1958 /*
Neale Rannsad422ed2016-11-02 14:20:04 +00001959 * get an appropriate link-type adj.
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001960 */
1961 ai = fib_path_attached_next_hop_get_adj(
1962 path,
1963 fib_forw_chain_type_to_link_type(fct));
1964 dpo_set(dpo, DPO_ADJACENCY,
1965 fib_forw_chain_type_to_dpo_proto(fct), ai);
1966 adj_unlock(ai);
1967
1968 break;
1969 }
Neale Ranns32e1c012016-11-22 17:07:28 +00001970 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1971 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001972 break;
Neale Ranns32e1c012016-11-22 17:07:28 +00001973 }
1974 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001975 case FIB_PATH_TYPE_RECURSIVE:
1976 switch (fct)
1977 {
1978 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1979 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1980 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001981 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
Neale Ranns32e1c012016-11-22 17:07:28 +00001982 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1983 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001984 fib_path_recursive_adj_update(path, fct, dpo);
1985 break;
Neale Ranns5e575b12016-10-03 09:40:25 +01001986 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001987 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns5e575b12016-10-03 09:40:25 +01001988 ASSERT(0);
1989 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001990 }
1991 break;
1992 case FIB_PATH_TYPE_DEAG:
Neale Ranns32e1c012016-11-22 17:07:28 +00001993 switch (fct)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001994 {
1995 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1996 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
1997 DPO_PROTO_MPLS,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001998 LOOKUP_UNICAST,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001999 LOOKUP_INPUT_DST_ADDR,
2000 LOOKUP_TABLE_FROM_CONFIG,
2001 dpo);
2002 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002003 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002004 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2005 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002006 dpo_copy(dpo, &path->fp_dpo);
Neale Ranns32e1c012016-11-22 17:07:28 +00002007 break;
2008 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2009 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns5e575b12016-10-03 09:40:25 +01002010 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08002011 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns5e575b12016-10-03 09:40:25 +01002012 ASSERT(0);
2013 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002014 }
2015 break;
2016 case FIB_PATH_TYPE_EXCLUSIVE:
2017 dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2018 break;
2019 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns6f631152017-10-03 08:20:21 -07002020 if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
2021 {
2022 dpo_copy(dpo, &path->fp_dpo);
2023 break;
2024 }
Neale Ranns32e1c012016-11-22 17:07:28 +00002025 switch (fct)
2026 {
2027 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2028 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2029 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2030 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2031 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08002032 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns8c4611b2017-05-23 03:43:47 -07002033 {
2034 adj_index_t ai;
2035
2036 /*
 2037 * get an appropriate link-type adj.
2038 */
2039 ai = fib_path_attached_get_adj(
2040 path,
2041 fib_forw_chain_type_to_link_type(fct));
2042 dpo_set(dpo, DPO_ADJACENCY,
2043 fib_forw_chain_type_to_dpo_proto(fct), ai);
2044 adj_unlock(ai);
2045 break;
2046 }
Neale Ranns32e1c012016-11-22 17:07:28 +00002047 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2048 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2049 {
2050 adj_index_t ai;
2051
2052 /*
2053 * Create the adj needed for sending IP multicast traffic
2054 */
Neale Rannsda78f952017-05-24 09:15:43 -07002055 ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
Neale Ranns32e1c012016-11-22 17:07:28 +00002056 fib_forw_chain_type_to_link_type(fct),
2057 path->attached.fp_interface);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002058 dpo_set(dpo, DPO_ADJACENCY,
Neale Ranns32e1c012016-11-22 17:07:28 +00002059 fib_forw_chain_type_to_dpo_proto(fct),
2060 ai);
2061 adj_unlock(ai);
2062 }
2063 break;
2064 }
2065 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002066 case FIB_PATH_TYPE_INTF_RX:
2067 /*
 2068 * Create the interface-rx DPO that makes traffic appear received on the interface
2069 */
Neale Ranns43161a82017-08-12 02:12:00 -07002070 interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2071 path->attached.fp_interface,
2072 dpo);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002073 break;
Neale Ranns32e1c012016-11-22 17:07:28 +00002074 case FIB_PATH_TYPE_RECEIVE:
2075 case FIB_PATH_TYPE_SPECIAL:
2076 dpo_copy(dpo, &path->fp_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002077 break;
2078 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002079 }
2080}
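/**
 * Usage sketch (illustrative, not part of the original source): a child
 * object (e.g. a fib_entry building a load-balance) asks each path to
 * contribute a DPO for the chain type it needs, then stacks on the result.
 *
 * @code
 *   dpo_id_t via_dpo = DPO_INVALID;
 *
 *   fib_path_contribute_forwarding(path_index,
 *                                  FIB_FORW_CHAIN_TYPE_UNICAST_IP6,
 *                                  &via_dpo);
 *   // ... stack a child DPO on via_dpo ...
 *   dpo_reset(&via_dpo);
 * @endcode
 */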
2081
2082load_balance_path_t *
2083fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2084 fib_forward_chain_type_t fct,
2085 load_balance_path_t *hash_key)
2086{
2087 load_balance_path_t *mnh;
2088 fib_path_t *path;
2089
2090 path = fib_path_get(path_index);
2091
2092 ASSERT(path);
2093
2094 if (fib_path_is_resolved(path_index))
2095 {
2096 vec_add2(hash_key, mnh, 1);
2097
2098 mnh->path_weight = path->fp_weight;
2099 mnh->path_index = path_index;
Neale Ranns5e575b12016-10-03 09:40:25 +01002100 fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002101 }
2102
2103 return (hash_key);
2104}
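/**
 * Usage sketch (illustrative, not part of the original source): the bucket
 * vector for a load-balance is built by appending each resolved path's
 * contribution; unresolved paths are skipped by the function itself.
 *
 * @code
 *   load_balance_path_t *nhs = NULL;
 *
 *   // for each path_index in the path-list:
 *   nhs = fib_path_append_nh_for_multipath_hash(path_index,
 *                                               FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
 *                                               nhs);
 *
 *   // ... fill the load-balance from nhs, then vec_free(nhs)
 * @endcode
 */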
2105
2106int
Neale Rannsf12a83f2017-04-18 09:09:40 -07002107fib_path_is_recursive_constrained (fib_node_index_t path_index)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002108{
2109 fib_path_t *path;
2110
2111 path = fib_path_get(path_index);
2112
Neale Rannsf12a83f2017-04-18 09:09:40 -07002113 return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2114 ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2115 (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002116}
2117
2118int
2119fib_path_is_exclusive (fib_node_index_t path_index)
2120{
2121 fib_path_t *path;
2122
2123 path = fib_path_get(path_index);
2124
2125 return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2126}
2127
2128int
2129fib_path_is_deag (fib_node_index_t path_index)
2130{
2131 fib_path_t *path;
2132
2133 path = fib_path_get(path_index);
2134
2135 return (FIB_PATH_TYPE_DEAG == path->fp_type);
2136}
2137
2138int
2139fib_path_is_resolved (fib_node_index_t path_index)
2140{
2141 fib_path_t *path;
2142
2143 path = fib_path_get(path_index);
2144
2145 return (dpo_id_is_valid(&path->fp_dpo) &&
2146 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2147 !fib_path_is_looped(path_index) &&
2148 !fib_path_is_permanent_drop(path));
2149}
2150
2151int
2152fib_path_is_looped (fib_node_index_t path_index)
2153{
2154 fib_path_t *path;
2155
2156 path = fib_path_get(path_index);
2157
2158 return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2159}
2160
Neale Ranns81424992017-05-18 03:03:22 -07002161fib_path_list_walk_rc_t
Steven01b07122016-11-02 10:40:09 -07002162fib_path_encode (fib_node_index_t path_list_index,
2163 fib_node_index_t path_index,
2164 void *ctx)
2165{
2166 fib_route_path_encode_t **api_rpaths = ctx;
2167 fib_route_path_encode_t *api_rpath;
2168 fib_path_t *path;
2169
2170 path = fib_path_get(path_index);
2171 if (!path)
Neale Ranns81424992017-05-18 03:03:22 -07002172 return (FIB_PATH_LIST_WALK_CONTINUE);
Steven01b07122016-11-02 10:40:09 -07002173 vec_add2(*api_rpaths, api_rpath, 1);
2174 api_rpath->rpath.frp_weight = path->fp_weight;
Neale Ranns57b58602017-07-15 07:37:25 -07002175 api_rpath->rpath.frp_preference = path->fp_preference;
Steven01b07122016-11-02 10:40:09 -07002176 api_rpath->rpath.frp_proto = path->fp_nh_proto;
2177 api_rpath->rpath.frp_sw_if_index = ~0;
2178 api_rpath->dpo = path->exclusive.fp_ex_dpo;
2179 switch (path->fp_type)
2180 {
2181 case FIB_PATH_TYPE_RECEIVE:
2182 api_rpath->rpath.frp_addr = path->receive.fp_addr;
2183 api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
Florin Coras91d341c2017-07-29 11:50:31 -07002184 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002185 break;
2186 case FIB_PATH_TYPE_ATTACHED:
2187 api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
Florin Coras91d341c2017-07-29 11:50:31 -07002188 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002189 break;
2190 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2191 api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2192 api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2193 break;
2194 case FIB_PATH_TYPE_SPECIAL:
2195 break;
2196 case FIB_PATH_TYPE_DEAG:
Neale Ranns7b7ba572017-10-01 12:08:10 -07002197 api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
2198 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002199 break;
2200 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsad422ed2016-11-02 14:20:04 +00002201 api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
Steven01b07122016-11-02 10:40:09 -07002202 break;
2203 default:
2204 break;
2205 }
Neale Ranns81424992017-05-18 03:03:22 -07002206 return (FIB_PATH_LIST_WALK_CONTINUE);
Steven01b07122016-11-02 10:40:09 -07002207}
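/**
 * Usage sketch (illustrative, assumes the path-list walk API from
 * fib_path_list.h): fib_path_encode() is shaped as a walk callback, so an
 * API handler can collect every path of a path-list into a vector of
 * fib_route_path_encode_t.
 *
 * @code
 *   fib_route_path_encode_t *api_rpaths = NULL;
 *
 *   fib_path_list_walk(path_list_index, fib_path_encode, &api_rpaths);
 *   // ... encode api_rpaths into the API reply, then vec_free(api_rpaths)
 * @endcode
 */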
2208
Neale Rannsda78f952017-05-24 09:15:43 -07002209dpo_proto_t
Neale Rannsad422ed2016-11-02 14:20:04 +00002210fib_path_get_proto (fib_node_index_t path_index)
2211{
2212 fib_path_t *path;
2213
2214 path = fib_path_get(path_index);
2215
2216 return (path->fp_nh_proto);
2217}
2218
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002219void
2220fib_path_module_init (void)
2221{
2222 fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
2223}
2224
2225static clib_error_t *
2226show_fib_path_command (vlib_main_t * vm,
2227 unformat_input_t * input,
2228 vlib_cli_command_t * cmd)
2229{
Neale Ranns33a7dd52016-10-07 15:14:33 +01002230 fib_node_index_t pi;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002231 fib_path_t *path;
2232
Neale Ranns33a7dd52016-10-07 15:14:33 +01002233 if (unformat (input, "%d", &pi))
2234 {
2235 /*
2236 * show one in detail
2237 */
2238 if (!pool_is_free_index(fib_path_pool, pi))
2239 {
2240 path = fib_path_get(pi);
2241 u8 *s = fib_path_format(pi, NULL);
2242 s = format(s, "children:");
2243 s = fib_node_children_format(path->fp_node.fn_children, s);
2244 vlib_cli_output (vm, "%s", s);
2245 vec_free(s);
2246 }
2247 else
2248 {
2249 vlib_cli_output (vm, "path %d invalid", pi);
2250 }
2251 }
2252 else
2253 {
2254 vlib_cli_output (vm, "FIB Paths");
2255 pool_foreach(path, fib_path_pool,
2256 ({
2257 vlib_cli_output (vm, "%U", format_fib_path, path);
2258 }));
2259 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002260
2261 return (NULL);
2262}
2263
2264VLIB_CLI_COMMAND (show_fib_path, static) = {
2265 .path = "show fib paths",
2266 .function = show_fib_path_command,
2267 .short_help = "show fib paths",
2268};
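/*
 * CLI usage sketch (illustrative):
 *
 *   show fib paths        - list every path in the pool
 *   show fib paths 12     - show path index 12 in detail, with its children
 */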