blob: 00a3fa63d449f916a451ff21e085c28d0ed41710 [file] [log] [blame]
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include <vlib/vlib.h>
17#include <vnet/vnet.h>
18#include <vnet/ip/format.h>
19#include <vnet/ip/ip.h>
20#include <vnet/dpo/drop_dpo.h>
21#include <vnet/dpo/receive_dpo.h>
22#include <vnet/dpo/load_balance_map.h>
23#include <vnet/dpo/lookup_dpo.h>
Neale Ranns0f26c5a2017-03-01 15:12:11 -080024#include <vnet/dpo/interface_dpo.h>
25#include <vnet/dpo/mpls_disposition.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010026
27#include <vnet/adj/adj.h>
Neale Ranns32e1c012016-11-22 17:07:28 +000028#include <vnet/adj/adj_mcast.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010029
Neale Ranns3ee44042016-10-03 13:05:48 +010030#include <vnet/fib/fib_path.h>
31#include <vnet/fib/fib_node.h>
32#include <vnet/fib/fib_table.h>
33#include <vnet/fib/fib_entry.h>
34#include <vnet/fib/fib_path_list.h>
35#include <vnet/fib/fib_internal.h>
36#include <vnet/fib/fib_urpf_list.h>
Neale Rannsa3af3372017-03-28 03:49:52 -070037#include <vnet/fib/mpls_fib.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010038
/**
 * Enumeration of path types
 */
typedef enum fib_path_type_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_TYPE_FIRST = 0,
    /**
     * Attached-nexthop. An interface and a nexthop are known.
     */
    FIB_PATH_TYPE_ATTACHED_NEXT_HOP = FIB_PATH_TYPE_FIRST,
    /**
     * attached. Only the interface is known.
     */
    FIB_PATH_TYPE_ATTACHED,
    /**
     * recursive. Only the next-hop is known.
     */
    FIB_PATH_TYPE_RECURSIVE,
    /**
     * special. nothing is known. so we drop.
     */
    FIB_PATH_TYPE_SPECIAL,
    /**
     * exclusive. user provided adj.
     */
    FIB_PATH_TYPE_EXCLUSIVE,
    /**
     * deag. Link to a lookup adj in the next table
     */
    FIB_PATH_TYPE_DEAG,
    /**
     * interface receive.
     */
    FIB_PATH_TYPE_INTF_RX,
    /**
     * receive. it's for-us.
     */
    FIB_PATH_TYPE_RECEIVE,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_TYPE_LAST = FIB_PATH_TYPE_RECEIVE,
} __attribute__ ((packed)) fib_path_type_t;

/**
 * The maximum number of path_types
 */
#define FIB_PATH_TYPE_MAX (FIB_PATH_TYPE_LAST + 1)

/**
 * Human-readable names for each path type, indexed by fib_path_type_t.
 */
#define FIB_PATH_TYPES {					\
    [FIB_PATH_TYPE_ATTACHED_NEXT_HOP] = "attached-nexthop",	\
    [FIB_PATH_TYPE_ATTACHED]          = "attached",		\
    [FIB_PATH_TYPE_RECURSIVE]         = "recursive",	        \
    [FIB_PATH_TYPE_SPECIAL]           = "special",	        \
    [FIB_PATH_TYPE_EXCLUSIVE]         = "exclusive",	        \
    [FIB_PATH_TYPE_DEAG]              = "deag",	        	\
    [FIB_PATH_TYPE_INTF_RX]           = "intf-rx",	        \
    [FIB_PATH_TYPE_RECEIVE]           = "receive",		\
}

/* Iterate over every path type, FIRST..LAST inclusive. */
#define FOR_EACH_FIB_PATH_TYPE(_item) \
    for (_item = FIB_PATH_TYPE_FIRST; _item <= FIB_PATH_TYPE_LAST; _item++)
103
/**
 * Enumeration of path operational (i.e. derived) attributes
 */
typedef enum fib_path_oper_attribute_t_ {
    /**
     * Marker. Add new types after this one.
     */
    FIB_PATH_OPER_ATTRIBUTE_FIRST = 0,
    /**
     * The path forms part of a recursive loop.
     */
    FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP = FIB_PATH_OPER_ATTRIBUTE_FIRST,
    /**
     * The path is resolved
     */
    FIB_PATH_OPER_ATTRIBUTE_RESOLVED,
    /**
     * The path is attached, despite what the next-hop may say.
     */
    FIB_PATH_OPER_ATTRIBUTE_ATTACHED,
    /**
     * The path has become a permanent drop.
     */
    FIB_PATH_OPER_ATTRIBUTE_DROP,
    /**
     * Marker. Add new types before this one, then update it.
     */
    FIB_PATH_OPER_ATTRIBUTE_LAST = FIB_PATH_OPER_ATTRIBUTE_DROP,
} __attribute__ ((packed)) fib_path_oper_attribute_t;

/**
 * The maximum number of path operational attributes
 */
#define FIB_PATH_OPER_ATTRIBUTE_MAX (FIB_PATH_OPER_ATTRIBUTE_LAST + 1)
138
/**
 * Human-readable names for each operational attribute, indexed by
 * fib_path_oper_attribute_t. Every enum value MUST have an entry here:
 * format_fib_path walks FOR_EACH_FIB_PATH_OPER_ATTRIBUTE and formats
 * the name with "%s", so a missing entry is a NULL string at runtime.
 * (Fix: the "attached" entry for FIB_PATH_OPER_ATTRIBUTE_ATTACHED was
 * missing.)
 */
#define FIB_PATH_OPER_ATTRIBUTES {					\
    [FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP] = "recursive-loop",	\
    [FIB_PATH_OPER_ATTRIBUTE_RESOLVED]       = "resolved",	        \
    [FIB_PATH_OPER_ATTRIBUTE_ATTACHED]       = "attached",	        \
    [FIB_PATH_OPER_ATTRIBUTE_DROP]           = "drop",		        \
}

/* Iterate over every operational attribute, FIRST..LAST inclusive. */
#define FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(_item) \
    for (_item = FIB_PATH_OPER_ATTRIBUTE_FIRST; \
	 _item <= FIB_PATH_OPER_ATTRIBUTE_LAST; \
	 _item++)
149
/**
 * Path flags from the attributes: one bit per operational attribute.
 */
typedef enum fib_path_oper_flags_t_ {
    FIB_PATH_OPER_FLAG_NONE = 0,
    FIB_PATH_OPER_FLAG_RECURSIVE_LOOP = (1 << FIB_PATH_OPER_ATTRIBUTE_RECURSIVE_LOOP),
    FIB_PATH_OPER_FLAG_DROP = (1 << FIB_PATH_OPER_ATTRIBUTE_DROP),
    FIB_PATH_OPER_FLAG_RESOLVED = (1 << FIB_PATH_OPER_ATTRIBUTE_RESOLVED),
    FIB_PATH_OPER_FLAG_ATTACHED = (1 << FIB_PATH_OPER_ATTRIBUTE_ATTACHED),
} __attribute__ ((packed)) fib_path_oper_flags_t;
160
/**
 * A FIB path
 *
 * The members between STRUCT_MARK(path_hash_start) and
 * STRUCT_MARK(path_hash_end) are the identity of the path: they are
 * hashed/compared to find equal paths. Members after path_hash_end are
 * derived during resolution and excluded from the hash.
 */
typedef struct fib_path_t_ {
    /**
     * A path is a node in the FIB graph.
     */
    fib_node_t fp_node;

    /**
     * The index of the path-list to which this path belongs
     */
    u32 fp_pl_index;

    /**
     * This marks the start of the memory area used to hash
     * the path
     */
    STRUCT_MARK(path_hash_start);

    /**
     * Configuration Flags
     */
    fib_path_cfg_flags_t fp_cfg_flags;

    /**
     * The type of the path. This is the selector for the union
     */
    fib_path_type_t fp_type;

    /**
     * The protocol of the next-hop, i.e. the address family of the
     * next-hop's address. We can't derive this from the address itself
     * since the address can be all zeros
     */
    fib_protocol_t fp_nh_proto;

    /**
     * UCMP [unnormalised] weight
     */
    u16 fp_weight;
    /**
     * A path preference. 0 is the best.
     * Only paths of the best preference, that are 'up', are considered
     * for forwarding.
     */
    u16 fp_preference;

    /**
     * per-type union of the data required to resolve the path.
     * fp_type selects the active member.
     */
    union {
	struct {
	    /**
	     * The next-hop
	     */
	    ip46_address_t fp_nh;
	    /**
	     * The interface
	     */
	    u32 fp_interface;
	} attached_next_hop;
	struct {
	    /**
	     * The interface
	     */
	    u32 fp_interface;
	} attached;
	struct {
	    union
	    {
		/**
		 * The next-hop
		 */
		ip46_address_t fp_ip;
		struct {
		    /**
		     * The local label to resolve through.
		     */
		    mpls_label_t fp_local_label;
		    /**
		     * The EOS bit of the resolving label
		     */
		    mpls_eos_bit_t fp_eos;
		};
	    } fp_nh;
	    /**
	     * The FIB table index in which to find the next-hop.
	     */
	    fib_node_index_t fp_tbl_id;
	} recursive;
	struct {
	    /**
	     * The FIB index in which to perform the next lookup
	     */
	    fib_node_index_t fp_tbl_id;
	    /**
	     * The RPF-ID to tag the packets with
	     */
	    fib_rpf_id_t fp_rpf_id;
	} deag;
	struct {
	} special;
	struct {
	    /**
	     * The user provided 'exclusive' DPO
	     */
	    dpo_id_t fp_ex_dpo;
	} exclusive;
	struct {
	    /**
	     * The interface on which the local address is configured
	     */
	    u32 fp_interface;
	    /**
	     * The next-hop
	     */
	    ip46_address_t fp_addr;
	} receive;
	struct {
	    /**
	     * The interface on which the packets will be input.
	     */
	    u32 fp_interface;
	} intf_rx;
    };
    STRUCT_MARK(path_hash_end);

    /**
     * Members in this last section represent information that is
     * derived during resolution. It should not be copied to new paths
     * nor compared.
     */

    /**
     * Operational Flags
     */
    fib_path_oper_flags_t fp_oper_flags;

    /**
     * the resolving via fib. not part of the union, since it is not part
     * of the path's hash.
     */
    fib_node_index_t fp_via_fib;

    /**
     * The Data-path objects through which this path resolves for IP.
     */
    dpo_id_t fp_dpo;

    /**
     * the index of this path in the parent's child list.
     */
    u32 fp_sibling;
} fib_path_t;
316
/*
 * Array of strings/names for the path types and attributes.
 * Indexed by the corresponding enum values; used by format_fib_path.
 */
static const char *fib_path_type_names[] = FIB_PATH_TYPES;
static const char *fib_path_oper_attribute_names[] = FIB_PATH_OPER_ATTRIBUTES;
static const char *fib_path_cfg_attribute_names[] = FIB_PATH_CFG_ATTRIBUTES;

/*
 * The memory pool from which we allocate all the paths
 */
static fib_path_t *fib_path_pool;
328
/*
 * Debug macro: formats the path into a temporary vector, logs it via
 * clib_warning, then frees the vector. Compiles to nothing unless
 * FIB_DEBUG is defined.
 */
#ifdef FIB_DEBUG
#define FIB_PATH_DBG(_p, _fmt, _args...)			\
{								\
    u8 *_tmp = NULL;						\
    _tmp = fib_path_format(fib_path_get_index(_p), _tmp);	\
    clib_warning("path:[%d:%s]:" _fmt,				\
		 fib_path_get_index(_p), _tmp,			\
		 ##_args);					\
    vec_free(_tmp);						\
}
#else
#define FIB_PATH_DBG(_p, _fmt, _args...)
#endif
345
346static fib_path_t *
347fib_path_get (fib_node_index_t index)
348{
349 return (pool_elt_at_index(fib_path_pool, index));
350}
351
352static fib_node_index_t
353fib_path_get_index (fib_path_t *path)
354{
355 return (path - fib_path_pool);
356}
357
358static fib_node_t *
359fib_path_get_node (fib_node_index_t index)
360{
361 return ((fib_node_t*)fib_path_get(index));
362}
363
364static fib_path_t*
365fib_path_from_fib_node (fib_node_t *node)
366{
367#if CLIB_DEBUG > 0
368 ASSERT(FIB_NODE_TYPE_PATH == node->fn_type);
369#endif
370 return ((fib_path_t*)node);
371}
372
373u8 *
374format_fib_path (u8 * s, va_list * args)
375{
376 fib_path_t *path = va_arg (*args, fib_path_t *);
377 vnet_main_t * vnm = vnet_get_main();
378 fib_path_oper_attribute_t oattr;
379 fib_path_cfg_attribute_t cattr;
380
381 s = format (s, " index:%d ", fib_path_get_index(path));
382 s = format (s, "pl-index:%d ", path->fp_pl_index);
383 s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto);
384 s = format (s, "weight=%d ", path->fp_weight);
Neale Ranns57b58602017-07-15 07:37:25 -0700385 s = format (s, "pref=%d ", path->fp_preference);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100386 s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
387 if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
388 s = format(s, " oper-flags:");
389 FOR_EACH_FIB_PATH_OPER_ATTRIBUTE(oattr) {
390 if ((1<<oattr) & path->fp_oper_flags) {
391 s = format (s, "%s,", fib_path_oper_attribute_names[oattr]);
392 }
393 }
394 }
395 if (FIB_PATH_CFG_FLAG_NONE != path->fp_cfg_flags) {
396 s = format(s, " cfg-flags:");
397 FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(cattr) {
398 if ((1<<cattr) & path->fp_cfg_flags) {
399 s = format (s, "%s,", fib_path_cfg_attribute_names[cattr]);
400 }
401 }
402 }
403 s = format(s, "\n ");
404
405 switch (path->fp_type)
406 {
407 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
408 s = format (s, "%U", format_ip46_address,
409 &path->attached_next_hop.fp_nh,
410 IP46_TYPE_ANY);
411 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
412 {
413 s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
414 }
415 else
416 {
417 s = format (s, " %U",
418 format_vnet_sw_interface_name,
419 vnm,
420 vnet_get_sw_interface(
421 vnm,
422 path->attached_next_hop.fp_interface));
423 if (vnet_sw_interface_is_p2p(vnet_get_main(),
424 path->attached_next_hop.fp_interface))
425 {
426 s = format (s, " (p2p)");
427 }
428 }
429 if (!dpo_id_is_valid(&path->fp_dpo))
430 {
431 s = format(s, "\n unresolved");
432 }
433 else
434 {
435 s = format(s, "\n %U",
436 format_dpo_id,
437 &path->fp_dpo, 13);
438 }
439 break;
440 case FIB_PATH_TYPE_ATTACHED:
441 if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP)
442 {
443 s = format (s, " if_index:%d", path->attached_next_hop.fp_interface);
444 }
445 else
446 {
447 s = format (s, " %U",
448 format_vnet_sw_interface_name,
449 vnm,
450 vnet_get_sw_interface(
451 vnm,
452 path->attached.fp_interface));
453 }
454 break;
455 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsad422ed2016-11-02 14:20:04 +0000456 if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
457 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800458 s = format (s, "via %U %U",
Neale Rannsad422ed2016-11-02 14:20:04 +0000459 format_mpls_unicast_label,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800460 path->recursive.fp_nh.fp_local_label,
461 format_mpls_eos_bit,
462 path->recursive.fp_nh.fp_eos);
Neale Rannsad422ed2016-11-02 14:20:04 +0000463 }
464 else
465 {
466 s = format (s, "via %U",
467 format_ip46_address,
468 &path->recursive.fp_nh.fp_ip,
469 IP46_TYPE_ANY);
470 }
471 s = format (s, " in fib:%d",
472 path->recursive.fp_tbl_id,
473 path->fp_via_fib);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100474 s = format (s, " via-fib:%d", path->fp_via_fib);
475 s = format (s, " via-dpo:[%U:%d]",
476 format_dpo_type, path->fp_dpo.dpoi_type,
477 path->fp_dpo.dpoi_index);
478
479 break;
480 case FIB_PATH_TYPE_RECEIVE:
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800481 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100482 case FIB_PATH_TYPE_SPECIAL:
483 case FIB_PATH_TYPE_DEAG:
484 case FIB_PATH_TYPE_EXCLUSIVE:
485 if (dpo_id_is_valid(&path->fp_dpo))
486 {
487 s = format(s, "%U", format_dpo_id,
488 &path->fp_dpo, 2);
489 }
490 break;
491 }
492 return (s);
493}
494
495u8 *
496fib_path_format (fib_node_index_t pi, u8 *s)
497{
498 fib_path_t *path;
499
500 path = fib_path_get(pi);
501 ASSERT(NULL != path);
502
503 return (format (s, "%U", format_fib_path, path));
504}
505
506u8 *
507fib_path_adj_format (fib_node_index_t pi,
508 u32 indent,
509 u8 *s)
510{
511 fib_path_t *path;
512
513 path = fib_path_get(pi);
514 ASSERT(NULL != path);
515
516 if (!dpo_id_is_valid(&path->fp_dpo))
517 {
518 s = format(s, " unresolved");
519 }
520 else
521 {
522 s = format(s, "%U", format_dpo_id,
523 &path->fp_dpo, 2);
524 }
525
526 return (s);
527}
528
/*
 * fib_path_last_lock_gone
 *
 * We don't share paths, we share path lists, so the [un]lock functions
 * are no-ops. This node-VFT callback should therefore never fire.
 */
static void
fib_path_last_lock_gone (fib_node_t *node)
{
    ASSERT(0);
}
540
541static const adj_index_t
542fib_path_attached_next_hop_get_adj (fib_path_t *path,
Neale Ranns924d03a2016-10-19 08:25:46 +0100543 vnet_link_t link)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100544{
545 if (vnet_sw_interface_is_p2p(vnet_get_main(),
546 path->attached_next_hop.fp_interface))
547 {
548 /*
549 * if the interface is p2p then the adj for the specific
550 * neighbour on that link will never exist. on p2p links
551 * the subnet address (the attached route) links to the
552 * auto-adj (see below), we want that adj here too.
553 */
554 return (adj_nbr_add_or_lock(path->fp_nh_proto,
555 link,
556 &zero_addr,
557 path->attached_next_hop.fp_interface));
558 }
559 else
560 {
561 return (adj_nbr_add_or_lock(path->fp_nh_proto,
562 link,
563 &path->attached_next_hop.fp_nh,
564 path->attached_next_hop.fp_interface));
565 }
566}
567
/**
 * Resolve an attached-next-hop path: stack its DPO on the neighbour
 * adjacency and register as a child of that adjacency for updates.
 * Clears FIB_PATH_OPER_FLAG_RESOLVED if the interface is admin-down
 * or the adjacency itself is down.
 */
static void
fib_path_attached_next_hop_set (fib_path_t *path)
{
    /*
     * resolve directly via the adjacency described by the
     * interface and next-hop
     */
    dpo_set(&path->fp_dpo,
	    DPO_ADJACENCY,
	    fib_proto_to_dpo(path->fp_nh_proto),
	    fib_path_attached_next_hop_get_adj(
		 path,
		 fib_proto_to_link(path->fp_nh_proto)));

    /*
     * become a child of the adjacency so we receive updates
     * when its rewrite changes
     */
    path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
				     FIB_NODE_TYPE_PATH,
				     fib_path_get_index(path));

    /* note: only *clears* resolved here; the caller sets the flag */
    if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
				      path->attached_next_hop.fp_interface) ||
	!adj_is_up(path->fp_dpo.dpoi_index))
    {
	path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
    }
}
597
Neale Ranns8c4611b2017-05-23 03:43:47 -0700598static const adj_index_t
599fib_path_attached_get_adj (fib_path_t *path,
600 vnet_link_t link)
601{
602 if (vnet_sw_interface_is_p2p(vnet_get_main(),
603 path->attached.fp_interface))
604 {
605 /*
606 * point-2-point interfaces do not require a glean, since
607 * there is nothing to ARP. Install a rewrite/nbr adj instead
608 */
609 return (adj_nbr_add_or_lock(path->fp_nh_proto,
610 link,
611 &zero_addr,
612 path->attached.fp_interface));
613 }
614 else
615 {
616 return (adj_glean_add_or_lock(path->fp_nh_proto,
617 path->attached.fp_interface,
618 NULL));
619 }
620}
621
/*
 * create or update the path's recursive adj
 *
 * Pulls the forwarding contribution of the via-entry, then applies the
 * recursion constraints (loop, resolve-via-host, resolve-via-attached)
 * and the via-entry's own resolved state, downgrading to a drop and
 * clearing FIB_PATH_OPER_FLAG_RESOLVED when any constraint fails.
 * The result is copied into *dpo.
 */
static void
fib_path_recursive_adj_update (fib_path_t *path,
			       fib_forward_chain_type_t fct,
			       dpo_id_t *dpo)
{
    dpo_id_t via_dpo = DPO_INVALID;

    /*
     * get the DPO to resolve through from the via-entry
     */
    fib_entry_contribute_forwarding(path->fp_via_fib,
				    fct,
				    &via_dpo);


    /*
     * hope for the best - clear if restrictions apply.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * Validate any recursion constraints and over-ride the via
     * adj if not met
     */
    if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
    {
	path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
    {
	/*
	 * the via FIB must be a host route.
	 * note the via FIB just added will always be a host route
	 * since it is an RR source added host route. So what we need to
	 * check is whether the route has other sources. If it does then
	 * some other source has added it as a host route. If it doesn't
	 * then it was added only here and inherits forwarding from a cover.
	 * the cover is not a host route.
	 * The RR source is the lowest priority source, so we check if it
	 * is the best. if it is there are no other sources.
	 */
	if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
	{
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	    dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

	    /*
	     * PIC edge trigger. let the load-balance maps know
	     */
	    load_balance_map_path_state_change(fib_path_get_index(path));
	}
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED)
    {
	/*
	 * RR source entries inherit the flags from the cover, so
	 * we can check the via directly
	 */
	if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
	{
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	    dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

	    /*
	     * PIC edge trigger. let the load-balance maps know
	     */
	    load_balance_map_path_state_change(fib_path_get_index(path));
	}
    }
    /*
     * check for over-riding factors on the FIB entry itself
     */
    if (!fib_entry_is_resolved(path->fp_via_fib))
    {
	path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));

	/*
	 * PIC edge trigger. let the load-balance maps know
	 */
	load_balance_map_path_state_change(fib_path_get_index(path));
    }

    /*
     * If this path is contributing a drop, then it's not resolved
     */
    if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
    {
	path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
    }

    /*
     * update the path's contributed DPO
     */
    dpo_copy(dpo, &via_dpo);

    /* NOTE(review): these FIB_PATH_DBG varargs don't obviously match the
     * "%U" convention - only compiled under FIB_DEBUG; verify if enabled */
    FIB_PATH_DBG(path, "recursive update: %U",
		 fib_get_lookup_main(path->fp_nh_proto),
		 &path->fp_dpo, 2);

    dpo_reset(&via_dpo);
}
728
729/*
730 * fib_path_is_permanent_drop
731 *
732 * Return !0 if the path is configured to permanently drop,
733 * despite other attributes.
734 */
735static int
736fib_path_is_permanent_drop (fib_path_t *path)
737{
738 return ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_DROP) ||
739 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_DROP));
740}
741
/*
 * fib_path_unresolve
 *
 * Remove our dependency on the resolution target: detach from the
 * parent object (via-entry or adjacency), release locks, and reset the
 * path's contributed DPO. After this the path is unresolved.
 */
static void
fib_path_unresolve (fib_path_t *path)
{
    /*
     * the forced drop path does not need unresolving
     */
    if (fib_path_is_permanent_drop(path))
    {
	return;
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
	if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
	{
	    fib_prefix_t pfx;

	    /* unlink from the via-entry, then withdraw the RR-sourced
	     * entry this path had added to track it */
	    fib_entry_get_prefix(path->fp_via_fib, &pfx);
	    fib_entry_child_remove(path->fp_via_fib,
				   path->fp_sibling);
	    fib_table_entry_special_remove(path->recursive.fp_tbl_id,
					   &pfx,
					   FIB_SOURCE_RR);
	    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
	}
	break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
	/* drop the child linkage and the lock taken at resolve time */
	adj_child_remove(path->fp_dpo.dpoi_index,
			 path->fp_sibling);
	adj_unlock(path->fp_dpo.dpoi_index);
	break;
    case FIB_PATH_TYPE_EXCLUSIVE:
	dpo_reset(&path->exclusive.fp_ex_dpo);
	break;
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_DEAG:
	/*
	 * these hold only the path's DPO, which is reset below.
	 */
	break;
    }

    /*
     * release the adj we were holding and pick up the
     * drop just in case.
     */
    dpo_reset(&path->fp_dpo);
    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;

    return;
}
802
803static fib_forward_chain_type_t
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800804fib_path_to_chain_type (const fib_path_t *path)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100805{
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800806 switch (path->fp_nh_proto)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100807 {
808 case FIB_PROTOCOL_IP4:
809 return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
810 case FIB_PROTOCOL_IP6:
811 return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
812 case FIB_PROTOCOL_MPLS:
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800813 if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
814 MPLS_EOS == path->recursive.fp_nh.fp_eos)
815 {
816 return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
817 }
818 else
819 {
Neale Ranns9f171f52017-04-11 08:56:53 -0700820 return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800821 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100822 }
823 return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
824}
825
/*
 * fib_path_back_walk_notify
 *
 * A back walk has reached this path. Update the path's resolved state
 * and/or re-stack its DPO according to the walk reason, then continue
 * the walk up to the owning path-list (unless short-circuited).
 */
static fib_node_back_walk_rc_t
fib_path_back_walk_notify (fib_node_t *node,
			   fib_node_back_walk_ctx_t *ctx)
{
    fib_path_t *path;

    path = fib_path_from_fib_node(node);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
	if (FIB_NODE_BW_REASON_FLAG_EVALUATE & ctx->fnbw_reason)
	{
	    /*
	     * modify the recursive adjacency to use the new forwarding
	     * of the via-fib.
	     * this update is visible to packets in flight in the DP.
	     */
	    fib_path_recursive_adj_update(
		path,
		fib_path_to_chain_type(path),
		&path->fp_dpo);
	}
	if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) ||
	    (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN   & ctx->fnbw_reason))
	{
	    /*
	     * ADJ updates (complete<->incomplete) do not need to propagate to
	     * recursive entries.
	     * The only reason its needed as far back as here, is that the adj
	     * and the incomplete adj are a different DPO type, so the LBs need
	     * to re-stack.
	     * If this walk was quashed in the fib_entry, then any non-fib_path
	     * children (like tunnels that collapse out the LB when they stack)
	     * would not see the update.
	     */
	    return (FIB_NODE_BACK_WALK_CONTINUE);
	}
	break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
	/*
	 * ADJ_UPDATE backwalks pass silently through here and up to
	 * the path-list when the multipath adj collapse occurs.
	 * The reason we do this is that the assumption is that VPP
	 * runs in an environment where the Control-Plane is remote
	 * and hence reacts slowly to link up down. In order to remove
	 * this down link from the ECMP set quickly, we back-walk.
	 * VPP also has dedicated CPUs, so we are not stealing resources
	 * from the CP to do so.
	 */
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
	{
	    if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED)
	    {
		/*
		 * already resolved. no need to walk back again
		 */
		return (FIB_NODE_BACK_WALK_CONTINUE);
	    }
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
	}
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
	{
	    if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
	    {
		/*
		 * already unresolved. no need to walk back again
		 */
		return (FIB_NODE_BACK_WALK_CONTINUE);
	    }
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	}
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
	{
	    /*
	     * The interface this path resolves through has been deleted.
	     * This will leave the path in a permanent drop state. The route
	     * needs to be removed and readded (and hence the path-list deleted)
	     * before it can forward again.
	     */
	    fib_path_unresolve(path);
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
	}
	if (FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason)
	{
	    /*
	     * restack the DPO to pick up the correct DPO sub-type
	     */
	    uword if_is_up;
	    adj_index_t ai;

	    if_is_up = vnet_sw_interface_is_admin_up(
			   vnet_get_main(),
			   path->attached_next_hop.fp_interface);

	    ai = fib_path_attached_next_hop_get_adj(
		     path,
		     fib_proto_to_link(path->fp_nh_proto));

	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	    if (if_is_up && adj_is_up(ai))
	    {
		path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
	    }

	    /* dpo_set takes its own lock on 'ai'; release ours */
	    dpo_set(&path->fp_dpo, DPO_ADJACENCY,
		    fib_proto_to_dpo(path->fp_nh_proto),
		    ai);
	    adj_unlock(ai);

	    if (!if_is_up)
	    {
		/*
		 * If the interface is not up there is no reason to walk
		 * back to children. if we did they would only evaluate
		 * that this path is unresolved and hence it would
		 * not contribute the adjacency - so it would be wasted
		 * CPU time.
		 */
		return (FIB_NODE_BACK_WALK_CONTINUE);
	    }
	}
	if (FIB_NODE_BW_REASON_FLAG_ADJ_DOWN & ctx->fnbw_reason)
	{
	    if (!(path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED))
	    {
		/*
		 * already unresolved. no need to walk back again
		 */
		return (FIB_NODE_BACK_WALK_CONTINUE);
	    }
	    /*
	     * the adj has gone down. the path is no longer resolved.
	     */
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	}
	break;
    case FIB_PATH_TYPE_ATTACHED:
	/*
	 * FIXME; this could schedule a lower priority walk, since attached
	 * routes are not usually in ECMP configurations so the backwalk to
	 * the FIB entry does not need to be high priority
	 */
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_UP & ctx->fnbw_reason)
	{
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
	}
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN & ctx->fnbw_reason)
	{
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	}
	if (FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE & ctx->fnbw_reason)
	{
	    fib_path_unresolve(path);
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP;
	}
	break;
    case FIB_PATH_TYPE_INTF_RX:
	ASSERT(0);
    case FIB_PATH_TYPE_DEAG:
	/*
	 * FIXME When VRF delete is allowed this will need a poke.
	 */
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_EXCLUSIVE:
	/*
	 * these path types have no parents. so to be
	 * walked from one is unexpected.
	 */
	ASSERT(0);
	break;
    }

    /*
     * propagate the backwalk further to the path-list
     */
    fib_path_list_back_walk(path->fp_pl_index, ctx);

    return (FIB_NODE_BACK_WALK_CONTINUE);
}
1013
Neale Ranns6c3ebcc2016-10-02 21:20:15 +01001014static void
1015fib_path_memory_show (void)
1016{
1017 fib_show_memory_usage("Path",
1018 pool_elts(fib_path_pool),
1019 pool_len(fib_path_pool),
1020 sizeof(fib_path_t));
1021}
1022
/*
 * The FIB path's graph node virtual function table.
 * Registered with the fib_node infrastructure so back-walks and
 * memory/show commands reach the path implementation above.
 */
static const fib_node_vft_t fib_path_vft = {
    .fnv_get = fib_path_get_node,
    .fnv_last_lock = fib_path_last_lock_gone,
    .fnv_back_walk = fib_path_back_walk_notify,
    .fnv_mem_show = fib_path_memory_show,
};
1032
1033static fib_path_cfg_flags_t
1034fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath)
1035{
Neale Ranns450cd302016-11-09 17:49:42 +00001036 fib_path_cfg_flags_t cfg_flags = FIB_PATH_CFG_FLAG_NONE;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001037
1038 if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_HOST)
1039 cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_HOST;
1040 if (rpath->frp_flags & FIB_ROUTE_PATH_RESOLVE_VIA_ATTACHED)
1041 cfg_flags |= FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED;
Neale Ranns32e1c012016-11-22 17:07:28 +00001042 if (rpath->frp_flags & FIB_ROUTE_PATH_LOCAL)
1043 cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL;
Neale Ranns4b919a52017-03-11 05:55:21 -08001044 if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED)
1045 cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001046 if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX)
1047 cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX;
1048 if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID)
1049 cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID;
1050 if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE)
1051 cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE;
1052 if (rpath->frp_flags & FIB_ROUTE_PATH_DROP)
1053 cfg_flags |= FIB_PATH_CFG_FLAG_DROP;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001054
1055 return (cfg_flags);
1056}
1057
/*
 * fib_path_create
 *
 * Create and initialise a new path object.
 * return the index of the path.
 *
 * The path's type is deduced from the route-path description, checked in
 * strict precedence order: local/receive, then intf-rx, then rpf-id deag,
 * then interface-scoped (attached / attached-next-hop), and finally the
 * interface-less cases (special, deag, recursive).
 */
fib_node_index_t
fib_path_create (fib_node_index_t pl_index,
		 const fib_route_path_t *rpath)
{
    fib_path_t *path;

    pool_get(fib_path_pool, path);
    memset(path, 0, sizeof(*path));

    fib_node_init(&path->fp_node,
		  FIB_NODE_TYPE_PATH);

    dpo_reset(&path->fp_dpo);
    path->fp_pl_index = pl_index;
    path->fp_nh_proto = rpath->frp_proto;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    path->fp_weight = rpath->frp_weight;
    if (0 == path->fp_weight)
    {
	/*
	 * a weight of 0 is a meaningless value. We could either reject it, and thus force
	 * clients to always use 1, or we can accept it and fixup appropriately.
	 */
	path->fp_weight = 1;
    }
    path->fp_preference = rpath->frp_preference;
    path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);

    /*
     * deduce the path's type from the parameters and save what is needed.
     */
    if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_LOCAL)
    {
	path->fp_type = FIB_PATH_TYPE_RECEIVE;
	path->receive.fp_interface = rpath->frp_sw_if_index;
        path->receive.fp_addr = rpath->frp_addr;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX)
    {
	path->fp_type = FIB_PATH_TYPE_INTF_RX;
	path->intf_rx.fp_interface = rpath->frp_sw_if_index;
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID)
    {
	path->fp_type = FIB_PATH_TYPE_DEAG;
	path->deag.fp_tbl_id = rpath->frp_fib_index;
	path->deag.fp_rpf_id = rpath->frp_rpf_id;
    }
    else if (~0 != rpath->frp_sw_if_index)
    {
	/* an interface is given: a zero next-hop address means directly
	 * attached, otherwise it's a next-hop reachable on that interface */
	if (ip46_address_is_zero(&rpath->frp_addr))
	{
	    path->fp_type = FIB_PATH_TYPE_ATTACHED;
	    path->attached.fp_interface = rpath->frp_sw_if_index;
	}
	else
	{
	    path->fp_type = FIB_PATH_TYPE_ATTACHED_NEXT_HOP;
	    path->attached_next_hop.fp_interface = rpath->frp_sw_if_index;
	    path->attached_next_hop.fp_nh = rpath->frp_addr;
	}
    }
    else
    {
	if (ip46_address_is_zero(&rpath->frp_addr))
	{
	    /* no interface, no next-hop: either a special (no table) or a
	     * lookup in another table (deag) */
	    if (~0 == rpath->frp_fib_index)
	    {
		path->fp_type = FIB_PATH_TYPE_SPECIAL;
	    }
	    else
	    {
		path->fp_type = FIB_PATH_TYPE_DEAG;
		path->deag.fp_tbl_id = rpath->frp_fib_index;
	    }
	}
	else
	{
	    /* next-hop but no interface: recurse via another entry. The
	     * recursion key is a label for MPLS paths, an address otherwise */
	    path->fp_type = FIB_PATH_TYPE_RECURSIVE;
	    if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
	    {
		path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
		path->recursive.fp_nh.fp_eos = rpath->frp_eos;
	    }
	    else
	    {
		path->recursive.fp_nh.fp_ip = rpath->frp_addr;
	    }
	    path->recursive.fp_tbl_id = rpath->frp_fib_index;
	}
    }

    FIB_PATH_DBG(path, "create");

    return (fib_path_get_index(path));
}
1160
1161/*
1162 * fib_path_create_special
1163 *
1164 * Create and initialise a new path object.
1165 * return the index of the path.
1166 */
1167fib_node_index_t
1168fib_path_create_special (fib_node_index_t pl_index,
1169 fib_protocol_t nh_proto,
1170 fib_path_cfg_flags_t flags,
1171 const dpo_id_t *dpo)
1172{
1173 fib_path_t *path;
1174
1175 pool_get(fib_path_pool, path);
1176 memset(path, 0, sizeof(*path));
1177
1178 fib_node_init(&path->fp_node,
1179 FIB_NODE_TYPE_PATH);
1180 dpo_reset(&path->fp_dpo);
1181
1182 path->fp_pl_index = pl_index;
1183 path->fp_weight = 1;
Neale Ranns57b58602017-07-15 07:37:25 -07001184 path->fp_preference = 0;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001185 path->fp_nh_proto = nh_proto;
1186 path->fp_via_fib = FIB_NODE_INDEX_INVALID;
1187 path->fp_cfg_flags = flags;
1188
1189 if (FIB_PATH_CFG_FLAG_DROP & flags)
1190 {
1191 path->fp_type = FIB_PATH_TYPE_SPECIAL;
1192 }
1193 else if (FIB_PATH_CFG_FLAG_LOCAL & flags)
1194 {
1195 path->fp_type = FIB_PATH_TYPE_RECEIVE;
1196 path->attached.fp_interface = FIB_NODE_INDEX_INVALID;
1197 }
1198 else
1199 {
1200 path->fp_type = FIB_PATH_TYPE_EXCLUSIVE;
1201 ASSERT(NULL != dpo);
1202 dpo_copy(&path->exclusive.fp_ex_dpo, dpo);
1203 }
1204
1205 return (fib_path_get_index(path));
1206}
1207
/*
 * fib_path_copy
 *
 * Copy a path. return index of new path.
 * The copy shares the original's configuration but gets a clean
 * dynamic/operational state and is re-parented to the given path-list.
 */
fib_node_index_t
fib_path_copy (fib_node_index_t path_index,
	       fib_node_index_t path_list_index)
{
    fib_path_t *path, *orig_path;

    pool_get(fib_path_pool, path);

    /*
     * NOTE(review): the original is fetched only after pool_get —
     * presumably because growing the pool can move its elements, which
     * would invalidate a pointer taken earlier; confirm against the
     * vppinfra pool semantics.
     */
    orig_path = fib_path_get(path_index);
    ASSERT(NULL != orig_path);

    memcpy(path, orig_path, sizeof(*path));

    FIB_PATH_DBG(path, "create-copy:%d", path_index);

    /*
     * reset the dynamic section
     */
    fib_node_init(&path->fp_node, FIB_NODE_TYPE_PATH);
    path->fp_oper_flags = FIB_PATH_OPER_FLAG_NONE;
    path->fp_pl_index = path_list_index;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    /* wipe the copied DPO id before dpo_reset so the copy does not
     * release a lock held by the original */
    memset(&path->fp_dpo, 0, sizeof(path->fp_dpo));
    dpo_reset(&path->fp_dpo);

    return (fib_path_get_index(path));
}
1240
1241/*
1242 * fib_path_destroy
1243 *
1244 * destroy a path that is no longer required
1245 */
1246void
1247fib_path_destroy (fib_node_index_t path_index)
1248{
1249 fib_path_t *path;
1250
1251 path = fib_path_get(path_index);
1252
1253 ASSERT(NULL != path);
1254 FIB_PATH_DBG(path, "destroy");
1255
1256 fib_path_unresolve(path);
1257
1258 fib_node_deinit(&path->fp_node);
1259 pool_put(fib_path_pool, path);
1260}
1261
1262/*
1263 * fib_path_destroy
1264 *
1265 * destroy a path that is no longer required
1266 */
1267uword
1268fib_path_hash (fib_node_index_t path_index)
1269{
1270 fib_path_t *path;
1271
1272 path = fib_path_get(path_index);
1273
1274 return (hash_memory(STRUCT_MARK_PTR(path, path_hash_start),
1275 (STRUCT_OFFSET_OF(fib_path_t, path_hash_end) -
1276 STRUCT_OFFSET_OF(fib_path_t, path_hash_start)),
1277 0));
1278}
1279
/*
 * fib_path_cmp_i
 *
 * Compare two paths for equivalence. Returns 0 when equal, otherwise the
 * (signed) difference of the first attribute that distinguishes them, so
 * the result is usable as a sort comparator.
 * Weight and preference are deliberately not compared: paths differing
 * only in those are considered the same path.
 */
static int
fib_path_cmp_i (const fib_path_t *path1,
		const fib_path_t *path2)
{
    int res;

    res = 1;

    /*
     * paths of different types and protocol are not equal.
     * different weights and/or preference only are the same path.
     */
    if (path1->fp_type != path2->fp_type)
    {
	res = (path1->fp_type - path2->fp_type);
    }
    else if (path1->fp_nh_proto != path2->fp_nh_proto)
    {
	res = (path1->fp_nh_proto - path2->fp_nh_proto);
    }
    else
    {
	/*
	 * both paths are of the same type.
	 * consider each type and its attributes in turn.
	 */
	switch (path1->fp_type)
	{
	case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
	    res = ip46_address_cmp(&path1->attached_next_hop.fp_nh,
				   &path2->attached_next_hop.fp_nh);
	    if (0 == res) {
		res = (path1->attached_next_hop.fp_interface -
		       path2->attached_next_hop.fp_interface);
	    }
	    break;
	case FIB_PATH_TYPE_ATTACHED:
	    res = (path1->attached.fp_interface -
		   path2->attached.fp_interface);
	    break;
	case FIB_PATH_TYPE_RECURSIVE:
	    /* NOTE(review): for MPLS paths this compares the label via the
	     * fp_nh union through ip46_address_cmp; assumes the unused
	     * union bytes are zeroed at create time — confirm */
	    res = ip46_address_cmp(&path1->recursive.fp_nh,
				   &path2->recursive.fp_nh);

	    if (0 == res)
	    {
		res = (path1->recursive.fp_tbl_id - path2->recursive.fp_tbl_id);
	    }
	    break;
	case FIB_PATH_TYPE_DEAG:
	    res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id);
	    if (0 == res)
	    {
		res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id);
	    }
	    break;
	case FIB_PATH_TYPE_INTF_RX:
	    res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface);
	    break;
	case FIB_PATH_TYPE_SPECIAL:
	case FIB_PATH_TYPE_RECEIVE:
	case FIB_PATH_TYPE_EXCLUSIVE:
	    /* these types carry no distinguishing attributes */
	    res = 0;
	    break;
	}
    }
    return (res);
}
1353
1354/*
1355 * fib_path_cmp_for_sort
1356 *
1357 * Compare two paths for equivalence. Used during path sorting.
1358 * As usual 0 means equal.
1359 */
1360int
1361fib_path_cmp_for_sort (void * v1,
1362 void * v2)
1363{
1364 fib_node_index_t *pi1 = v1, *pi2 = v2;
1365 fib_path_t *path1, *path2;
1366
1367 path1 = fib_path_get(*pi1);
1368 path2 = fib_path_get(*pi2);
1369
Neale Ranns57b58602017-07-15 07:37:25 -07001370 /*
1371 * when sorting paths we want the highest preference paths
1372 * first, so that the choices set built is in prefernce order
1373 */
1374 if (path1->fp_preference != path2->fp_preference)
1375 {
1376 return (path1->fp_preference - path2->fp_preference);
1377 }
1378
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001379 return (fib_path_cmp_i(path1, path2));
1380}
1381
1382/*
1383 * fib_path_cmp
1384 *
1385 * Compare two paths for equivalence.
1386 */
1387int
1388fib_path_cmp (fib_node_index_t pi1,
1389 fib_node_index_t pi2)
1390{
1391 fib_path_t *path1, *path2;
1392
1393 path1 = fib_path_get(pi1);
1394 path2 = fib_path_get(pi2);
1395
1396 return (fib_path_cmp_i(path1, path2));
1397}
1398
/*
 * Compare an existing path object against a client's route-path
 * description; 0 means they describe the same path.
 * Unlike fib_path_cmp_i, the weight IS significant here, since the
 * client's description includes it.
 */
int
fib_path_cmp_w_route_path (fib_node_index_t path_index,
			   const fib_route_path_t *rpath)
{
    fib_path_t *path;
    int res;

    path = fib_path_get(path_index);

    res = 1;

    if (path->fp_weight != rpath->frp_weight)
    {
	res = (path->fp_weight - rpath->frp_weight);
    }
    else
    {
	/*
	 * both paths are of the same type.
	 * consider each type and its attributes in turn.
	 */
	switch (path->fp_type)
	{
	case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
	    res = ip46_address_cmp(&path->attached_next_hop.fp_nh,
				   &rpath->frp_addr);
	    if (0 == res)
	    {
		res = (path->attached_next_hop.fp_interface -
		       rpath->frp_sw_if_index);
	    }
	    break;
	case FIB_PATH_TYPE_ATTACHED:
	    res = (path->attached.fp_interface - rpath->frp_sw_if_index);
	    break;
	case FIB_PATH_TYPE_RECURSIVE:
	    /* the recursion key is a (label, eos) pair for MPLS,
	     * an address otherwise */
	    if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
	    {
		res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label;

		if (res == 0)
		{
		    res = path->recursive.fp_nh.fp_eos - rpath->frp_eos;
		}
	    }
	    else
	    {
		res = ip46_address_cmp(&path->recursive.fp_nh.fp_ip,
				       &rpath->frp_addr);
	    }

	    if (0 == res)
	    {
		res = (path->recursive.fp_tbl_id - rpath->frp_fib_index);
	    }
	    break;
	case FIB_PATH_TYPE_INTF_RX:
	    res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index);
	    break;
	case FIB_PATH_TYPE_DEAG:
	    res = (path->deag.fp_tbl_id - rpath->frp_fib_index);
	    if (0 == res)
	    {
		res = (path->deag.fp_rpf_id - rpath->frp_rpf_id);
	    }
	    break;
	case FIB_PATH_TYPE_SPECIAL:
	case FIB_PATH_TYPE_RECEIVE:
	case FIB_PATH_TYPE_EXCLUSIVE:
	    /* these types carry no distinguishing attributes */
	    res = 0;
	    break;
	}
    }
    return (res);
}
1474
/*
 * fib_path_recursive_loop_detect
 *
 * A forward walk of the FIB object graph to detect for a cycle/loop. This
 * walk is initiated when an entry is linking to a new path list or from an old.
 * The entry vector passed contains all the FIB entries that are children of this
 * path (it is all the entries encountered on the walk so far). If this vector
 * contains the entry this path resolves via, then a loop is about to form.
 * The loop must be allowed to form, since we need the dependencies in place
 * so that we can track when the loop breaks.
 * However, we MUST not produce a loop in the forwarding graph (else packets
 * would loop around the switch path until the loop breaks), so we mark recursive
 * paths as looped so that they do not contribute forwarding information.
 * By marking the path as looped, an entry such as;
 *    X/Y
 *     via a.a.a.a (looped)
 *     via b.b.b.b (not looped)
 * can still forward using the info provided by b.b.b.b only
 */
int
fib_path_recursive_loop_detect (fib_node_index_t path_index,
				fib_node_index_t **entry_indicies)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * the forced drop path is never looped, cos it is never resolved.
     */
    if (fib_path_is_permanent_drop(path))
    {
	return (0);
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_RECURSIVE:
    {
	fib_node_index_t *entry_index, *entries;
	int looped = 0;
	entries = *entry_indicies;

	vec_foreach(entry_index, entries) {
	    if (*entry_index == path->fp_via_fib)
	    {
		/*
		 * the entry that is about to link to this path-list (or
		 * one of this path-list's children) is the same entry that
		 * this recursive path resolves through. this is a cycle.
		 * abort the walk.
		 */
		looped = 1;
		break;
	    }
	}

	if (looped)
	{
	    FIB_PATH_DBG(path, "recursive loop formed");
	    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;

	    /* poison the path's forwarding with a drop so the loop cannot
	     * be followed in the data-path */
	    dpo_copy(&path->fp_dpo,
		     drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
	}
	else
	{
	    /*
	     * no loop here yet. keep forward walking the graph.
	     */
	    if (fib_entry_recursive_loop_detect(path->fp_via_fib, entry_indicies))
	    {
		FIB_PATH_DBG(path, "recursive loop formed");
		path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
	    }
	    else
	    {
		FIB_PATH_DBG(path, "recursive loop cleared");
		path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;
	    }
	}
	break;
    }
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
	/*
	 * these path types cannot be part of a loop, since they are the leaves
	 * of the graph.
	 */
	break;
    }

    return (fib_path_is_looped(path_index));
}
1574
/*
 * Resolve a path: construct the DPO through which it forwards and link
 * the path into the object graph (adjacency or via-entry) so it learns
 * of subsequent state changes.
 * Returns non-zero if the path ends up resolved.
 */
int
fib_path_resolve (fib_node_index_t path_index)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * hope for the best.
     */
    path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;

    /*
     * the forced drop path resolves via the drop adj
     */
    if (fib_path_is_permanent_drop(path))
    {
	dpo_copy(&path->fp_dpo,
		 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
	path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	return (fib_path_is_resolved(path_index));
    }

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
	fib_path_attached_next_hop_set(path);
	break;
    case FIB_PATH_TYPE_ATTACHED:
	/*
	 * an admin-down interface leaves the path unresolved, but the
	 * adjacency is still stacked so we are a child of it and hear
	 * about the interface coming back up.
	 */
	if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
					   path->attached.fp_interface))
	{
	    path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
	}
	dpo_set(&path->fp_dpo,
		DPO_ADJACENCY,
		fib_proto_to_dpo(path->fp_nh_proto),
		fib_path_attached_get_adj(path,
					  fib_proto_to_link(path->fp_nh_proto)));

	/*
	 * become a child of the adjacency so we receive updates
	 * when the interface state changes
	 */
	path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
					 FIB_NODE_TYPE_PATH,
					 fib_path_get_index(path));

	break;
    case FIB_PATH_TYPE_RECURSIVE:
    {
	/*
	 * Create a RR source entry in the table for the address
	 * that this path recurses through.
	 * This resolve action is recursive, hence we may create
	 * more paths in the process. more creates mean maybe realloc
	 * of this path.
	 */
	fib_node_index_t fei;
	fib_prefix_t pfx;

	ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);

	if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
	{
	    fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
				       path->recursive.fp_nh.fp_eos,
				       &pfx);
	}
	else
	{
	    fib_prefix_from_ip46_addr(&path->recursive.fp_nh.fp_ip, &pfx);
	}

	fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
					  &pfx,
					  FIB_SOURCE_RR,
					  FIB_ENTRY_FLAG_NONE);

	/* re-fetch: the add above may have grown the path pool and
	 * moved this path (see comment above) */
	path = fib_path_get(path_index);
	path->fp_via_fib = fei;

	/*
	 * become a dependent child of the entry so the path is
	 * informed when the forwarding for the entry changes.
	 */
	path->fp_sibling = fib_entry_child_add(path->fp_via_fib,
					       FIB_NODE_TYPE_PATH,
					       fib_path_get_index(path));

	/*
	 * create and configure the IP DPO
	 */
	fib_path_recursive_adj_update(
	    path,
	    fib_path_to_chain_type(path),
	    &path->fp_dpo);

	break;
    }
    case FIB_PATH_TYPE_SPECIAL:
	/*
	 * Resolve via the drop
	 */
	dpo_copy(&path->fp_dpo,
		 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
	break;
    case FIB_PATH_TYPE_DEAG:
    {
	/*
	 * Resolve via a lookup DPO.
	 * FIXME. control plane should add routes with a table ID
	 */
	lookup_cast_t cast;

	/* an RPF-ID marked path performs a multicast lookup */
	cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ?
		LOOKUP_MULTICAST :
		LOOKUP_UNICAST);

	lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
					   fib_proto_to_dpo(path->fp_nh_proto),
					   cast,
					   LOOKUP_INPUT_DST_ADDR,
					   LOOKUP_TABLE_FROM_CONFIG,
					   &path->fp_dpo);
	break;
    }
    case FIB_PATH_TYPE_RECEIVE:
	/*
	 * Resolve via a receive DPO.
	 */
	receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
				path->receive.fp_interface,
				&path->receive.fp_addr,
				&path->fp_dpo);
	break;
    case FIB_PATH_TYPE_INTF_RX: {
	/*
	 * Resolve via an interface receive DPO.
	 */
	interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
				  path->intf_rx.fp_interface,
				  &path->fp_dpo);
	break;
    }
    case FIB_PATH_TYPE_EXCLUSIVE:
	/*
	 * Resolve via the user provided DPO
	 */
	dpo_copy(&path->fp_dpo, &path->exclusive.fp_ex_dpo);
	break;
    }

    return (fib_path_is_resolved(path_index));
}
1733
1734u32
1735fib_path_get_resolving_interface (fib_node_index_t path_index)
1736{
1737 fib_path_t *path;
1738
1739 path = fib_path_get(path_index);
1740
1741 switch (path->fp_type)
1742 {
1743 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1744 return (path->attached_next_hop.fp_interface);
1745 case FIB_PATH_TYPE_ATTACHED:
1746 return (path->attached.fp_interface);
1747 case FIB_PATH_TYPE_RECEIVE:
1748 return (path->receive.fp_interface);
1749 case FIB_PATH_TYPE_RECURSIVE:
Neale Ranns08b16482017-05-13 05:52:58 -07001750 if (fib_path_is_resolved(path_index))
1751 {
1752 return (fib_entry_get_resolving_interface(path->fp_via_fib));
1753 }
1754 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001755 case FIB_PATH_TYPE_INTF_RX:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001756 case FIB_PATH_TYPE_SPECIAL:
1757 case FIB_PATH_TYPE_DEAG:
1758 case FIB_PATH_TYPE_EXCLUSIVE:
1759 break;
1760 }
1761 return (~0);
1762}
1763
1764adj_index_t
1765fib_path_get_adj (fib_node_index_t path_index)
1766{
1767 fib_path_t *path;
1768
1769 path = fib_path_get(path_index);
1770
1771 ASSERT(dpo_is_adj(&path->fp_dpo));
1772 if (dpo_is_adj(&path->fp_dpo))
1773 {
1774 return (path->fp_dpo.dpoi_index);
1775 }
1776 return (ADJ_INDEX_INVALID);
1777}
1778
Neale Ranns57b58602017-07-15 07:37:25 -07001779u16
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001780fib_path_get_weight (fib_node_index_t path_index)
1781{
1782 fib_path_t *path;
1783
1784 path = fib_path_get(path_index);
1785
1786 ASSERT(path);
1787
1788 return (path->fp_weight);
1789}
1790
Neale Ranns57b58602017-07-15 07:37:25 -07001791u16
1792fib_path_get_preference (fib_node_index_t path_index)
1793{
1794 fib_path_t *path;
1795
1796 path = fib_path_get(path_index);
1797
1798 ASSERT(path);
1799
1800 return (path->fp_preference);
1801}
1802
/**
 * @brief Contribute the path's adjacency to the list passed.
 * By calling this function over all paths, recursively, a child
 * can construct its full set of forwarding adjacencies, and hence its
 * uRPF list.
 */
void
fib_path_contribute_urpf (fib_node_index_t path_index,
			  index_t urpf)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    /*
     * resolved and unresolved paths contribute to the RPF list.
     */
    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
	fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
	break;

    case FIB_PATH_TYPE_ATTACHED:
	fib_urpf_list_append(urpf, path->attached.fp_interface);
	break;

    case FIB_PATH_TYPE_RECURSIVE:
	if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
	    !fib_path_is_looped(path_index))
	{
	    /*
	     * there's unresolved due to constraints, and there's unresolved
	     * due to ain't got no via. can't do nowt w'out via.
	     */
	    fib_entry_contribute_urpf(path->fp_via_fib, urpf);
	}
	break;

    case FIB_PATH_TYPE_EXCLUSIVE:
    case FIB_PATH_TYPE_SPECIAL:
	/*
	 * these path types may link to an adj, if that's what
	 * the client gave
	 */
	if (dpo_is_adj(&path->fp_dpo))
	{
	    ip_adjacency_t *adj;

	    adj = adj_get(path->fp_dpo.dpoi_index);

	    fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
	}
	break;

    case FIB_PATH_TYPE_DEAG:
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_INTF_RX:
	/*
	 * these path types don't link to an adj
	 */
	break;
    }
}
1867
/*
 * For a de-aggregation path, wrap the caller's DPO in an MPLS
 * disposition DPO carrying the path's RPF-ID and the given payload
 * protocol. All other path types leave the DPO untouched.
 */
void
fib_path_stack_mpls_disp (fib_node_index_t path_index,
			  dpo_proto_t payload_proto,
			  dpo_id_t *dpo)
{
    fib_path_t *path;

    path = fib_path_get(path_index);

    ASSERT(path);

    switch (path->fp_type)
    {
    case FIB_PATH_TYPE_DEAG:
    {
	dpo_id_t tmp = DPO_INVALID;

	/* move the caller's dpo aside, then rebuild it as a disposition
	 * object that parents the original */
	dpo_copy(&tmp, dpo);
	dpo_set(dpo,
		DPO_MPLS_DISPOSITION,
		payload_proto,
		mpls_disp_dpo_create(payload_proto,
				     path->deag.fp_rpf_id,
				     &tmp));
	dpo_reset(&tmp);
	break;
    }
    case FIB_PATH_TYPE_RECEIVE:
    case FIB_PATH_TYPE_ATTACHED:
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
    case FIB_PATH_TYPE_RECURSIVE:
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_EXCLUSIVE:
    case FIB_PATH_TYPE_SPECIAL:
	break;
    }
}
1905
1906void
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001907fib_path_contribute_forwarding (fib_node_index_t path_index,
1908 fib_forward_chain_type_t fct,
1909 dpo_id_t *dpo)
1910{
1911 fib_path_t *path;
1912
1913 path = fib_path_get(path_index);
1914
1915 ASSERT(path);
1916 ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct);
1917
1918 FIB_PATH_DBG(path, "contribute");
1919
1920 /*
1921 * The DPO stored in the path was created when the path was resolved.
1922 * This then represents the path's 'native' protocol; IP.
1923 * For all others will need to go find something else.
1924 */
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001925 if (fib_path_to_chain_type(path) == fct)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001926 {
1927 dpo_copy(dpo, &path->fp_dpo);
1928 }
Neale Ranns5e575b12016-10-03 09:40:25 +01001929 else
1930 {
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001931 switch (path->fp_type)
1932 {
1933 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
1934 switch (fct)
1935 {
1936 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1937 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
1938 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1939 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
Neale Ranns5e575b12016-10-03 09:40:25 +01001940 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001941 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001942 {
1943 adj_index_t ai;
1944
1945 /*
Neale Rannsad422ed2016-11-02 14:20:04 +00001946 * get a appropriate link type adj.
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001947 */
1948 ai = fib_path_attached_next_hop_get_adj(
1949 path,
1950 fib_forw_chain_type_to_link_type(fct));
1951 dpo_set(dpo, DPO_ADJACENCY,
1952 fib_forw_chain_type_to_dpo_proto(fct), ai);
1953 adj_unlock(ai);
1954
1955 break;
1956 }
Neale Ranns32e1c012016-11-22 17:07:28 +00001957 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1958 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001959 break;
Neale Ranns32e1c012016-11-22 17:07:28 +00001960 }
1961 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001962 case FIB_PATH_TYPE_RECURSIVE:
1963 switch (fct)
1964 {
1965 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
1966 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1967 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001968 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
Neale Ranns32e1c012016-11-22 17:07:28 +00001969 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1970 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001971 fib_path_recursive_adj_update(path, fct, dpo);
1972 break;
Neale Ranns5e575b12016-10-03 09:40:25 +01001973 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001974 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns5e575b12016-10-03 09:40:25 +01001975 ASSERT(0);
1976 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001977 }
1978 break;
1979 case FIB_PATH_TYPE_DEAG:
Neale Ranns32e1c012016-11-22 17:07:28 +00001980 switch (fct)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001981 {
1982 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
1983 lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID,
1984 DPO_PROTO_MPLS,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001985 LOOKUP_UNICAST,
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001986 LOOKUP_INPUT_DST_ADDR,
1987 LOOKUP_TABLE_FROM_CONFIG,
1988 dpo);
1989 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001990 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001991 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
1992 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001993 dpo_copy(dpo, &path->fp_dpo);
Neale Ranns32e1c012016-11-22 17:07:28 +00001994 break;
1995 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
1996 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
Neale Ranns5e575b12016-10-03 09:40:25 +01001997 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08001998 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns5e575b12016-10-03 09:40:25 +01001999 ASSERT(0);
2000 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002001 }
2002 break;
2003 case FIB_PATH_TYPE_EXCLUSIVE:
2004 dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
2005 break;
2006 case FIB_PATH_TYPE_ATTACHED:
Neale Ranns32e1c012016-11-22 17:07:28 +00002007 switch (fct)
2008 {
2009 case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
2010 case FIB_FORW_CHAIN_TYPE_UNICAST_IP4:
2011 case FIB_FORW_CHAIN_TYPE_UNICAST_IP6:
2012 case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
2013 case FIB_FORW_CHAIN_TYPE_ETHERNET:
Florin Corasce1b4c72017-01-26 14:25:34 -08002014 case FIB_FORW_CHAIN_TYPE_NSH:
Neale Ranns8c4611b2017-05-23 03:43:47 -07002015 {
2016 adj_index_t ai;
2017
2018 /*
2019 * get a appropriate link type adj.
2020 */
2021 ai = fib_path_attached_get_adj(
2022 path,
2023 fib_forw_chain_type_to_link_type(fct));
2024 dpo_set(dpo, DPO_ADJACENCY,
2025 fib_forw_chain_type_to_dpo_proto(fct), ai);
2026 adj_unlock(ai);
2027 break;
2028 }
Neale Ranns32e1c012016-11-22 17:07:28 +00002029 case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
2030 case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
2031 {
2032 adj_index_t ai;
2033
2034 /*
2035 * Create the adj needed for sending IP multicast traffic
2036 */
2037 ai = adj_mcast_add_or_lock(path->fp_nh_proto,
2038 fib_forw_chain_type_to_link_type(fct),
2039 path->attached.fp_interface);
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002040 dpo_set(dpo, DPO_ADJACENCY,
Neale Ranns32e1c012016-11-22 17:07:28 +00002041 fib_forw_chain_type_to_dpo_proto(fct),
2042 ai);
2043 adj_unlock(ai);
2044 }
2045 break;
2046 }
2047 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -08002048 case FIB_PATH_TYPE_INTF_RX:
2049 /*
2050 * Create the adj needed for sending IP multicast traffic
2051 */
2052 interface_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
2053 path->attached.fp_interface,
2054 dpo);
2055 break;
Neale Ranns32e1c012016-11-22 17:07:28 +00002056 case FIB_PATH_TYPE_RECEIVE:
2057 case FIB_PATH_TYPE_SPECIAL:
2058 dpo_copy(dpo, &path->fp_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002059 break;
2060 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002061 }
2062}
2063
2064load_balance_path_t *
2065fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
2066 fib_forward_chain_type_t fct,
2067 load_balance_path_t *hash_key)
2068{
2069 load_balance_path_t *mnh;
2070 fib_path_t *path;
2071
2072 path = fib_path_get(path_index);
2073
2074 ASSERT(path);
2075
2076 if (fib_path_is_resolved(path_index))
2077 {
2078 vec_add2(hash_key, mnh, 1);
2079
2080 mnh->path_weight = path->fp_weight;
2081 mnh->path_index = path_index;
Neale Ranns5e575b12016-10-03 09:40:25 +01002082 fib_path_contribute_forwarding(path_index, fct, &mnh->path_dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002083 }
2084
2085 return (hash_key);
2086}
2087
2088int
Neale Rannsf12a83f2017-04-18 09:09:40 -07002089fib_path_is_recursive_constrained (fib_node_index_t path_index)
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002090{
2091 fib_path_t *path;
2092
2093 path = fib_path_get(path_index);
2094
Neale Rannsf12a83f2017-04-18 09:09:40 -07002095 return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
2096 ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
2097 (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002098}
2099
2100int
2101fib_path_is_exclusive (fib_node_index_t path_index)
2102{
2103 fib_path_t *path;
2104
2105 path = fib_path_get(path_index);
2106
2107 return (FIB_PATH_TYPE_EXCLUSIVE == path->fp_type);
2108}
2109
2110int
2111fib_path_is_deag (fib_node_index_t path_index)
2112{
2113 fib_path_t *path;
2114
2115 path = fib_path_get(path_index);
2116
2117 return (FIB_PATH_TYPE_DEAG == path->fp_type);
2118}
2119
2120int
2121fib_path_is_resolved (fib_node_index_t path_index)
2122{
2123 fib_path_t *path;
2124
2125 path = fib_path_get(path_index);
2126
2127 return (dpo_id_is_valid(&path->fp_dpo) &&
2128 (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RESOLVED) &&
2129 !fib_path_is_looped(path_index) &&
2130 !fib_path_is_permanent_drop(path));
2131}
2132
2133int
2134fib_path_is_looped (fib_node_index_t path_index)
2135{
2136 fib_path_t *path;
2137
2138 path = fib_path_get(path_index);
2139
2140 return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
2141}
2142
Neale Ranns81424992017-05-18 03:03:22 -07002143fib_path_list_walk_rc_t
Steven01b07122016-11-02 10:40:09 -07002144fib_path_encode (fib_node_index_t path_list_index,
2145 fib_node_index_t path_index,
2146 void *ctx)
2147{
2148 fib_route_path_encode_t **api_rpaths = ctx;
2149 fib_route_path_encode_t *api_rpath;
2150 fib_path_t *path;
2151
2152 path = fib_path_get(path_index);
2153 if (!path)
Neale Ranns81424992017-05-18 03:03:22 -07002154 return (FIB_PATH_LIST_WALK_CONTINUE);
Steven01b07122016-11-02 10:40:09 -07002155 vec_add2(*api_rpaths, api_rpath, 1);
2156 api_rpath->rpath.frp_weight = path->fp_weight;
Neale Ranns57b58602017-07-15 07:37:25 -07002157 api_rpath->rpath.frp_preference = path->fp_preference;
Steven01b07122016-11-02 10:40:09 -07002158 api_rpath->rpath.frp_proto = path->fp_nh_proto;
2159 api_rpath->rpath.frp_sw_if_index = ~0;
2160 api_rpath->dpo = path->exclusive.fp_ex_dpo;
2161 switch (path->fp_type)
2162 {
2163 case FIB_PATH_TYPE_RECEIVE:
2164 api_rpath->rpath.frp_addr = path->receive.fp_addr;
2165 api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
Florin Coras91d341c2017-07-29 11:50:31 -07002166 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002167 break;
2168 case FIB_PATH_TYPE_ATTACHED:
2169 api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
Florin Coras91d341c2017-07-29 11:50:31 -07002170 api_rpath->dpo = path->fp_dpo;
Steven01b07122016-11-02 10:40:09 -07002171 break;
2172 case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
2173 api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
2174 api_rpath->rpath.frp_addr = path->attached_next_hop.fp_nh;
2175 break;
2176 case FIB_PATH_TYPE_SPECIAL:
2177 break;
2178 case FIB_PATH_TYPE_DEAG:
2179 break;
2180 case FIB_PATH_TYPE_RECURSIVE:
Neale Rannsad422ed2016-11-02 14:20:04 +00002181 api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
Steven01b07122016-11-02 10:40:09 -07002182 break;
2183 default:
2184 break;
2185 }
Neale Ranns81424992017-05-18 03:03:22 -07002186 return (FIB_PATH_LIST_WALK_CONTINUE);
Steven01b07122016-11-02 10:40:09 -07002187}
2188
Neale Rannsad422ed2016-11-02 14:20:04 +00002189fib_protocol_t
2190fib_path_get_proto (fib_node_index_t path_index)
2191{
2192 fib_path_t *path;
2193
2194 path = fib_path_get(path_index);
2195
2196 return (path->fp_nh_proto);
2197}
2198
/**
 * Module initialisation: register the path node type (and its virtual
 * function table) with the FIB node graph.
 */
void
fib_path_module_init (void)
{
    fib_node_register_type (FIB_NODE_TYPE_PATH, &fib_path_vft);
}
2204
2205static clib_error_t *
2206show_fib_path_command (vlib_main_t * vm,
2207 unformat_input_t * input,
2208 vlib_cli_command_t * cmd)
2209{
Neale Ranns33a7dd52016-10-07 15:14:33 +01002210 fib_node_index_t pi;
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002211 fib_path_t *path;
2212
Neale Ranns33a7dd52016-10-07 15:14:33 +01002213 if (unformat (input, "%d", &pi))
2214 {
2215 /*
2216 * show one in detail
2217 */
2218 if (!pool_is_free_index(fib_path_pool, pi))
2219 {
2220 path = fib_path_get(pi);
2221 u8 *s = fib_path_format(pi, NULL);
2222 s = format(s, "children:");
2223 s = fib_node_children_format(path->fp_node.fn_children, s);
2224 vlib_cli_output (vm, "%s", s);
2225 vec_free(s);
2226 }
2227 else
2228 {
2229 vlib_cli_output (vm, "path %d invalid", pi);
2230 }
2231 }
2232 else
2233 {
2234 vlib_cli_output (vm, "FIB Paths");
2235 pool_foreach(path, fib_path_pool,
2236 ({
2237 vlib_cli_output (vm, "%U", format_fib_path, path);
2238 }));
2239 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +01002240
2241 return (NULL);
2242}
2243
/**
 * Register the 'show fib paths' CLI command; an optional numeric path
 * index is parsed by the handler to show a single path in detail.
 */
VLIB_CLI_COMMAND (show_fib_path, static) = {
  .path = "show fib paths",
  .function = show_fib_path_command,
  .short_help = "show fib paths",
};