/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/fib/fib_entry.h>
#include <vnet/fib/fib_table.h>
#include <vnet/fib/fib_walk.h>
#include <vnet/fib/fib_path_list.h>

#include <vnet/bier/bier_table.h>
#include <vnet/bier/bier_fmask.h>
#include <vnet/bier/bier_bit_string.h>
#include <vnet/bier/bier_disp_table.h>

#include <vnet/mpls/mpls.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/dpo/load_balance.h>

/*
 * attribute names for formatting
 */
static const char *const bier_fmask_attr_names[] = BIER_FMASK_ATTR_NAMES;

/*
 * pool of BIER fmask objects
 */
bier_fmask_t *bier_fmask_pool;

/**
 * Stats for each BIER fmask object
 */
vlib_combined_counter_main_t bier_fmask_counters;

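/**
 * Get the pool index of a BIER fmask from its pointer
 */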
static inline index_t
bier_fmask_get_index (const bier_fmask_t *bfm)
{
    return (bfm - bier_fmask_pool);
}

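/**
 * Allocate and zero the fmask's per-bit-position reference counts and its
 * input reset bit-string, sized from the table's BIER header length
 */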
static void
bier_fmask_bits_init (bier_fmask_bits_t *bits,
                      bier_hdr_len_id_t hlid)
{
    bits->bfmb_refs = clib_mem_alloc(sizeof(bits->bfmb_refs[0]) *
                                     bier_hdr_len_id_to_num_bits(hlid));
    clib_memset(bits->bfmb_refs,
                0,
                (sizeof(bits->bfmb_refs[0]) *
                 bier_hdr_len_id_to_num_bits(hlid)));

    bits->bfmb_input_reset_string.bbs_len =
        bier_hdr_len_id_to_num_buckets(hlid);

    /*
     * The buckets are accessed in the switch path
     */
    bits->bfmb_input_reset_string.bbs_buckets =
        clib_mem_alloc_aligned(
            sizeof(bits->bfmb_input_reset_string.bbs_buckets[0]) *
            bier_hdr_len_id_to_num_buckets(hlid),
            CLIB_CACHE_LINE_BYTES);
    clib_memset(bits->bfmb_input_reset_string.bbs_buckets,
                0,
                sizeof(bits->bfmb_input_reset_string.bbs_buckets[0]) *
                bier_hdr_len_id_to_num_buckets(hlid));
}

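/**
 * (Re)stack the fmask's DPO on the forwarding contributed by its path-list
 */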
static void
bier_fmask_stack (bier_fmask_t *bfm)
{
    dpo_id_t via_dpo = DPO_INVALID;
    fib_forward_chain_type_t fct;

    if (bfm->bfm_flags & BIER_FMASK_FLAG_MPLS)
    {
        fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS;
    }
    else
    {
        fct = FIB_FORW_CHAIN_TYPE_BIER;
    }

    fib_path_list_contribute_forwarding(bfm->bfm_pl, fct,
                                        FIB_PATH_LIST_FWD_FLAG_COLLAPSE,
                                        &via_dpo);

    /*
     * If the via PL entry provides no forwarding (i.e. a drop)
     * then neither does this fmask. That way children consider this fmask
     * unresolved and other ECMP options are used instead.
     */
    if (dpo_is_drop(&via_dpo))
    {
        bfm->bfm_flags &= ~BIER_FMASK_FLAG_FORWARDING;
    }
    else
    {
        bfm->bfm_flags |= BIER_FMASK_FLAG_FORWARDING;
    }

    dpo_stack(DPO_BIER_FMASK,
              DPO_PROTO_BIER,
              &bfm->bfm_dpo,
              &via_dpo);
    dpo_reset(&via_dpo);
}

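/**
 * Contribute the fmask's forwarding to a child; a drop is contributed
 * if the fmask is not currently forwarding
 */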
void
bier_fmask_contribute_forwarding (index_t bfmi,
                                  dpo_id_t *dpo)
{
    bier_fmask_t *bfm;

    bfm = bier_fmask_get(bfmi);

    if (bfm->bfm_flags & BIER_FMASK_FLAG_FORWARDING)
    {
        dpo_set(dpo,
                DPO_BIER_FMASK,
                DPO_PROTO_BIER,
                bfmi);
    }
    else
    {
        dpo_copy(dpo, drop_dpo_get(DPO_PROTO_BIER));
    }
}

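/**
 * Add a child to the fmask's dependency list; returns the sibling index
 */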
u32
bier_fmask_child_add (fib_node_index_t bfmi,
                      fib_node_type_t child_type,
                      fib_node_index_t child_index)
{
    return (fib_node_child_add(FIB_NODE_TYPE_BIER_FMASK,
                               bfmi,
                               child_type,
                               child_index));
}

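/**
 * Remove a previously added child from the fmask's dependency list
 */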
void
bier_fmask_child_remove (fib_node_index_t bfmi,
                         u32 sibling_index)
{
    if (INDEX_INVALID == bfmi)
    {
        return;
    }

    fib_node_child_remove(FIB_NODE_TYPE_BIER_FMASK,
                          bfmi,
                          sibling_index);
}

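/**
 * Initialise an fmask from its ID and the path to the next-hop; this
 * builds the output MPLS label (or BIFT-id) and the shared path-list
 */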
static void
bier_fmask_init (bier_fmask_t *bfm,
                 const bier_fmask_id_t *fmid,
                 const fib_route_path_t *rpath)
{
    const bier_table_id_t *btid;
    fib_route_path_t *rpaths;
    mpls_label_t olabel;

    clib_memset(bfm, 0, sizeof(*bfm));

    bfm->bfm_id = clib_mem_alloc(sizeof(*bfm->bfm_id));

    fib_node_init(&bfm->bfm_node, FIB_NODE_TYPE_BIER_FMASK);
    *bfm->bfm_id = *fmid;
    dpo_reset(&bfm->bfm_dpo);
    btid = bier_table_get_id(bfm->bfm_id->bfmi_bti);
    bier_fmask_bits_init(&bfm->bfm_bits, btid->bti_hdr_len);

    if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
    {
        bfm->bfm_id->bfmi_nh_type = BIER_NH_UDP;
    }
    else if (ip46_address_is_zero(&(bfm->bfm_id->bfmi_nh)))
    {
        bfm->bfm_flags |= BIER_FMASK_FLAG_DISP;
    }

    if (!(bfm->bfm_flags & BIER_FMASK_FLAG_DISP))
    {
        if (NULL != rpath->frp_label_stack)
        {
            olabel = rpath->frp_label_stack[0].fml_value;
            vnet_mpls_uc_set_label(&bfm->bfm_label, olabel);
            vnet_mpls_uc_set_exp(&bfm->bfm_label, 0);
            vnet_mpls_uc_set_s(&bfm->bfm_label, 1);
            vnet_mpls_uc_set_ttl(&bfm->bfm_label, 64);
            bfm->bfm_flags |= BIER_FMASK_FLAG_MPLS;
        }
        else
        {
            bier_bift_id_t id;

            /*
             * not an MPLS label
             */
            bfm->bfm_flags &= ~BIER_FMASK_FLAG_MPLS;

            /*
             * use a label as encoded for BIFT value
             */
            id = bier_bift_id_encode(btid->bti_set,
                                     btid->bti_sub_domain,
                                     btid->bti_hdr_len);
            vnet_mpls_uc_set_label(&bfm->bfm_label, id);
            vnet_mpls_uc_set_s(&bfm->bfm_label, 1);
            vnet_mpls_uc_set_exp(&bfm->bfm_label, 0);
            vnet_mpls_uc_set_ttl(&bfm->bfm_label, 64);
        }
        bfm->bfm_label = clib_host_to_net_u32(bfm->bfm_label);
    }

    rpaths = NULL;
    vec_add1(rpaths, *rpath);
    bfm->bfm_pl = fib_path_list_create((FIB_PATH_LIST_FLAG_SHARED |
                                        FIB_PATH_LIST_FLAG_NO_URPF),
                                       rpaths);
    bfm->bfm_sibling = fib_path_list_child_add(bfm->bfm_pl,
                                               FIB_NODE_TYPE_BIER_FMASK,
                                               bier_fmask_get_index(bfm));
    vec_free(rpaths);
    bier_fmask_stack(bfm);
}

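/**
 * Free the fmask's resources and return it to the pool
 */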
static void
bier_fmask_destroy (bier_fmask_t *bfm)
{
    clib_mem_free(bfm->bfm_bits.bfmb_refs);
    clib_mem_free(bfm->bfm_bits.bfmb_input_reset_string.bbs_buckets);

    bier_fmask_db_remove(bfm->bfm_id);
    fib_path_list_child_remove(bfm->bfm_pl,
                               bfm->bfm_sibling);
    dpo_reset(&bfm->bfm_dpo);
    clib_mem_free(bfm->bfm_id);
    pool_put(bier_fmask_pool, bfm);
}

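/**
 * Release a reference on the fmask; it is destroyed when the last lock goes
 */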
void
bier_fmask_unlock (index_t bfmi)
{
    bier_fmask_t *bfm;

    if (INDEX_INVALID == bfmi)
    {
        return;
    }

    bfm = bier_fmask_get(bfmi);

    fib_node_unlock(&bfm->bfm_node);
}

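/**
 * Take a reference on the fmask
 */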
void
bier_fmask_lock (index_t bfmi)
{
    bier_fmask_t *bfm;

    if (INDEX_INVALID == bfmi)
    {
        return;
    }

    bfm = bier_fmask_get(bfmi);

    fib_node_lock(&bfm->bfm_node);
}

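/**
 * Create a new fmask in the pool, with its stats validated and zeroed,
 * and return its index holding one lock
 */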
index_t
bier_fmask_create_and_lock (const bier_fmask_id_t *fmid,
                            const fib_route_path_t *rpath)
{
    bier_fmask_t *bfm;
    index_t bfmi;

    pool_get_aligned(bier_fmask_pool, bfm, CLIB_CACHE_LINE_BYTES);
    bfmi = bier_fmask_get_index(bfm);

    vlib_validate_combined_counter (&(bier_fmask_counters), bfmi);
    vlib_zero_combined_counter (&(bier_fmask_counters), bfmi);

    bier_fmask_init(bfm, fmid, rpath);

    bier_fmask_lock(bfmi);

    return (bfmi);
}

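/**
 * Link a bit-position to the fmask; the bit is set in the reset
 * bit-string on the 0 -> 1 reference transition
 */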
void
bier_fmask_link (index_t bfmi,
                 bier_bp_t bp)
{
    bier_fmask_t *bfm;

    bfm = bier_fmask_get(bfmi);

    if (0 == bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)])
    {
        /*
         * 0 -> 1 transition - set the bit in the string
         */
        bier_bit_string_set_bit(&bfm->bfm_bits.bfmb_input_reset_string, bp);
    }

    ++bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)];
    ++bfm->bfm_bits.bfmb_count;
}

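/**
 * Unlink a bit-position from the fmask; the bit is cleared in the reset
 * bit-string on the 1 -> 0 reference transition
 */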
void
bier_fmask_unlink (index_t bfmi,
                   bier_bp_t bp)
{
    bier_fmask_t *bfm;

    bfm = bier_fmask_get(bfmi);

    --bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)];
    --bfm->bfm_bits.bfmb_count;

    if (0 == bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)])
    {
        /*
         * 1 -> 0 transition - clear the bit in the string
         */
        bier_bit_string_clear_bit(&bfm->bfm_bits.bfmb_input_reset_string, bp);
    }
}

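/**
 * Format/print a BIER fmask
 */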
u8*
format_bier_fmask (u8 *s, va_list *ap)
{
    index_t bfmi = va_arg(*ap, index_t);
    u32 indent = va_arg(*ap, u32);
    bier_fmask_attributes_t attr;
    bier_fmask_t *bfm;
    vlib_counter_t to;

    if (pool_is_free_index(bier_fmask_pool, bfmi))
    {
        return (format(s, "No BIER f-mask %d", bfmi));
    }

    bfm = bier_fmask_get(bfmi);

    s = format(s, "fmask: nh:%U bs:%U locks:%d ",
               format_ip46_address, &bfm->bfm_id->bfmi_nh, IP46_TYPE_ANY,
               format_bier_bit_string, &bfm->bfm_bits.bfmb_input_reset_string,
               bfm->bfm_node.fn_locks);
    s = format(s, "flags:");
    FOR_EACH_BIER_FMASK_ATTR(attr) {
        if ((1<<attr) & bfm->bfm_flags) {
            s = format (s, "%s,", bier_fmask_attr_names[attr]);
        }
    }
    vlib_get_combined_counter (&(bier_fmask_counters), bfmi, &to);
    s = format (s, " to:[%Ld:%Ld]", to.packets, to.bytes);
    s = format(s, "\n");
    s = fib_path_list_format(bfm->bfm_pl, s);

    if (bfm->bfm_flags & BIER_FMASK_FLAG_MPLS)
    {
        s = format(s, " output-label:%U",
                   format_mpls_unicast_label,
                   vnet_mpls_uc_get_label(clib_net_to_host_u32(bfm->bfm_label)));
    }
    else
    {
        s = format(s, " output-bfit:[%U]",
                   format_bier_bift_id,
                   vnet_mpls_uc_get_label(clib_net_to_host_u32(bfm->bfm_label)));
    }
    s = format(s, "\n %U%U",
               format_white_space, indent,
               format_dpo_id, &bfm->bfm_dpo, indent+2);

    return (s);
}

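/**
 * Read the fmask's to-counters (packets and bytes)
 */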
void
bier_fmask_get_stats (index_t bfmi, u64 * packets, u64 * bytes)
{
    vlib_counter_t to;

    vlib_get_combined_counter (&(bier_fmask_counters), bfmi, &to);

    *packets = to.packets;
    *bytes = to.bytes;
}

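/**
 * Encode the fmask's table ID and next-hop into a route-path for
 * reporting back to clients
 */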
void
bier_fmask_encode (index_t bfmi,
                   bier_table_id_t *btid,
                   fib_route_path_encode_t *rpath)
{
    bier_fmask_t *bfm;

    bfm = bier_fmask_get(bfmi);
    *btid = *bier_table_get_id(bfm->bfm_id->bfmi_bti);

    clib_memset(rpath, 0, sizeof(*rpath));

    rpath->rpath.frp_sw_if_index = ~0;

    switch (bfm->bfm_id->bfmi_nh_type)
    {
    case BIER_NH_UDP:
        rpath->rpath.frp_flags = FIB_ROUTE_PATH_UDP_ENCAP;
        rpath->rpath.frp_udp_encap_id = bfm->bfm_id->bfmi_id;
        break;
    case BIER_NH_IP:
        memcpy(&rpath->rpath.frp_addr, &bfm->bfm_id->bfmi_nh,
               sizeof(rpath->rpath.frp_addr));
        break;
    }
}

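/*
 * Get the fmask's FIB graph node from its index
 */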
static fib_node_t *
bier_fmask_get_node (fib_node_index_t index)
{
    bier_fmask_t *bfm = bier_fmask_get(index);
    return (&(bfm->bfm_node));
}

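/*
 * Recover the fmask from its embedded FIB graph node
 */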
static bier_fmask_t*
bier_fmask_get_from_node (fib_node_t *node)
{
    return ((bier_fmask_t*)(((char*)node) -
                            STRUCT_OFFSET_OF(bier_fmask_t,
                                             bfm_node)));
}

/*
 * bier_fmask_last_lock_gone
 *
 * The last lock on the fmask has gone; destroy it
 */
static void
bier_fmask_last_lock_gone (fib_node_t *node)
{
    bier_fmask_destroy(bier_fmask_get_from_node(node));
}

/*
 * bier_fmask_back_walk_notify
 *
 * A back walk has reached this BIER fmask
 */
static fib_node_back_walk_rc_t
bier_fmask_back_walk_notify (fib_node_t *node,
                             fib_node_back_walk_ctx_t *ctx)
{
    /*
     * re-stack the fmask on the n-eos of the via
     */
    bier_fmask_t *bfm = bier_fmask_get_from_node(node);

    bier_fmask_stack(bfm);

    /*
     * propagate further up the graph.
     * we can do this synchronously since the fan out is small.
     */
    fib_walk_sync(FIB_NODE_TYPE_BIER_FMASK, bier_fmask_get_index(bfm), ctx);

    return (FIB_NODE_BACK_WALK_CONTINUE);
}

/*
 * The BIER fmask's graph node virtual function table
 */
static const fib_node_vft_t bier_fmask_vft = {
    .fnv_get = bier_fmask_get_node,
    .fnv_last_lock = bier_fmask_last_lock_gone,
    .fnv_back_walk = bier_fmask_back_walk_notify,
};

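/*
 * The DPO lock/unlock hooks are deliberate no-ops; the fmask's lifetime
 * is managed via its FIB node locks (bier_fmask_lock/unlock), not via
 * DPO reference counts
 */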
static void
bier_fmask_dpo_lock (dpo_id_t *dpo)
{
}

static void
bier_fmask_dpo_unlock (dpo_id_t *dpo)
{
}

static void
bier_fmask_dpo_mem_show (void)
{
    fib_show_memory_usage("BIER-fmask",
                          pool_elts(bier_fmask_pool),
                          pool_len(bier_fmask_pool),
                          sizeof(bier_fmask_t));
}

const static dpo_vft_t bier_fmask_dpo_vft = {
    .dv_lock = bier_fmask_dpo_lock,
    .dv_unlock = bier_fmask_dpo_unlock,
    .dv_mem_show = bier_fmask_dpo_mem_show,
    .dv_format = format_bier_fmask,
};

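/*
 * The VLIB graph nodes to which packets resolved via a BIER fmask DPO
 * are dispatched, per payload protocol
 */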
const static char *const bier_fmask_mpls_nodes[] =
{
    "bier-output",
    NULL
};
const static char * const * const bier_fmask_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_BIER] = bier_fmask_mpls_nodes,
    [DPO_PROTO_MPLS] = bier_fmask_mpls_nodes,
};

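/*
 * Module init: register the fmask FIB node type and the fmask DPO type
 */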
clib_error_t *
bier_fmask_module_init (vlib_main_t * vm)
{
    fib_node_register_type (FIB_NODE_TYPE_BIER_FMASK, &bier_fmask_vft);
    dpo_register(DPO_BIER_FMASK, &bier_fmask_dpo_vft, bier_fmask_nodes);

    return (NULL);
}

VLIB_INIT_FUNCTION (bier_fmask_module_init);

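/*
 * CLI to show one fmask by index, or all fmasks in the pool
 */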
static clib_error_t *
bier_fmask_show (vlib_main_t * vm,
                 unformat_input_t * input,
                 vlib_cli_command_t * cmd)
{
    bier_fmask_t *bfm;
    index_t bfmi;

    bfmi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
        if (unformat (input, "%d", &bfmi))
        {
            ;
        } else
        {
            break;
        }
    }

    if (INDEX_INVALID == bfmi)
    {
        pool_foreach(bfm, bier_fmask_pool,
        ({
            vlib_cli_output (vm, "[@%d] %U",
                             bier_fmask_get_index(bfm),
                             format_bier_fmask, bier_fmask_get_index(bfm), 0);
        }));
    }
    else
    {
        vlib_cli_output (vm, "%U", format_bier_fmask, bfmi, 0);
    }

    return (NULL);
}

VLIB_CLI_COMMAND (show_bier_fmask, static) = {
    .path = "show bier fmask",
    .short_help = "show bier fmask [index]",
    .function = bier_fmask_show,
};