blob: 5f7bf8c3b257a1323088063ba979ee6ca963b06f [file] [log] [blame]
Neale Rannsad422ed2016-11-02 14:20:04 +00001/*
2 * mpls_tunnel.c: MPLS tunnel interfaces (i.e. for RSVP-TE)
3 *
4 * Copyright (c) 2012 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#include <vnet/vnet.h>
Neale Rannsad422ed2016-11-02 14:20:04 +000019#include <vnet/mpls/mpls_tunnel.h>
Neale Ranns0f26c5a2017-03-01 15:12:11 -080020#include <vnet/mpls/mpls_types.h>
Neale Rannsad422ed2016-11-02 14:20:04 +000021#include <vnet/ip/ip.h>
22#include <vnet/fib/fib_path_list.h>
23#include <vnet/adj/adj_midchain.h>
Neale Ranns0f26c5a2017-03-01 15:12:11 -080024#include <vnet/adj/adj_mcast.h>
25#include <vnet/dpo/replicate_dpo.h>
Neale Ranns227038a2017-04-21 01:07:59 -070026#include <vnet/fib/mpls_fib.h>
Neale Rannsad422ed2016-11-02 14:20:04 +000027
/**
 * @brief pool of tunnel instances
 */
static mpls_tunnel_t *mpls_tunnel_pool;

/**
 * @brief DB of SW index to tunnel index.
 * Vector indexed by sw_if_index; ~0 marks an interface with no tunnel
 * (see vec_validate_init_empty in vnet_mpls_tunnel_create).
 */
static u32 *mpls_tunnel_db;

/**
 * @brief MPLS tunnel flags strings (one per mpls_tunnel_attribute_t)
 */
static const char *mpls_tunnel_attribute_names[] = MPLS_TUNNEL_ATTRIBUTES;
42
/**
 * @brief Packet trace structure recorded by the L2 TX node
 */
typedef struct mpls_tunnel_trace_t_
{
    /**
     * Tunnel-id / index in tunnel vector
     */
    u32 tunnel_id;
} mpls_tunnel_trace_t;
53
54static u8 *
55format_mpls_tunnel_tx_trace (u8 * s,
56 va_list * args)
57{
58 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60 mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *);
61
62 s = format (s, "MPLS: tunnel %d", t->tunnel_id);
63 return s;
64}
65
typedef enum
{
    /* Next index for the L2 midchain path. Currently unreferenced: the
     * TX node registration has n_next_nodes = 0 and the next index is
     * taken from the stacked DPO at runtime. */
    MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN,
    MPLS_TUNNEL_ENCAP_N_NEXT,
} mpls_tunnel_encap_next_t;
71
/**
 * @brief TX function. Only called L2. L3 traffic uses the adj-midchains.
 *
 * Looks up the tunnel for each buffer's TX sw_if_index and forwards the
 * packet onto the tunnel's L2 load-balance DPO (mt_l2_lb): the DPO index
 * is written to the buffer and the DPO's next-node is used as the
 * dispatch target.
 */
VLIB_NODE_FN (mpls_tunnel_tx) (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
    u32 *from = vlib_frame_vector_args (frame);
    vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
    u16 nexts[VLIB_FRAME_SIZE], *next;
    u32 n_left;

    n_left = frame->n_vectors;
    b = bufs;
    next = nexts;

    vlib_get_buffers (vm, from, bufs, n_left);

    /* dual-loop: process buffers two at a time while more than two remain */
    while (n_left > 2)
    {
        const mpls_tunnel_t *mt0, *mt1;
        u32 sw_if_index0, sw_if_index1;

        sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX];
        sw_if_index1 = vnet_buffer(b[1])->sw_if_index[VLIB_TX];

        mt0 = pool_elt_at_index(mpls_tunnel_pool,
                                mpls_tunnel_db[sw_if_index0]);
        mt1 = pool_elt_at_index(mpls_tunnel_pool,
                                mpls_tunnel_db[sw_if_index1]);

        /* stack on the tunnel's L2 load-balance */
        vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index;
        vnet_buffer(b[1])->ip.adj_index[VLIB_TX] = mt1->mt_l2_lb.dpoi_index;
        next[0] = mt0->mt_l2_lb.dpoi_next_node;
        next[1] = mt1->mt_l2_lb.dpoi_next_node;

        /* since we are coming out of the L2 world, where the vlib_buffer
         * union is used for other things, make sure it is clean for
         * MPLS from now on.
         */
        vnet_buffer(b[0])->mpls.first = 0;
        vnet_buffer(b[1])->mpls.first = 0;

        if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
            mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
                                                      b[0], sizeof (*tr));
            tr->tunnel_id = mpls_tunnel_db[sw_if_index0];
        }
        if (PREDICT_FALSE(b[1]->flags & VLIB_BUFFER_IS_TRACED))
        {
            mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
                                                      b[1], sizeof (*tr));
            tr->tunnel_id = mpls_tunnel_db[sw_if_index1];
        }

        b += 2;
        n_left -= 2;
        next += 2;
    }
    /* single-loop: remaining one or two buffers */
    while (n_left)
    {
        const mpls_tunnel_t *mt0;
        u32 sw_if_index0;

        sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX];
        mt0 = pool_elt_at_index(mpls_tunnel_pool,
                                mpls_tunnel_db[sw_if_index0]);

        vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index;
        next[0] = mt0->mt_l2_lb.dpoi_next_node;

        /* since we are coming out of the L2 world, where the vlib_buffer
         * union is used for other things, make sure it is clean for
         * MPLS from now on.
         */
        vnet_buffer(b[0])->mpls.first = 0;

        if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
            mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
                                                      b[0], sizeof (*tr));
            tr->tunnel_id = mpls_tunnel_db[sw_if_index0];
        }

        b += 1;
        n_left -= 1;
        next += 1;
    }

    vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

    return frame->n_vectors;
}
166
/* TX node registration. n_next_nodes is 0 because the next node is
 * resolved from the stacked DPO (mt_l2_lb) per-packet, not from a
 * static next-node list. */
VLIB_REGISTER_NODE (mpls_tunnel_tx) =
{
    .name = "mpls-tunnel-tx",
    .vector_size = sizeof (u32),
    .format_trace = format_mpls_tunnel_tx_trace,
    .type = VLIB_NODE_TYPE_INTERNAL,
    .n_errors = 0,
    .n_next_nodes = 0,
    /* MPLS_TUNNEL_ENCAP_N_NEXT, */
    /* .next_nodes = { */
    /*     [MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN] = "mpls-load-balance", */
    /* }, */
};
180
181/**
Neale Rannsad422ed2016-11-02 14:20:04 +0000182 * @brief Get a tunnel object from a SW interface index
183 */
184static mpls_tunnel_t*
185mpls_tunnel_get_from_sw_if_index (u32 sw_if_index)
186{
Eyal Baricd307742018-07-22 12:45:15 +0300187 if ((vec_len(mpls_tunnel_db) <= sw_if_index) ||
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800188 (~0 == mpls_tunnel_db[sw_if_index]))
189 return (NULL);
Neale Rannsad422ed2016-11-02 14:20:04 +0000190
191 return (pool_elt_at_index(mpls_tunnel_pool,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800192 mpls_tunnel_db[sw_if_index]));
Neale Rannsad422ed2016-11-02 14:20:04 +0000193}
194
/**
 * @brief Build a rewrite string for the MPLS tunnel.
 *
 * @return a valid but zero-length vector (never NULL).
 */
static u8*
mpls_tunnel_build_rewrite_i (void)
{
    /*
     * passing the adj code a NULL rewrite means 'i don't have one cos
     * t'other end is unresolved'. That's not the case here. For the mpls
     * tunnel there are just no bytes of encap to apply in the adj. We'll impose
     * the label stack once we choose a path. So return a zero length rewrite.
     */
    u8 *rewrite = NULL;

    /* allocate one element then reset length: yields a non-NULL,
     * zero-length vector */
    vec_validate(rewrite, 0);
    vec_reset_length(rewrite);

    return (rewrite);
}
214
/**
 * @brief Build a rewrite string for the MPLS tunnel.
 *
 * vnet interface-class callback; all arguments are ignored since the
 * tunnel's rewrite is always the empty stack (see
 * mpls_tunnel_build_rewrite_i).
 */
static u8*
mpls_tunnel_build_rewrite (vnet_main_t * vnm,
                           u32 sw_if_index,
                           vnet_link_t link_type,
                           const void *dst_address)
{
    return (mpls_tunnel_build_rewrite_i());
}
Neale Rannsad422ed2016-11-02 14:20:04 +0000226
/**
 * @brief Context passed through the path-list walk when collecting
 * forwarding contributions from each resolved path.
 */
typedef struct mpls_tunnel_collect_forwarding_ctx_t_
{
    /* accumulated next-hops, one per contributing path */
    load_balance_path_t * next_hops;
    /* the tunnel whose path extensions supply the label stacks */
    const mpls_tunnel_t *mt;
    /* the forwarding chain type being built */
    fib_forward_chain_type_t fct;
} mpls_tunnel_collect_forwarding_ctx_t;
233
Neale Ranns81424992017-05-18 03:03:22 -0700234static fib_path_list_walk_rc_t
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800235mpls_tunnel_collect_forwarding (fib_node_index_t pl_index,
236 fib_node_index_t path_index,
237 void *arg)
238{
239 mpls_tunnel_collect_forwarding_ctx_t *ctx;
240 fib_path_ext_t *path_ext;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800241
242 ctx = arg;
Neale Rannsad422ed2016-11-02 14:20:04 +0000243
Neale Ranns3b222a32016-12-02 15:41:03 +0000244 /*
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800245 * if the path is not resolved, don't include it.
Neale Ranns3b222a32016-12-02 15:41:03 +0000246 */
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800247 if (!fib_path_is_resolved(path_index))
Neale Rannsad422ed2016-11-02 14:20:04 +0000248 {
Neale Ranns81424992017-05-18 03:03:22 -0700249 return (FIB_PATH_LIST_WALK_CONTINUE);
Neale Rannsad422ed2016-11-02 14:20:04 +0000250 }
251
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800252 /*
253 * get the matching path-extension for the path being visited.
254 */
Neale Ranns81424992017-05-18 03:03:22 -0700255 path_ext = fib_path_ext_list_find_by_path_index(&ctx->mt->mt_path_exts,
256 path_index);
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800257
Neale Ranns31ed7442018-02-23 05:29:09 -0800258 /*
259 * we don't want IP TTL decrements for packets hitting the MPLS labels
260 * we stack on, since the IP TTL decrement is done by the adj
261 */
262 path_ext->fpe_mpls_flags |= FIB_PATH_EXT_MPLS_FLAG_NO_IP_TTL_DECR;
263
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800264 /*
Neale Ranns19bd1902018-03-19 02:32:57 -0700265 * found a matching extension. stack it to obtain the forwarding
266 * info for this path.
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800267 */
Neale Ranns53962fb2021-12-20 18:18:42 +0000268 ctx->next_hops =
269 fib_path_ext_stack (path_ext, DPO_PROTO_MPLS, ctx->fct, ctx->next_hops);
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800270
Neale Ranns81424992017-05-18 03:03:22 -0700271 return (FIB_PATH_LIST_WALK_CONTINUE);
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800272}
273
/**
 * @brief Construct the tunnel's forwarding object (load-balance or
 * replicate) from the per-path forwarding collected off the path-list.
 *
 * @param mt     the tunnel
 * @param linkt  link type, used to pick the flow-hash config
 * @param fct    forwarding chain type to build
 * @param dpo_lb in/out: the DPO to (create and) update
 */
static void
mpls_tunnel_mk_lb (mpls_tunnel_t *mt,
                   vnet_link_t linkt,
                   fib_forward_chain_type_t fct,
                   dpo_id_t *dpo_lb)
{
    dpo_proto_t lb_proto;

    /*
     * If the entry has path extensions then we construct a load-balance
     * by stacking the extensions on the forwarding chains of the paths.
     * Otherwise we use the load-balance of the path-list
     */
    mpls_tunnel_collect_forwarding_ctx_t ctx = {
        .mt = mt,
        .next_hops = NULL,
        .fct = fct,
    };

    /*
     * As an optimisation we allocate the vector of next-hops to be sized
     * equal to the maximum number of paths we will need, which is also the
     * most likely number we will need, since in most cases the paths are 'up'.
     */
    vec_validate(ctx.next_hops, fib_path_list_get_n_paths(mt->mt_path_list));
    vec_reset_length(ctx.next_hops);

    lb_proto = fib_forw_chain_type_to_dpo_proto(fct);

    if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
    {
        fib_path_list_walk(mt->mt_path_list,
                           mpls_tunnel_collect_forwarding,
                           &ctx);
    }

    if (!dpo_id_is_valid(dpo_lb))
    {
        /*
         * first time create
         */
        if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
        {
            dpo_set(dpo_lb,
                    DPO_REPLICATE,
                    lb_proto,
                    replicate_create(0, lb_proto));
        }
        else
        {
            /* flow-hash config depends on the payload link type */
            flow_hash_config_t fhc;

            switch (linkt)
            {
            case VNET_LINK_MPLS:
                fhc = MPLS_FLOW_HASH_DEFAULT;
                break;
            case VNET_LINK_IP4:
            case VNET_LINK_IP6:
                fhc = IP_FLOW_HASH_DEFAULT;
                break;
            default:
                fhc = 0;
                break;
            }

            dpo_set(dpo_lb,
                    DPO_LOAD_BALANCE,
                    lb_proto,
                    load_balance_create(0, lb_proto, fhc));
        }
    }

    if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
    {
        /*
         * MPLS multicast
         *
         * NOTE(review): no vec_free of ctx.next_hops here, unlike the
         * unicast branch below — presumably replicate_multipath_update
         * takes ownership of the vector; confirm against replicate_dpo.c
         */
        replicate_multipath_update(dpo_lb, ctx.next_hops);
    }
    else
    {
        load_balance_multipath_update(dpo_lb,
                                      ctx.next_hops,
                                      LOAD_BALANCE_FLAG_NONE);
        vec_free(ctx.next_hops);
    }
}
362
363/**
364 * mpls_tunnel_stack
365 *
366 * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
367 */
368static void
369mpls_tunnel_stack (adj_index_t ai)
370{
371 ip_adjacency_t *adj;
372 mpls_tunnel_t *mt;
373 u32 sw_if_index;
374
375 adj = adj_get(ai);
376 sw_if_index = adj->rewrite_header.sw_if_index;
377
378 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
379
Neale Ranns097fa662018-05-01 05:17:55 -0700380 if (NULL == mt || FIB_NODE_INDEX_INVALID == mt->mt_path_list)
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800381 return;
Neale Rannsad422ed2016-11-02 14:20:04 +0000382
Neale Ranns901cbb92019-02-19 02:10:13 -0800383 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
384 {
385 adj_nbr_midchain_unstack(ai);
386 return;
387 }
388
Neale Rannsad422ed2016-11-02 14:20:04 +0000389 /*
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800390 * while we're stacking the adj, remove the tunnel from the child list
391 * of the path list. this breaks a circular dependency of walk updates
392 * where the create of adjacencies in the children can lead to walks
393 * that get back here.
394 */
395 fib_path_list_lock(mt->mt_path_list);
396
397 fib_path_list_child_remove(mt->mt_path_list,
398 mt->mt_sibling_index);
399
400 /*
401 * Construct the DPO (load-balance or replicate) that we can stack
402 * the tunnel's midchain on
Neale Rannsad422ed2016-11-02 14:20:04 +0000403 */
404 if (vnet_hw_interface_get_flags(vnet_get_main(),
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800405 mt->mt_hw_if_index) &
406 VNET_HW_INTERFACE_FLAG_LINK_UP)
Neale Rannsad422ed2016-11-02 14:20:04 +0000407 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800408 dpo_id_t dpo = DPO_INVALID;
Neale Rannsad422ed2016-11-02 14:20:04 +0000409
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800410 mpls_tunnel_mk_lb(mt,
411 adj->ia_link,
Neale Ranns31ed7442018-02-23 05:29:09 -0800412 fib_forw_chain_type_from_link_type(
413 adj_get_link_type(ai)),
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800414 &dpo);
Neale Rannsad422ed2016-11-02 14:20:04 +0000415
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800416 adj_nbr_midchain_stack(ai, &dpo);
417 dpo_reset(&dpo);
Neale Rannsad422ed2016-11-02 14:20:04 +0000418 }
419 else
420 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800421 adj_nbr_midchain_unstack(ai);
Neale Rannsad422ed2016-11-02 14:20:04 +0000422 }
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800423
424 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
425 FIB_NODE_TYPE_MPLS_TUNNEL,
426 mt - mpls_tunnel_pool);
427
Neale Rannsc13548a2017-05-24 10:53:43 -0700428 fib_path_list_unlock(mt->mt_path_list);
Neale Rannsad422ed2016-11-02 14:20:04 +0000429}
430
/**
 * @brief Call back when restacking all adjacencies on a MPLS interface
 *
 * @param ai  the adjacency to restack
 * @param ctx unused
 * @return ADJ_WALK_RC_CONTINUE so the whole walk completes
 */
static adj_walk_rc_t
mpls_adj_walk_cb (adj_index_t ai,
                  void *ctx)
{
    mpls_tunnel_stack(ai);

    return (ADJ_WALK_RC_CONTINUE);
}
442
/**
 * @brief Re-resolve the tunnel's forwarding after a path or state change.
 *
 * For L2 tunnels the TX node forwards via mt_l2_lb, so rebuild that DPO
 * directly; for L3 tunnels, restack every neighbour adjacency on the
 * tunnel interface.
 */
static void
mpls_tunnel_restack (mpls_tunnel_t *mt)
{
    fib_protocol_t proto;

    /*
     * walk all the adjacencies on the MPLS interface and restack them
     */
    if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
    {
        /*
         * Stack a load-balance that drops, whilst we have no paths
         */
        dpo_id_t dpo = DPO_INVALID;

        mpls_tunnel_mk_lb(mt,
                          VNET_LINK_MPLS,
                          FIB_FORW_CHAIN_TYPE_ETHERNET,
                          &dpo);

        dpo_stack_from_node(mpls_tunnel_tx.index,
                            &mt->mt_l2_lb,
                            &dpo);
        dpo_reset(&dpo);
    }
    else
    {
        FOR_EACH_FIB_IP_PROTOCOL(proto)
        {
            adj_nbr_walk(mt->mt_sw_if_index,
                         proto,
                         mpls_adj_walk_cb,
                         NULL);
        }
    }
}
479
480static clib_error_t *
481mpls_tunnel_admin_up_down (vnet_main_t * vnm,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800482 u32 hw_if_index,
483 u32 flags)
Neale Rannsad422ed2016-11-02 14:20:04 +0000484{
485 vnet_hw_interface_t * hi;
486 mpls_tunnel_t *mt;
487
488 hi = vnet_get_hw_interface (vnm, hw_if_index);
489
490 mt = mpls_tunnel_get_from_sw_if_index(hi->sw_if_index);
491
492 if (NULL == mt)
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800493 return (NULL);
Neale Rannsad422ed2016-11-02 14:20:04 +0000494
495 if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800496 vnet_hw_interface_set_flags (vnm, hw_if_index,
497 VNET_HW_INTERFACE_FLAG_LINK_UP);
Neale Rannsad422ed2016-11-02 14:20:04 +0000498 else
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800499 vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
Neale Rannsad422ed2016-11-02 14:20:04 +0000500
501 mpls_tunnel_restack(mt);
502
503 return (NULL);
504}
505
/**
 * @brief Fixup the adj rewrite post encap. This is a no-op since the
 * rewrite is a stack of labels.
 */
static void
mpls_tunnel_fixup (vlib_main_t *vm,
                   const ip_adjacency_t *adj,
                   vlib_buffer_t *b0,
                   const void*data)
{
    /*
     * A no-op w.r.t. the header. but reset the 'have we pushed any
     * MPLS labels onto the packet' flag. That way when we enter the
     * tunnel we'll get a TTL set to 255
     */
    vnet_buffer(b0)->mpls.first = 0;
}
523
/**
 * @brief HW-interface-class update-adjacency callback: convert the
 * adjacency into a midchain (with an empty rewrite — labels are imposed
 * per-path) and then stack it on the tunnel's forwarding.
 */
static void
mpls_tunnel_update_adj (vnet_main_t * vnm,
                        u32 sw_if_index,
                        adj_index_t ai)
{
    ip_adjacency_t *adj;

    ASSERT(ADJ_INDEX_INVALID != ai);

    adj = adj_get(ai);

    switch (adj->lookup_next_index)
    {
    case IP_LOOKUP_NEXT_ARP:
    case IP_LOOKUP_NEXT_GLEAN:
    case IP_LOOKUP_NEXT_BCAST:
        /* unicast/bcast: midchain with a zero-length rewrite */
        adj_nbr_midchain_update_rewrite(ai, mpls_tunnel_fixup,
                                        NULL,
                                        ADJ_FLAG_NONE,
                                        mpls_tunnel_build_rewrite_i());
        break;
    case IP_LOOKUP_NEXT_MCAST:
        /*
         * Construct a partial rewrite from the known ethernet mcast dest MAC
         * There's no MAC fixup, so the last 2 parameters are 0
         */
        adj_mcast_midchain_update_rewrite(ai, mpls_tunnel_fixup,
                                          NULL,
                                          ADJ_FLAG_NONE,
                                          mpls_tunnel_build_rewrite_i(),
                                          0, 0);
        break;

    /* these adjacency types should never be updated on a tunnel */
    case IP_LOOKUP_NEXT_DROP:
    case IP_LOOKUP_NEXT_PUNT:
    case IP_LOOKUP_NEXT_LOCAL:
    case IP_LOOKUP_NEXT_REWRITE:
    case IP_LOOKUP_NEXT_MIDCHAIN:
    case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
    case IP_LOOKUP_NEXT_ICMP_ERROR:
    case IP_LOOKUP_N_NEXT:
        ASSERT (0);
        break;
    }

    mpls_tunnel_stack(ai);
}
571
572static u8 *
573format_mpls_tunnel_name (u8 * s, va_list * args)
574{
575 u32 dev_instance = va_arg (*args, u32);
576 return format (s, "mpls-tunnel%d", dev_instance);
577}
578
579static u8 *
580format_mpls_tunnel_device (u8 * s, va_list * args)
581{
582 u32 dev_instance = va_arg (*args, u32);
583 CLIB_UNUSED (int verbose) = va_arg (*args, int);
584
585 return (format (s, "MPLS-tunnel: id %d\n", dev_instance));
586}
587
/* Device class for MPLS tunnel interfaces */
VNET_DEVICE_CLASS (mpls_tunnel_class) = {
    .name = "MPLS tunnel device",
    .format_device_name = format_mpls_tunnel_name,
    .format_device = format_mpls_tunnel_device,
    .format_tx_trace = format_mpls_tunnel_tx_trace,
    .admin_up_down_function = mpls_tunnel_admin_up_down,
};
595
/* HW interface class: point-to-point, adjacencies resolved via
 * mpls_tunnel_update_adj, empty rewrite via mpls_tunnel_build_rewrite */
VNET_HW_INTERFACE_CLASS (mpls_tunnel_hw_interface_class) = {
    .name = "MPLS-Tunnel",
    .update_adjacency = mpls_tunnel_update_adj,
    .build_rewrite = mpls_tunnel_build_rewrite,
    .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
};
602
603const mpls_tunnel_t *
604mpls_tunnel_get (u32 mti)
605{
606 return (pool_elt_at_index(mpls_tunnel_pool, mti));
607}
608
/**
 * @brief Walk all the MPLS tunnels
 *
 * @param cb  callback invoked with each tunnel's pool index
 * @param ctx opaque context passed through to the callback
 */
void
mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb,
                  void *ctx)
{
    u32 mti;

    pool_foreach_index (mti, mpls_tunnel_pool)
    {
        cb(mti, ctx);
    }
}
623
/**
 * @brief Delete the tunnel on the given SW interface.
 * Silently does nothing if no tunnel exists on that interface.
 */
void
vnet_mpls_tunnel_del (u32 sw_if_index)
{
    mpls_tunnel_t *mt;

    mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);

    if (NULL == mt)
        return;

    /* detach from the path-list before tearing down the interface */
    if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
        fib_path_list_child_remove(mt->mt_path_list,
                                   mt->mt_sibling_index);
    dpo_reset(&mt->mt_l2_lb);

    vnet_reset_interface_l3_output_node (vlib_get_main (), mt->mt_sw_if_index);
    vnet_delete_hw_interface (vnet_get_main(), mt->mt_hw_if_index);

    /* return the instance to the pool and clear the DB slot (~0 = empty) */
    pool_put(mpls_tunnel_pool, mt);
    mpls_tunnel_db[sw_if_index] = ~0;
}
645
/**
 * @brief Create a new MPLS tunnel interface.
 *
 * @param l2_only      non-zero for an L2 (xconnect) tunnel
 * @param is_multicast non-zero for a multicast (replicate) tunnel
 * @param tag          optional interface tag, may be NULL
 * @return the SW interface index of the new tunnel
 */
u32
vnet_mpls_tunnel_create (u8 l2_only,
                         u8 is_multicast,
                         u8 *tag)
{
    vnet_hw_interface_t * hi;
    mpls_tunnel_t *mt;
    vnet_main_t * vnm;
    u32 mti;

    vnm = vnet_get_main();
    pool_get(mpls_tunnel_pool, mt);
    clib_memset (mt, 0, sizeof (*mt));
    mti = mt - mpls_tunnel_pool;
    fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL);
    mt->mt_path_list = FIB_NODE_INDEX_INVALID;
    mt->mt_sibling_index = FIB_NODE_INDEX_INVALID;

    if (is_multicast)
        mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST;
    if (l2_only)
        mt->mt_flags |= MPLS_TUNNEL_FLAG_L2;
    if (tag)
        /* NOTE(review): copies sizeof(mt->mt_tag) bytes out of 'tag';
         * assumes the caller's buffer is at least that large (and
         * NUL-terminated) — confirm against callers */
        memcpy(mt->mt_tag, tag, sizeof(mt->mt_tag));
    else
        mt->mt_tag[0] = '\0';

    /*
     * Create a new tunnel HW interface
     */
    mt->mt_hw_if_index = vnet_register_interface(
        vnm,
        mpls_tunnel_class.index,
        mti,
        mpls_tunnel_hw_interface_class.index,
        mti);
    hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);

    /* L2 tunnels transmit via the mpls-tunnel-tx node; L3 tunnels use
     * the generic tunnel-output feature node */
    if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
        vnet_set_interface_output_node (vnm, mt->mt_hw_if_index,
                                        mpls_tunnel_tx.index);
    else
        vnet_set_interface_l3_output_node (vnm->vlib_main, hi->sw_if_index,
                                           (u8 *) "tunnel-output");

    /* Standard default MPLS tunnel MTU. */
    vnet_sw_interface_set_mtu (vnm, hi->sw_if_index, 9000);

    /*
     * Add the new tunnel to the tunnel DB - key:SW if index
     */
    mt->mt_sw_if_index = hi->sw_if_index;
    vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0);
    mpls_tunnel_db[mt->mt_sw_if_index] = mti;

    return (mt->mt_sw_if_index);
}
703
/**
 * @brief Add paths (with their label stacks) to an existing tunnel.
 * Does nothing if no tunnel exists on the given SW interface.
 *
 * @param sw_if_index the tunnel's SW interface
 * @param rpaths      vector of routes paths to add
 */
void
vnet_mpls_tunnel_path_add (u32 sw_if_index,
                           fib_route_path_t *rpaths)
{
    fib_route_path_t *rpath;
    mpls_tunnel_t *mt;
    u32 mti;

    mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);

    if (NULL == mt)
        return;

    mti = mt - mpls_tunnel_pool;

    /*
     * construct a path-list from the path provided
     */
    if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
    {
        /* first paths on this tunnel: create the path-list and become
         * its child so we are updated on resolution changes */
        mt->mt_path_list = fib_path_list_create(FIB_PATH_LIST_FLAG_SHARED, rpaths);
        mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
                                                       FIB_NODE_TYPE_MPLS_TUNNEL,
                                                       mti);
    }
    else
    {
        /* extend the existing (shared) path-list via copy-and-add, then
         * move our child registration to the new list */
        fib_node_index_t old_pl_index;

        old_pl_index = mt->mt_path_list;

        mt->mt_path_list =
            fib_path_list_copy_and_path_add(old_pl_index,
                                            FIB_PATH_LIST_FLAG_SHARED,
                                            rpaths);

        fib_path_list_child_remove(old_pl_index,
                                   mt->mt_sibling_index);
        mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
                                                       FIB_NODE_TYPE_MPLS_TUNNEL,
                                                       mti);
        /*
         * re-resolve all the path-extensions with the new path-list
         */
        fib_path_ext_list_resolve(&mt->mt_path_exts, mt->mt_path_list);
    }
    /* record one MPLS path-extension (label stack) per added path */
    vec_foreach(rpath, rpaths)
    {
        fib_path_ext_list_insert(&mt->mt_path_exts,
                                 mt->mt_path_list,
                                 FIB_PATH_EXT_MPLS,
                                 rpath);
    }
    mpls_tunnel_restack(mt);
}
759
/**
 * @brief Remove paths from a tunnel.
 *
 * @param sw_if_index the tunnel's SW interface
 * @param rpaths      vector of route paths to remove
 * @return the number of paths remaining on the tunnel (0 if the tunnel
 *         does not exist, had no paths, or the last path was removed)
 */
int
vnet_mpls_tunnel_path_remove (u32 sw_if_index,
                              fib_route_path_t *rpaths)
{
    mpls_tunnel_t *mt;
    u32 mti;

    mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);

    if (NULL == mt)
        return (0);

    mti = mt - mpls_tunnel_pool;

    /*
     * construct a path-list from the path provided
     */
    if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
    {
        /* can't remove a path when we don't have one */
        return (0);
    }
    else
    {
        fib_node_index_t old_pl_index;

        old_pl_index = mt->mt_path_list;

        /* hold a lock so the old path-list survives while we detach
         * from it */
        fib_path_list_lock(old_pl_index);
        mt->mt_path_list =
            fib_path_list_copy_and_path_remove(old_pl_index,
                                               FIB_PATH_LIST_FLAG_SHARED,
                                               rpaths);

        fib_path_list_child_remove(old_pl_index,
                                   mt->mt_sibling_index);

        if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
        {
            /* no paths left */
            fib_path_list_unlock(old_pl_index);
            return (0);
        }
        else
        {
            mt->mt_sibling_index =
                fib_path_list_child_add(mt->mt_path_list,
                                        FIB_NODE_TYPE_MPLS_TUNNEL,
                                        mti);
        }
        /*
         * find the matching path extension and remove it
         */
        fib_path_ext_list_remove(&mt->mt_path_exts,
                                 FIB_PATH_EXT_MPLS,
                                 rpaths);

        /*
         * re-resolve all the path-extensions with the new path-list
         */
        fib_path_ext_list_resolve(&mt->mt_path_exts,
                                  mt->mt_path_list);

        mpls_tunnel_restack(mt);
        fib_path_list_unlock(old_pl_index);
    }

    return (fib_path_list_get_n_paths(mt->mt_path_list));
}
829
Neale Rannsf5fa5ae2018-09-26 05:07:25 -0700830int
831vnet_mpls_tunnel_get_index (u32 sw_if_index)
832{
833 mpls_tunnel_t *mt;
834
835 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
836
837 if (NULL == mt)
838 return (~0);
839
840 return (mt - mpls_tunnel_pool);
841}
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800842
/**
 * @brief CLI handler for "mpls tunnel": create/delete tunnels and
 * add/remove their paths.
 *
 * add: creates the tunnel if no interface was named, then adds the
 *      parsed paths. del: removes the named paths, and deletes the
 *      tunnel when none were given or none remain.
 */
static clib_error_t *
vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm,
                                    unformat_input_t * input,
                                    vlib_cli_command_t * cmd)
{
    unformat_input_t _line_input, * line_input = &_line_input;
    vnet_main_t * vnm = vnet_get_main();
    u8 is_del = 0, l2_only = 0, is_multicast =0;
    fib_route_path_t rpath, *rpaths = NULL;
    u32 sw_if_index = ~0, payload_proto;
    clib_error_t *error = NULL;

    clib_memset(&rpath, 0, sizeof(rpath));
    payload_proto = DPO_PROTO_MPLS;

    /* Get a line of input. */
    if (! unformat_user (input, unformat_line_input, line_input))
        return 0;

    while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (line_input, "del %U",
                      unformat_vnet_sw_interface, vnm,
                      &sw_if_index))
            is_del = 1;
        else if (unformat (line_input, "add %U",
                           unformat_vnet_sw_interface, vnm,
                           &sw_if_index))
            is_del = 0;
        else if (unformat (line_input, "add"))
            is_del = 0;
        else if (unformat (line_input, "l2-only"))
            l2_only = 1;
        else if (unformat (line_input, "multicast"))
            is_multicast = 1;
        else if (unformat (line_input, "via %U",
                           unformat_fib_route_path,
                           &rpath, &payload_proto))
            /* each "via ..." clause contributes one path */
            vec_add1(rpaths, rpath);
        else
        {
            error = clib_error_return (0, "unknown input '%U'",
                                       format_unformat_error, line_input);
            goto done;
        }
    }

    if (is_del)
    {
        /* "del" with no paths removes the whole tunnel; with paths it
         * removes those paths and deletes the tunnel only if none remain */
        if (NULL == rpaths)
        {
            vnet_mpls_tunnel_del(sw_if_index);
        }
        else if (!vnet_mpls_tunnel_path_remove(sw_if_index, rpaths))
        {
            vnet_mpls_tunnel_del(sw_if_index);
        }
    }
    else
    {
        /* a tunnel path must impose at least one out-label */
        if (0 == vec_len(rpath.frp_label_stack))
        {
            error = clib_error_return (0, "No Output Labels '%U'",
                                       format_unformat_error, line_input);
            goto done;
        }

        if (~0 == sw_if_index)
        {
            sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast, NULL);
        }
        vnet_mpls_tunnel_path_add(sw_if_index, rpaths);
    }

done:
    vec_free(rpaths);
    unformat_free (line_input);

    return error;
}
923
/*?
 * This command creates a uni-directional MPLS tunnel interface
 * (i.e. for RSVP-TE). Paths are added/removed with 'add'/'del';
 * deleting the last path deletes the tunnel itself.
 *
 * @cliexpar
 * @cliexstart{mpls tunnel}
 * mpls tunnel add via 10.0.0.1 GigEthernet0/8/0 out-labels 33 34
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (create_mpls_tunnel_command, static) = {
  .path = "mpls tunnel",
  .short_help =
  "mpls tunnel [multicast] [l2-only] via [next-hop-address] [next-hop-interface] [next-hop-table <value>] [weight <value>] [preference <value>] [udp-encap-id <value>] [ip4-lookup-in-table <value>] [ip6-lookup-in-table <value>] [mpls-lookup-in-table <value>] [resolve-via-host] [resolve-via-connected] [rx-ip4 <interface>] [out-labels <value value value>]",
  .function = vnet_create_mpls_tunnel_command_fn,
};
938
939static u8 *
940format_mpls_tunnel (u8 * s, va_list * args)
941{
942 mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *);
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800943 mpls_tunnel_attribute_t attr;
Neale Rannsad422ed2016-11-02 14:20:04 +0000944
Neale Ranns514e59d2019-01-25 03:18:27 -0800945 s = format(s, "mpls-tunnel%d: sw_if_index:%d hw_if_index:%d",
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800946 mt - mpls_tunnel_pool,
947 mt->mt_sw_if_index,
948 mt->mt_hw_if_index);
949 if (MPLS_TUNNEL_FLAG_NONE != mt->mt_flags) {
950 s = format(s, " \n flags:");
951 FOR_EACH_MPLS_TUNNEL_ATTRIBUTE(attr) {
952 if ((1<<attr) & mt->mt_flags) {
953 s = format (s, "%s,", mpls_tunnel_attribute_names[attr]);
954 }
955 }
Neale Rannsad422ed2016-11-02 14:20:04 +0000956 }
957 s = format(s, "\n via:\n");
958 s = fib_path_list_format(mt->mt_path_list, s);
Neale Ranns81424992017-05-18 03:03:22 -0700959 s = format(s, "%U", format_fib_path_ext_list, &mt->mt_path_exts);
Neale Rannsad422ed2016-11-02 14:20:04 +0000960 s = format(s, "\n");
961
Neale Rannsda78f952017-05-24 09:15:43 -0700962 if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
963 {
964 s = format(s, " forwarding: %U\n",
965 format_fib_forw_chain_type,
966 FIB_FORW_CHAIN_TYPE_ETHERNET);
967 s = format(s, " %U\n", format_dpo_id, &mt->mt_l2_lb, 2);
968 }
969
Neale Rannsad422ed2016-11-02 14:20:04 +0000970 return (s);
971}
972
973static clib_error_t *
974show_mpls_tunnel_command_fn (vlib_main_t * vm,
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800975 unformat_input_t * input,
976 vlib_cli_command_t * cmd)
Neale Rannsad422ed2016-11-02 14:20:04 +0000977{
978 mpls_tunnel_t * mt;
979 u32 mti = ~0;
980
981 if (pool_elts (mpls_tunnel_pool) == 0)
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800982 vlib_cli_output (vm, "No MPLS tunnels configured...");
Neale Rannsad422ed2016-11-02 14:20:04 +0000983
984 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
985 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800986 if (unformat (input, "%d", &mti))
987 ;
988 else
989 break;
Neale Rannsad422ed2016-11-02 14:20:04 +0000990 }
991
992 if (~0 == mti)
993 {
Damjan Marionb2c31b62020-12-13 21:47:40 +0100994 pool_foreach (mt, mpls_tunnel_pool)
995 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800996 vlib_cli_output (vm, "[@%d] %U",
997 mt - mpls_tunnel_pool,
998 format_mpls_tunnel, mt);
Damjan Marionb2c31b62020-12-13 21:47:40 +0100999 }
Neale Rannsad422ed2016-11-02 14:20:04 +00001000 }
1001 else
1002 {
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001003 if (pool_is_free_index(mpls_tunnel_pool, mti))
Paul Vinciguerrabdc0e6b2018-09-22 05:32:50 -07001004 return clib_error_return (0, "Not a tunnel index %d", mti);
Neale Rannsad422ed2016-11-02 14:20:04 +00001005
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001006 mt = pool_elt_at_index(mpls_tunnel_pool, mti);
Neale Rannsad422ed2016-11-02 14:20:04 +00001007
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001008 vlib_cli_output (vm, "[@%d] %U",
1009 mt - mpls_tunnel_pool,
1010 format_mpls_tunnel, mt);
Neale Rannsad422ed2016-11-02 14:20:04 +00001011 }
1012
1013 return 0;
1014}
1015
/*?
 * This command displays MPLS tunnels: all of them, or only the one
 * with the given tunnel index.
 *
 * @cliexpar
 * @cliexstart{show mpls tunnel 2}
 * [@2] mpls-tunnel2: sw_if_index:5 hw_if_index:5
 *  via:
 *   index:26 locks:1 proto:ipv4 uPRF-list:26 len:1 itfs:[2, ]
 *     index:26 pl-index:26 ipv4 weight=1 attached-nexthop:  oper-flags:resolved,
 *      10.0.0.2 loop0
 *         [@0]: ipv4 via 10.0.0.2 loop0: IP4: de:ad:00:00:00:00 -> 00:00:11:aa:bb:cc
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = {
    .path = "show mpls tunnel",
    .function = show_mpls_tunnel_command_fn,
};
1035
1036static mpls_tunnel_t *
1037mpls_tunnel_from_fib_node (fib_node_t *node)
1038{
Neale Rannsad422ed2016-11-02 14:20:04 +00001039 ASSERT(FIB_NODE_TYPE_MPLS_TUNNEL == node->fn_type);
Neale Rannsad422ed2016-11-02 14:20:04 +00001040 return ((mpls_tunnel_t*) (((char*)node) -
1041 STRUCT_OFFSET_OF(mpls_tunnel_t, mt_node)));
1042}
1043
1044/**
1045 * Function definition to backwalk a FIB node
1046 */
1047static fib_node_back_walk_rc_t
1048mpls_tunnel_back_walk (fib_node_t *node,
Neale Ranns0f26c5a2017-03-01 15:12:11 -08001049 fib_node_back_walk_ctx_t *ctx)
Neale Rannsad422ed2016-11-02 14:20:04 +00001050{
1051 mpls_tunnel_restack(mpls_tunnel_from_fib_node(node));
1052
1053 return (FIB_NODE_BACK_WALK_CONTINUE);
1054}
1055
1056/**
1057 * Function definition to get a FIB node from its index
1058 */
1059static fib_node_t*
1060mpls_tunnel_fib_node_get (fib_node_index_t index)
1061{
1062 mpls_tunnel_t * mt;
1063
1064 mt = pool_elt_at_index(mpls_tunnel_pool, index);
1065
1066 return (&mt->mt_node);
1067}
1068
1069/**
1070 * Function definition to inform the FIB node that its last lock has gone.
1071 */
1072static void
1073mpls_tunnel_last_lock_gone (fib_node_t *node)
1074{
1075 /*
1076 * The MPLS MPLS tunnel is a root of the graph. As such
1077 * it never has children and thus is never locked.
1078 */
1079 ASSERT(0);
1080}
1081
/*
 * Virtual function table registered by MPLS tunnels
 * for participation in the FIB object graph.
 */
const static fib_node_vft_t mpls_vft = {
    .fnv_get = mpls_tunnel_fib_node_get,
    .fnv_last_lock = mpls_tunnel_last_lock_gone,
    .fnv_back_walk = mpls_tunnel_back_walk,
};
1091
1092static clib_error_t *
1093mpls_tunnel_init (vlib_main_t *vm)
1094{
1095 fib_node_register_type(FIB_NODE_TYPE_MPLS_TUNNEL, &mpls_vft);
1096
1097 return 0;
1098}
1099VLIB_INIT_FUNCTION(mpls_tunnel_init);