/*
 * mpls_tunnel.c: MPLS tunnel interfaces (i.e. for RSVP-TE)
 *
 * Copyright (c) 2012 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/mpls/mpls_tunnel.h>
#include <vnet/mpls/mpls_types.h>
#include <vnet/ip/ip.h>
#include <vnet/fib/fib_path_list.h>
#include <vnet/adj/adj_midchain.h>
#include <vnet/adj/adj_mcast.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/fib/mpls_fib.h>

/**
 * @brief pool of tunnel instances
 */
static mpls_tunnel_t *mpls_tunnel_pool;

/**
 * @brief DB of SW index to tunnel index
 */
static u32 *mpls_tunnel_db;

/**
 * @brief MPLS tunnel flags strings
 */
static const char *mpls_tunnel_attribute_names[] = MPLS_TUNNEL_ATTRIBUTES;

/**
 * @brief Packet trace structure
 */
typedef struct mpls_tunnel_trace_t_
{
    /**
     * Tunnel-id / index in tunnel vector
     */
    u32 tunnel_id;
} mpls_tunnel_trace_t;

static u8 *
format_mpls_tunnel_tx_trace (u8 * s,
                             va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *);

    s = format (s, "MPLS: tunnel %d", t->tunnel_id);
    return s;
}

typedef enum
{
    MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN,
    MPLS_TUNNEL_ENCAP_N_NEXT,
} mpls_tunnel_encap_next_t;

/**
 * @brief TX function. Only called for L2 traffic; L3 traffic uses the adj-midchains.
 */
VLIB_NODE_FN (mpls_tunnel_tx) (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
    u32 *from = vlib_frame_vector_args (frame);
    vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
    u16 nexts[VLIB_FRAME_SIZE], *next;
    u32 n_left;

    n_left = frame->n_vectors;
    b = bufs;
    next = nexts;

    vlib_get_buffers (vm, from, bufs, n_left);

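    /*
     * Typical VPP dual/single loop: handle buffers two at a time while
     * possible, then drain any remainder one at a time below.
     */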
    while (n_left > 2)
    {
        const mpls_tunnel_t *mt0, *mt1;
        u32 sw_if_index0, sw_if_index1;

        sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX];
        sw_if_index1 = vnet_buffer(b[1])->sw_if_index[VLIB_TX];

        mt0 = pool_elt_at_index(mpls_tunnel_pool,
                                mpls_tunnel_db[sw_if_index0]);
        mt1 = pool_elt_at_index(mpls_tunnel_pool,
                                mpls_tunnel_db[sw_if_index1]);

        vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index;
        vnet_buffer(b[1])->ip.adj_index[VLIB_TX] = mt1->mt_l2_lb.dpoi_index;
        next[0] = mt0->mt_l2_lb.dpoi_next_node;
        next[1] = mt1->mt_l2_lb.dpoi_next_node;

        /* since we are coming out of the L2 world, where the vlib_buffer
         * union is used for other things, make sure it is clean for
         * MPLS from now on.
         */
        vnet_buffer(b[0])->mpls.first = 0;
        vnet_buffer(b[1])->mpls.first = 0;

        if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
            mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
                                                      b[0], sizeof (*tr));
            tr->tunnel_id = mpls_tunnel_db[sw_if_index0];
        }
        if (PREDICT_FALSE(b[1]->flags & VLIB_BUFFER_IS_TRACED))
        {
            mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
                                                      b[1], sizeof (*tr));
            tr->tunnel_id = mpls_tunnel_db[sw_if_index1];
        }

        b += 2;
        n_left -= 2;
        next += 2;
    }
    while (n_left)
    {
        const mpls_tunnel_t *mt0;
        u32 sw_if_index0;

        sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX];
        mt0 = pool_elt_at_index(mpls_tunnel_pool,
                                mpls_tunnel_db[sw_if_index0]);

        vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index;
        next[0] = mt0->mt_l2_lb.dpoi_next_node;

        /* since we are coming out of the L2 world, where the vlib_buffer
         * union is used for other things, make sure it is clean for
         * MPLS from now on.
         */
        vnet_buffer(b[0])->mpls.first = 0;

        if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
            mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
                                                      b[0], sizeof (*tr));
            tr->tunnel_id = mpls_tunnel_db[sw_if_index0];
        }

        b += 1;
        n_left -= 1;
        next += 1;
    }

    vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

    return frame->n_vectors;
}

VLIB_REGISTER_NODE (mpls_tunnel_tx) =
{
    .name = "mpls-tunnel-tx",
    .vector_size = sizeof (u32),
    .format_trace = format_mpls_tunnel_tx_trace,
    .type = VLIB_NODE_TYPE_INTERNAL,
    .n_errors = 0,
    .n_next_nodes = 0,
    /* MPLS_TUNNEL_ENCAP_N_NEXT, */
    /* .next_nodes = { */
    /*     [MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN] = "mpls-load-balance", */
    /* }, */
};

/**
 * @brief Get a tunnel object from a SW interface index
 */
static mpls_tunnel_t*
mpls_tunnel_get_from_sw_if_index (u32 sw_if_index)
{
    if ((vec_len(mpls_tunnel_db) <= sw_if_index) ||
        (~0 == mpls_tunnel_db[sw_if_index]))
        return (NULL);

    return (pool_elt_at_index(mpls_tunnel_pool,
                              mpls_tunnel_db[sw_if_index]));
}

/**
 * @brief Build a rewrite string for the MPLS tunnel.
 */
static u8*
mpls_tunnel_build_rewrite_i (void)
{
    /*
     * Passing the adj code a NULL rewrite means 'I don't have one because
     * the other end is unresolved'. That's not the case here. For the MPLS
     * tunnel there are just no bytes of encap to apply in the adj. We'll impose
     * the label stack once we choose a path. So return a zero length rewrite.
     */
    u8 *rewrite = NULL;

    vec_validate(rewrite, 0);
    vec_reset_length(rewrite);

    return (rewrite);
}

/**
 * @brief Build a rewrite string for the MPLS tunnel.
 */
static u8*
mpls_tunnel_build_rewrite (vnet_main_t * vnm,
                           u32 sw_if_index,
                           vnet_link_t link_type,
                           const void *dst_address)
{
    return (mpls_tunnel_build_rewrite_i());
}

typedef struct mpls_tunnel_collect_forwarding_ctx_t_
{
    load_balance_path_t * next_hops;
    const mpls_tunnel_t *mt;
    fib_forward_chain_type_t fct;
} mpls_tunnel_collect_forwarding_ctx_t;

static fib_path_list_walk_rc_t
mpls_tunnel_collect_forwarding (fib_node_index_t pl_index,
                                fib_node_index_t path_index,
                                void *arg)
{
    mpls_tunnel_collect_forwarding_ctx_t *ctx;
    fib_path_ext_t *path_ext;

    ctx = arg;

    /*
     * if the path is not resolved, don't include it.
     */
    if (!fib_path_is_resolved(path_index))
    {
        return (FIB_PATH_LIST_WALK_CONTINUE);
    }

    /*
     * get the matching path-extension for the path being visited.
     */
    path_ext = fib_path_ext_list_find_by_path_index(&ctx->mt->mt_path_exts,
                                                    path_index);

    /*
     * we don't want IP TTL decrements for packets hitting the MPLS labels
     * we stack on, since the IP TTL decrement is done by the adj
     */
    path_ext->fpe_mpls_flags |= FIB_PATH_EXT_MPLS_FLAG_NO_IP_TTL_DECR;

    /*
     * found a matching extension. stack it to obtain the forwarding
     * info for this path.
     */
    ctx->next_hops = fib_path_ext_stack(path_ext,
                                        ctx->fct,
                                        ctx->fct,
                                        ctx->next_hops);

    return (FIB_PATH_LIST_WALK_CONTINUE);
}

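/**
 * @brief Construct the tunnel's output DPO: a replicate for a multicast
 * tunnel, otherwise a load-balance built by walking the path-list and
 * stacking each resolved path's MPLS label extensions.
 */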
static void
mpls_tunnel_mk_lb (mpls_tunnel_t *mt,
                   vnet_link_t linkt,
                   fib_forward_chain_type_t fct,
                   dpo_id_t *dpo_lb)
{
    dpo_proto_t lb_proto;

    /*
     * If the entry has path extensions then we construct a load-balance
     * by stacking the extensions on the forwarding chains of the paths.
     * Otherwise we use the load-balance of the path-list
     */
    mpls_tunnel_collect_forwarding_ctx_t ctx = {
        .mt = mt,
        .next_hops = NULL,
        .fct = fct,
    };

    /*
     * As an optimisation we allocate the vector of next-hops to be sized
     * equal to the maximum number of paths we will need, which is also the
     * most likely number we will need, since in most cases the paths are 'up'.
     */
    vec_validate(ctx.next_hops, fib_path_list_get_n_paths(mt->mt_path_list));
    vec_reset_length(ctx.next_hops);

    lb_proto = fib_forw_chain_type_to_dpo_proto(fct);

    if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
    {
        fib_path_list_walk(mt->mt_path_list,
                           mpls_tunnel_collect_forwarding,
                           &ctx);
    }

    if (!dpo_id_is_valid(dpo_lb))
    {
        /*
         * first time create
         */
        if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
        {
            dpo_set(dpo_lb,
                    DPO_REPLICATE,
                    lb_proto,
                    replicate_create(0, lb_proto));
        }
        else
        {
            flow_hash_config_t fhc;

            switch (linkt)
            {
            case VNET_LINK_MPLS:
                fhc = MPLS_FLOW_HASH_DEFAULT;
                break;
            case VNET_LINK_IP4:
            case VNET_LINK_IP6:
                fhc = IP_FLOW_HASH_DEFAULT;
                break;
            default:
                fhc = 0;
                break;
            }

            dpo_set(dpo_lb,
                    DPO_LOAD_BALANCE,
                    lb_proto,
                    load_balance_create(0, lb_proto, fhc));
        }
    }

    if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
    {
        /*
         * MPLS multicast
         */
        replicate_multipath_update(dpo_lb, ctx.next_hops);
    }
    else
    {
        load_balance_multipath_update(dpo_lb,
                                      ctx.next_hops,
                                      LOAD_BALANCE_FLAG_NONE);
        vec_free(ctx.next_hops);
    }
}

/**
 * mpls_tunnel_stack
 *
 * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
 */
static void
mpls_tunnel_stack (adj_index_t ai)
{
    ip_adjacency_t *adj;
    mpls_tunnel_t *mt;
    u32 sw_if_index;

    adj = adj_get(ai);
    sw_if_index = adj->rewrite_header.sw_if_index;

    mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);

    if (NULL == mt)
        return;

    if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
    {
        adj_nbr_midchain_unstack(ai);
        return;
    }

    /*
     * While we're stacking the adj, remove the tunnel from the child list
     * of the path-list. This breaks a circular dependency of walk updates,
     * where the creation of adjacencies in the children can lead to walks
     * that get back here.
     */
    fib_path_list_lock(mt->mt_path_list);

    fib_path_list_child_remove(mt->mt_path_list,
                               mt->mt_sibling_index);

    /*
     * Construct the DPO (load-balance or replicate) that we can stack
     * the tunnel's midchain on
     */
    if (vnet_hw_interface_get_flags(vnet_get_main(),
                                    mt->mt_hw_if_index) &
        VNET_HW_INTERFACE_FLAG_LINK_UP)
    {
        dpo_id_t dpo = DPO_INVALID;

        mpls_tunnel_mk_lb(mt,
                          adj->ia_link,
                          fib_forw_chain_type_from_link_type(
                              adj_get_link_type(ai)),
                          &dpo);

        adj_nbr_midchain_stack(ai, &dpo);
        dpo_reset(&dpo);
    }
    else
    {
        adj_nbr_midchain_unstack(ai);
    }

    mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
                                                   FIB_NODE_TYPE_MPLS_TUNNEL,
                                                   mt - mpls_tunnel_pool);

    fib_path_list_unlock(mt->mt_path_list);
}

/**
 * @brief Call back when restacking all adjacencies on an MPLS interface
 */
static adj_walk_rc_t
mpls_adj_walk_cb (adj_index_t ai,
                  void *ctx)
{
    mpls_tunnel_stack(ai);

    return (ADJ_WALK_RC_CONTINUE);
}

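/**
 * @brief Re-stack the tunnel's forwarding. For an L2 tunnel this rebuilds
 * the load-balance used by the TX node; otherwise all the adjacencies on
 * the tunnel interface are walked and re-stacked.
 */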
static void
mpls_tunnel_restack (mpls_tunnel_t *mt)
{
    fib_protocol_t proto;

    /*
     * walk all the adjacencies on the MPLS interface and restack them
     */
    if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
    {
        /*
         * Stack a load-balance that drops, whilst we have no paths
         */
        dpo_id_t dpo = DPO_INVALID;

        mpls_tunnel_mk_lb(mt,
                          VNET_LINK_MPLS,
                          FIB_FORW_CHAIN_TYPE_ETHERNET,
                          &dpo);

        dpo_stack_from_node(mpls_tunnel_tx.index,
                            &mt->mt_l2_lb,
                            &dpo);
        dpo_reset(&dpo);
    }
    else
    {
        FOR_EACH_FIB_IP_PROTOCOL(proto)
        {
            adj_nbr_walk(mt->mt_sw_if_index,
                         proto,
                         mpls_adj_walk_cb,
                         NULL);
        }
    }
}

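/**
 * @brief Admin up/down handler; mirror the admin state onto the HW link
 * state, then re-stack the tunnel's forwarding.
 */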
static clib_error_t *
mpls_tunnel_admin_up_down (vnet_main_t * vnm,
                           u32 hw_if_index,
                           u32 flags)
{
    vnet_hw_interface_t * hi;
    mpls_tunnel_t *mt;

    hi = vnet_get_hw_interface (vnm, hw_if_index);

    mt = mpls_tunnel_get_from_sw_if_index(hi->sw_if_index);

    if (NULL == mt)
        return (NULL);

    if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
        vnet_hw_interface_set_flags (vnm, hw_if_index,
                                     VNET_HW_INTERFACE_FLAG_LINK_UP);
    else
        vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);

    mpls_tunnel_restack(mt);

    return (NULL);
}

/**
 * @brief Fixup the adj rewrite post encap. This is a no-op since the
 * rewrite is a stack of labels.
 */
static void
mpls_tunnel_fixup (vlib_main_t *vm,
                   const ip_adjacency_t *adj,
                   vlib_buffer_t *b0,
                   const void *data)
{
    /*
     * A no-op w.r.t. the header, but reset the 'have we pushed any
     * MPLS labels onto the packet' flag. That way, when we enter the
     * tunnel, the TTL will be set to 255.
     */
    vnet_buffer(b0)->mpls.first = 0;
}

static void
mpls_tunnel_update_adj (vnet_main_t * vnm,
                        u32 sw_if_index,
                        adj_index_t ai)
{
    ip_adjacency_t *adj;

    ASSERT(ADJ_INDEX_INVALID != ai);

    adj = adj_get(ai);

    switch (adj->lookup_next_index)
    {
    case IP_LOOKUP_NEXT_ARP:
    case IP_LOOKUP_NEXT_GLEAN:
    case IP_LOOKUP_NEXT_BCAST:
        adj_nbr_midchain_update_rewrite(ai, mpls_tunnel_fixup,
                                        NULL,
                                        ADJ_FLAG_NONE,
                                        mpls_tunnel_build_rewrite_i());
        break;
    case IP_LOOKUP_NEXT_MCAST:
        /*
         * Construct a partial rewrite from the known ethernet mcast dest MAC.
         * There's no MAC fixup, so the last 2 parameters are 0.
         */
        adj_mcast_midchain_update_rewrite(ai, mpls_tunnel_fixup,
                                          NULL,
                                          ADJ_FLAG_NONE,
                                          mpls_tunnel_build_rewrite_i(),
                                          0, 0);
        break;

    case IP_LOOKUP_NEXT_DROP:
    case IP_LOOKUP_NEXT_PUNT:
    case IP_LOOKUP_NEXT_LOCAL:
    case IP_LOOKUP_NEXT_REWRITE:
    case IP_LOOKUP_NEXT_MIDCHAIN:
    case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
    case IP_LOOKUP_NEXT_ICMP_ERROR:
    case IP_LOOKUP_N_NEXT:
        ASSERT (0);
        break;
    }

    mpls_tunnel_stack(ai);
}

static u8 *
format_mpls_tunnel_name (u8 * s, va_list * args)
{
    u32 dev_instance = va_arg (*args, u32);
    return format (s, "mpls-tunnel%d", dev_instance);
}

static u8 *
format_mpls_tunnel_device (u8 * s, va_list * args)
{
    u32 dev_instance = va_arg (*args, u32);
    CLIB_UNUSED (int verbose) = va_arg (*args, int);

    return (format (s, "MPLS-tunnel: id %d\n", dev_instance));
}

VNET_DEVICE_CLASS (mpls_tunnel_class) = {
    .name = "MPLS tunnel device",
    .format_device_name = format_mpls_tunnel_name,
    .format_device = format_mpls_tunnel_device,
    .format_tx_trace = format_mpls_tunnel_tx_trace,
    .admin_up_down_function = mpls_tunnel_admin_up_down,
};

VNET_HW_INTERFACE_CLASS (mpls_tunnel_hw_interface_class) = {
    .name = "MPLS-Tunnel",
    .update_adjacency = mpls_tunnel_update_adj,
    .build_rewrite = mpls_tunnel_build_rewrite,
    .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
};

const mpls_tunnel_t *
mpls_tunnel_get (u32 mti)
{
    return (pool_elt_at_index(mpls_tunnel_pool, mti));
}

/**
 * @brief Walk all the MPLS tunnels
 */
void
mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb,
                  void *ctx)
{
    u32 mti;

    pool_foreach_index (mti, mpls_tunnel_pool)
    {
        cb(mti, ctx);
    }
}

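/**
 * @brief Delete the MPLS tunnel associated with the given SW interface
 * index, if one exists.
 */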
void
vnet_mpls_tunnel_del (u32 sw_if_index)
{
    mpls_tunnel_t *mt;

    mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);

    if (NULL == mt)
        return;

    if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
        fib_path_list_child_remove(mt->mt_path_list,
                                   mt->mt_sibling_index);
    dpo_reset(&mt->mt_l2_lb);

    vnet_reset_interface_l3_output_node (vlib_get_main (), mt->mt_sw_if_index);
    vnet_delete_hw_interface (vnet_get_main(), mt->mt_hw_if_index);

    pool_put(mpls_tunnel_pool, mt);
    mpls_tunnel_db[sw_if_index] = ~0;
}

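/**
 * @brief Create a new MPLS tunnel interface.
 *
 * @return the SW interface index of the new tunnel
 */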
u32
vnet_mpls_tunnel_create (u8 l2_only,
                         u8 is_multicast,
                         u8 *tag)
{
    vnet_hw_interface_t * hi;
    mpls_tunnel_t *mt;
    vnet_main_t * vnm;
    u32 mti;

    vnm = vnet_get_main();
    pool_get(mpls_tunnel_pool, mt);
    clib_memset (mt, 0, sizeof (*mt));
    mti = mt - mpls_tunnel_pool;
    fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL);
    mt->mt_path_list = FIB_NODE_INDEX_INVALID;
    mt->mt_sibling_index = FIB_NODE_INDEX_INVALID;

    if (is_multicast)
        mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST;
    if (l2_only)
        mt->mt_flags |= MPLS_TUNNEL_FLAG_L2;
    if (tag)
        memcpy(mt->mt_tag, tag, sizeof(mt->mt_tag));
    else
        mt->mt_tag[0] = '\0';

    /*
     * Create a new tunnel HW interface
     */
    mt->mt_hw_if_index = vnet_register_interface(
        vnm,
        mpls_tunnel_class.index,
        mti,
        mpls_tunnel_hw_interface_class.index,
        mti);
    hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);

    if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
        vnet_set_interface_output_node (vnm, mt->mt_hw_if_index,
                                        mpls_tunnel_tx.index);
    else
        vnet_set_interface_l3_output_node (vnm->vlib_main, hi->sw_if_index,
                                           (u8 *) "tunnel-output");

    /* Standard default MPLS tunnel MTU. */
    vnet_sw_interface_set_mtu (vnm, hi->sw_if_index, 9000);

    /*
     * Add the new tunnel to the tunnel DB - key:SW if index
     */
    mt->mt_sw_if_index = hi->sw_if_index;
    vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0);
    mpls_tunnel_db[mt->mt_sw_if_index] = mti;

    return (mt->mt_sw_if_index);
}

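/**
 * @brief Add paths to the tunnel; the path-list is created on first use
 * and the tunnel's forwarding is then re-stacked.
 */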
void
vnet_mpls_tunnel_path_add (u32 sw_if_index,
                           fib_route_path_t *rpaths)
{
    fib_route_path_t *rpath;
    mpls_tunnel_t *mt;
    u32 mti;

    mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);

    if (NULL == mt)
        return;

    mti = mt - mpls_tunnel_pool;

    /*
     * construct a path-list from the path provided
     */
    if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
    {
        mt->mt_path_list = fib_path_list_create(FIB_PATH_LIST_FLAG_SHARED, rpaths);
        mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
                                                       FIB_NODE_TYPE_MPLS_TUNNEL,
                                                       mti);
    }
    else
    {
        fib_node_index_t old_pl_index;

        old_pl_index = mt->mt_path_list;

        mt->mt_path_list =
            fib_path_list_copy_and_path_add(old_pl_index,
                                            FIB_PATH_LIST_FLAG_SHARED,
                                            rpaths);

        fib_path_list_child_remove(old_pl_index,
                                   mt->mt_sibling_index);
        mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
                                                       FIB_NODE_TYPE_MPLS_TUNNEL,
                                                       mti);
        /*
         * re-resolve all the path-extensions with the new path-list
         */
        fib_path_ext_list_resolve(&mt->mt_path_exts, mt->mt_path_list);
    }
    vec_foreach(rpath, rpaths)
    {
        fib_path_ext_list_insert(&mt->mt_path_exts,
                                 mt->mt_path_list,
                                 FIB_PATH_EXT_MPLS,
                                 rpath);
    }
    mpls_tunnel_restack(mt);
}

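/**
 * @brief Remove paths from the tunnel.
 *
 * @return the number of paths remaining on the tunnel's path-list
 */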
int
vnet_mpls_tunnel_path_remove (u32 sw_if_index,
                              fib_route_path_t *rpaths)
{
    mpls_tunnel_t *mt;
    u32 mti;

    mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);

    if (NULL == mt)
        return (0);

    mti = mt - mpls_tunnel_pool;

    /*
     * construct a path-list without the path provided
     */
    if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
    {
        /* can't remove a path if we don't have any */
        return (0);
    }
    else
    {
        fib_node_index_t old_pl_index;

        old_pl_index = mt->mt_path_list;

        fib_path_list_lock(old_pl_index);
        mt->mt_path_list =
            fib_path_list_copy_and_path_remove(old_pl_index,
                                               FIB_PATH_LIST_FLAG_SHARED,
                                               rpaths);

        fib_path_list_child_remove(old_pl_index,
                                   mt->mt_sibling_index);

        if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
        {
            /* no paths left */
            fib_path_list_unlock(old_pl_index);
            return (0);
        }
        else
        {
            mt->mt_sibling_index =
                fib_path_list_child_add(mt->mt_path_list,
                                        FIB_NODE_TYPE_MPLS_TUNNEL,
                                        mti);
        }
        /*
         * find the matching path extension and remove it
         */
        fib_path_ext_list_remove(&mt->mt_path_exts,
                                 FIB_PATH_EXT_MPLS,
                                 rpaths);

        /*
         * re-resolve all the path-extensions with the new path-list
         */
        fib_path_ext_list_resolve(&mt->mt_path_exts,
                                  mt->mt_path_list);

        mpls_tunnel_restack(mt);
        fib_path_list_unlock(old_pl_index);
    }

    return (fib_path_list_get_n_paths(mt->mt_path_list));
}

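/**
 * @brief Return the tunnel pool index for a SW interface index, or ~0 if
 * the interface is not an MPLS tunnel.
 */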
int
vnet_mpls_tunnel_get_index (u32 sw_if_index)
{
    mpls_tunnel_t *mt;

    mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);

    if (NULL == mt)
        return (~0);

    return (mt - mpls_tunnel_pool);
}

static clib_error_t *
vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm,
                                    unformat_input_t * input,
                                    vlib_cli_command_t * cmd)
{
    unformat_input_t _line_input, * line_input = &_line_input;
    vnet_main_t * vnm = vnet_get_main();
    u8 is_del = 0, l2_only = 0, is_multicast = 0;
    fib_route_path_t rpath, *rpaths = NULL;
    u32 sw_if_index = ~0, payload_proto;
    clib_error_t *error = NULL;

    clib_memset(&rpath, 0, sizeof(rpath));
    payload_proto = DPO_PROTO_MPLS;

    /* Get a line of input. */
    if (! unformat_user (input, unformat_line_input, line_input))
        return 0;

    while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (line_input, "del %U",
                      unformat_vnet_sw_interface, vnm,
                      &sw_if_index))
            is_del = 1;
        else if (unformat (line_input, "add %U",
                           unformat_vnet_sw_interface, vnm,
                           &sw_if_index))
            is_del = 0;
        else if (unformat (line_input, "add"))
            is_del = 0;
        else if (unformat (line_input, "l2-only"))
            l2_only = 1;
        else if (unformat (line_input, "multicast"))
            is_multicast = 1;
        else if (unformat (line_input, "via %U",
                           unformat_fib_route_path,
                           &rpath, &payload_proto))
            vec_add1(rpaths, rpath);
        else
        {
            error = clib_error_return (0, "unknown input '%U'",
                                       format_unformat_error, line_input);
            goto done;
        }
    }

    if (is_del)
    {
        if (NULL == rpaths)
        {
            vnet_mpls_tunnel_del(sw_if_index);
        }
        else if (!vnet_mpls_tunnel_path_remove(sw_if_index, rpaths))
        {
            vnet_mpls_tunnel_del(sw_if_index);
        }
    }
    else
    {
        if (0 == vec_len(rpath.frp_label_stack))
        {
            error = clib_error_return (0, "No Output Labels '%U'",
                                       format_unformat_error, line_input);
            goto done;
        }

        if (~0 == sw_if_index)
        {
            sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast, NULL);
        }
        vnet_mpls_tunnel_path_add(sw_if_index, rpaths);
    }

done:
    vec_free(rpaths);
    unformat_free (line_input);

    return error;
}

/*?
 * This command creates a uni-directional MPLS tunnel
 *
 * @cliexpar
 * @cliexstart{create mpls tunnel}
 * create mpls tunnel via 10.0.0.1 GigEthernet0/8/0 out-label 33 out-label 34
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (create_mpls_tunnel_command, static) = {
    .path = "mpls tunnel",
    .short_help =
    "mpls tunnel [multicast] [l2-only] via [next-hop-address] [next-hop-interface] [next-hop-table <value>] [weight <value>] [preference <value>] [udp-encap-id <value>] [ip4-lookup-in-table <value>] [ip6-lookup-in-table <value>] [mpls-lookup-in-table <value>] [resolve-via-host] [resolve-via-connected] [rx-ip4 <interface>] [out-labels <value value value>]",
    .function = vnet_create_mpls_tunnel_command_fn,
};

static u8 *
format_mpls_tunnel (u8 * s, va_list * args)
{
    mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *);
    mpls_tunnel_attribute_t attr;

    s = format(s, "mpls-tunnel%d: sw_if_index:%d hw_if_index:%d",
               mt - mpls_tunnel_pool,
               mt->mt_sw_if_index,
               mt->mt_hw_if_index);
    if (MPLS_TUNNEL_FLAG_NONE != mt->mt_flags) {
        s = format(s, " \n flags:");
        FOR_EACH_MPLS_TUNNEL_ATTRIBUTE(attr) {
            if ((1<<attr) & mt->mt_flags) {
                s = format (s, "%s,", mpls_tunnel_attribute_names[attr]);
            }
        }
    }
    s = format(s, "\n via:\n");
    s = fib_path_list_format(mt->mt_path_list, s);
    s = format(s, "%U", format_fib_path_ext_list, &mt->mt_path_exts);
    s = format(s, "\n");

    if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
    {
        s = format(s, " forwarding: %U\n",
                   format_fib_forw_chain_type,
                   FIB_FORW_CHAIN_TYPE_ETHERNET);
        s = format(s, " %U\n", format_dpo_id, &mt->mt_l2_lb, 2);
    }

    return (s);
}

static clib_error_t *
show_mpls_tunnel_command_fn (vlib_main_t * vm,
                             unformat_input_t * input,
                             vlib_cli_command_t * cmd)
{
    mpls_tunnel_t * mt;
    u32 mti = ~0;

    if (pool_elts (mpls_tunnel_pool) == 0)
        vlib_cli_output (vm, "No MPLS tunnels configured...");

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &mti))
            ;
        else
            break;
    }

    if (~0 == mti)
    {
        pool_foreach (mt, mpls_tunnel_pool)
        {
            vlib_cli_output (vm, "[@%d] %U",
                             mt - mpls_tunnel_pool,
                             format_mpls_tunnel, mt);
        }
    }
    else
    {
        if (pool_is_free_index(mpls_tunnel_pool, mti))
            return clib_error_return (0, "Not a tunnel index %d", mti);

        mt = pool_elt_at_index(mpls_tunnel_pool, mti);

        vlib_cli_output (vm, "[@%d] %U",
                         mt - mpls_tunnel_pool,
                         format_mpls_tunnel, mt);
    }

    return 0;
}

/*?
 * This command shows MPLS tunnels
 *
 * @cliexpar
 * @cliexstart{sh mpls tunnel 2}
 * [@2] mpls_tunnel2: sw_if_index:5 hw_if_index:5
 *  label-stack:
 *    3,
 *  via:
 *   index:26 locks:1 proto:ipv4 uPRF-list:26 len:1 itfs:[2, ]
 *     index:26 pl-index:26 ipv4 weight=1 attached-nexthop: oper-flags:resolved,
 *      10.0.0.2 loop0
 *       [@0]: ipv4 via 10.0.0.2 loop0: IP4: de:ad:00:00:00:00 -> 00:00:11:aa:bb:cc
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = {
    .path = "show mpls tunnel",
    .function = show_mpls_tunnel_command_fn,
};

static mpls_tunnel_t *
mpls_tunnel_from_fib_node (fib_node_t *node)
{
    ASSERT(FIB_NODE_TYPE_MPLS_TUNNEL == node->fn_type);
    return ((mpls_tunnel_t*) (((char*)node) -
                              STRUCT_OFFSET_OF(mpls_tunnel_t, mt_node)));
}

/**
 * Function definition to backwalk a FIB node
 */
static fib_node_back_walk_rc_t
mpls_tunnel_back_walk (fib_node_t *node,
                       fib_node_back_walk_ctx_t *ctx)
{
    mpls_tunnel_restack(mpls_tunnel_from_fib_node(node));

    return (FIB_NODE_BACK_WALK_CONTINUE);
}

/**
 * Function definition to get a FIB node from its index
 */
static fib_node_t*
mpls_tunnel_fib_node_get (fib_node_index_t index)
{
    mpls_tunnel_t * mt;

    mt = pool_elt_at_index(mpls_tunnel_pool, index);

    return (&mt->mt_node);
}

/**
 * Function definition to inform the FIB node that its last lock has gone.
 */
static void
mpls_tunnel_last_lock_gone (fib_node_t *node)
{
    /*
     * The MPLS tunnel is a root of the graph. As such
     * it never has children and thus is never locked.
     */
    ASSERT(0);
}

/*
 * Virtual function table registered by MPLS tunnels
 * for participation in the FIB object graph.
 */
const static fib_node_vft_t mpls_vft = {
    .fnv_get = mpls_tunnel_fib_node_get,
    .fnv_last_lock = mpls_tunnel_last_lock_gone,
    .fnv_back_walk = mpls_tunnel_back_walk,
};

static clib_error_t *
mpls_tunnel_init (vlib_main_t *vm)
{
    fib_node_register_type(FIB_NODE_TYPE_MPLS_TUNNEL, &mpls_vft);

    return 0;
}
VLIB_INIT_FUNCTION(mpls_tunnel_init);