blob: b178a902e32e1dbe6feb6327c64f5f182e807e6d [file] [log] [blame]
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include <vnet/ip/ip.h>
17#include <vnet/dpo/mpls_label_dpo.h>
18#include <vnet/mpls/mpls.h>
19
/*
 * pool of all MPLS Label DPOs; an mpls_label_dpo_t is addressed by its
 * pool index (see mpls_label_dpo_get_index / mpls_label_dpo_get)
 */
mpls_label_dpo_t *mpls_label_dpo_pool;
24
25static mpls_label_dpo_t *
26mpls_label_dpo_alloc (void)
27{
28 mpls_label_dpo_t *mld;
29
30 pool_get_aligned(mpls_label_dpo_pool, mld, CLIB_CACHE_LINE_BYTES);
31 memset(mld, 0, sizeof(*mld));
32
33 dpo_reset(&mld->mld_dpo);
34
35 return (mld);
36}
37
38static index_t
39mpls_label_dpo_get_index (mpls_label_dpo_t *mld)
40{
41 return (mld - mpls_label_dpo_pool);
42}
43
44index_t
Neale Rannsad422ed2016-11-02 14:20:04 +000045mpls_label_dpo_create (mpls_label_t *label_stack,
Neale Ranns0bfe5d82016-08-25 15:29:12 +010046 mpls_eos_bit_t eos,
47 u8 ttl,
48 u8 exp,
Neale Rannsad422ed2016-11-02 14:20:04 +000049 dpo_proto_t payload_proto,
Neale Ranns0bfe5d82016-08-25 15:29:12 +010050 const dpo_id_t *dpo)
51{
52 mpls_label_dpo_t *mld;
Neale Rannsad422ed2016-11-02 14:20:04 +000053 u32 ii;
Neale Ranns0bfe5d82016-08-25 15:29:12 +010054
55 mld = mpls_label_dpo_alloc();
Neale Rannsad422ed2016-11-02 14:20:04 +000056 mld->mld_n_labels = vec_len(label_stack);
Neale Ranns9ca18c62016-12-10 21:08:09 +000057 mld->mld_n_hdr_bytes = mld->mld_n_labels * sizeof(mld->mld_hdr[0]);
Neale Rannsad422ed2016-11-02 14:20:04 +000058 mld->mld_payload_proto = payload_proto;
Neale Ranns0bfe5d82016-08-25 15:29:12 +010059
60 /*
Neale Rannsad422ed2016-11-02 14:20:04 +000061 * construct label rewrite headers for each value value passed.
Neale Ranns0bfe5d82016-08-25 15:29:12 +010062 * get the header in network byte order since we will paint it
63 * on a packet in the data-plane
64 */
Neale Ranns0bfe5d82016-08-25 15:29:12 +010065
Neale Rannsad422ed2016-11-02 14:20:04 +000066 for (ii = 0; ii < mld->mld_n_labels-1; ii++)
67 {
68 vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
69 vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, 255);
70 vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, 0);
71 vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, MPLS_NON_EOS);
72 mld->mld_hdr[ii].label_exp_s_ttl =
73 clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
74 }
75
76 /*
77 * the inner most label
78 */
79 ii = mld->mld_n_labels-1;
80
81 vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
82 vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, ttl);
83 vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, exp);
84 vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos);
85 mld->mld_hdr[ii].label_exp_s_ttl =
86 clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
87
88 /*
89 * stack this label objct on its parent.
90 */
91 dpo_stack(DPO_MPLS_LABEL,
92 mld->mld_payload_proto,
93 &mld->mld_dpo,
94 dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +010095
96 return (mpls_label_dpo_get_index(mld));
97}
98
99u8*
100format_mpls_label_dpo (u8 *s, va_list *args)
101{
102 index_t index = va_arg (*args, index_t);
103 u32 indent = va_arg (*args, u32);
104 mpls_unicast_header_t hdr;
105 mpls_label_dpo_t *mld;
Neale Rannsad422ed2016-11-02 14:20:04 +0000106 u32 ii;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100107
108 mld = mpls_label_dpo_get(index);
109
Neale Ranns8fe8cc22016-11-01 10:05:08 +0000110 s = format(s, "mpls-label:[%d]:", index);
Neale Rannsad422ed2016-11-02 14:20:04 +0000111
112 for (ii = 0; ii < mld->mld_n_labels; ii++)
113 {
114 hdr.label_exp_s_ttl =
115 clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
116 s = format(s, "%U", format_mpls_header, hdr);
117 }
118
119 s = format(s, "\n%U", format_white_space, indent);
Neale Ranns8fe8cc22016-11-01 10:05:08 +0000120 s = format(s, "%U", format_dpo_id, &mld->mld_dpo, indent+2);
121
122 return (s);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100123}
124
125static void
126mpls_label_dpo_lock (dpo_id_t *dpo)
127{
128 mpls_label_dpo_t *mld;
129
130 mld = mpls_label_dpo_get(dpo->dpoi_index);
131
132 mld->mld_locks++;
133}
134
135static void
136mpls_label_dpo_unlock (dpo_id_t *dpo)
137{
138 mpls_label_dpo_t *mld;
139
140 mld = mpls_label_dpo_get(dpo->dpoi_index);
141
142 mld->mld_locks--;
143
144 if (0 == mld->mld_locks)
145 {
146 dpo_reset(&mld->mld_dpo);
147 pool_put(mpls_label_dpo_pool, mld);
148 }
149}
150
/**
 * @brief A struct to hold tracing information for the MPLS label imposition
 * node.
 */
typedef struct mpls_label_imposition_trace_t_
{
    /**
     * The MPLS header imposed (stored in network byte order, as painted
     * onto the packet)
     */
    mpls_unicast_header_t hdr;
} mpls_label_imposition_trace_t;
162
Neale Ranns696e88d2017-03-16 07:34:55 -0400163always_inline mpls_unicast_header_t *
164mpls_label_paint (vlib_buffer_t * b0,
165 mpls_label_dpo_t *mld0,
166 u8 ttl0)
167{
168 mpls_unicast_header_t *hdr0;
169
170 vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
171
172 hdr0 = vlib_buffer_get_current(b0);
173
Pablo Camarillo5d73eec2017-04-24 17:51:56 +0200174 if (1 == mld0->mld_n_labels)
Neale Ranns696e88d2017-03-16 07:34:55 -0400175 {
176 /* optimise for the common case of one label */
177 *hdr0 = mld0->mld_hdr[0];
178 }
179 else
180 {
181 clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
182 hdr0 = hdr0 + (mld0->mld_n_labels - 1);
183 }
184 /* fixup the TTL for the inner most label */
185 ((char*)hdr0)[3] = ttl0;
186
187 return (hdr0);
188}
189
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100190always_inline uword
Neale Rannsad422ed2016-11-02 14:20:04 +0000191mpls_label_imposition_inline (vlib_main_t * vm,
192 vlib_node_runtime_t * node,
193 vlib_frame_t * from_frame,
194 u8 payload_is_ip4,
Neale Rannsda78f952017-05-24 09:15:43 -0700195 u8 payload_is_ip6,
196 u8 payload_is_ethernet)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100197{
198 u32 n_left_from, next_index, * from, * to_next;
199
200 from = vlib_frame_vector_args (from_frame);
201 n_left_from = from_frame->n_vectors;
202
203 next_index = node->cached_next_index;
204
205 while (n_left_from > 0)
206 {
207 u32 n_left_to_next;
208
209 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
210
Neale Ranns696e88d2017-03-16 07:34:55 -0400211 while (n_left_from >= 8 && n_left_to_next >= 4)
Neale Ranns9ca18c62016-12-10 21:08:09 +0000212 {
Neale Ranns696e88d2017-03-16 07:34:55 -0400213 u32 bi0, mldi0, bi1, mldi1, bi2, mldi2, bi3, mldi3;
214 mpls_unicast_header_t *hdr0, *hdr1, *hdr2, *hdr3;
215 mpls_label_dpo_t *mld0, *mld1, *mld2, *mld3;
216 vlib_buffer_t * b0, *b1, * b2, *b3;
217 u32 next0, next1, next2, next3;
218 u8 ttl0, ttl1,ttl2, ttl3 ;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000219
220 bi0 = to_next[0] = from[0];
221 bi1 = to_next[1] = from[1];
Neale Ranns696e88d2017-03-16 07:34:55 -0400222 bi2 = to_next[2] = from[2];
223 bi3 = to_next[3] = from[3];
Neale Ranns9ca18c62016-12-10 21:08:09 +0000224
225 /* Prefetch next iteration. */
226 {
Neale Ranns696e88d2017-03-16 07:34:55 -0400227 vlib_buffer_t * p2, * p3, *p4, *p5;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000228
229 p2 = vlib_get_buffer (vm, from[2]);
230 p3 = vlib_get_buffer (vm, from[3]);
Neale Ranns696e88d2017-03-16 07:34:55 -0400231 p4 = vlib_get_buffer (vm, from[4]);
232 p5 = vlib_get_buffer (vm, from[5]);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000233
234 vlib_prefetch_buffer_header (p2, STORE);
235 vlib_prefetch_buffer_header (p3, STORE);
Neale Ranns696e88d2017-03-16 07:34:55 -0400236 vlib_prefetch_buffer_header (p4, STORE);
237 vlib_prefetch_buffer_header (p5, STORE);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000238
239 CLIB_PREFETCH (p2->data, sizeof (hdr0[0]), STORE);
240 CLIB_PREFETCH (p3->data, sizeof (hdr0[0]), STORE);
Neale Ranns696e88d2017-03-16 07:34:55 -0400241 CLIB_PREFETCH (p4->data, sizeof (hdr0[0]), STORE);
242 CLIB_PREFETCH (p5->data, sizeof (hdr0[0]), STORE);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000243 }
244
Neale Ranns696e88d2017-03-16 07:34:55 -0400245 from += 4;
246 to_next += 4;
247 n_left_from -= 4;
248 n_left_to_next -= 4;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000249
250 b0 = vlib_get_buffer (vm, bi0);
251 b1 = vlib_get_buffer (vm, bi1);
Neale Ranns696e88d2017-03-16 07:34:55 -0400252 b2 = vlib_get_buffer (vm, bi2);
253 b3 = vlib_get_buffer (vm, bi3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000254
255 /* dst lookup was done by ip4 lookup */
256 mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
257 mldi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
Neale Ranns696e88d2017-03-16 07:34:55 -0400258 mldi2 = vnet_buffer(b2)->ip.adj_index[VLIB_TX];
259 mldi3 = vnet_buffer(b3)->ip.adj_index[VLIB_TX];
Neale Ranns9ca18c62016-12-10 21:08:09 +0000260 mld0 = mpls_label_dpo_get(mldi0);
261 mld1 = mpls_label_dpo_get(mldi1);
Neale Ranns696e88d2017-03-16 07:34:55 -0400262 mld2 = mpls_label_dpo_get(mldi2);
263 mld3 = mpls_label_dpo_get(mldi3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000264
265 if (payload_is_ip4)
266 {
267 /*
268 * decrement the TTL on ingress to the LSP
269 */
270 ip4_header_t * ip0 = vlib_buffer_get_current(b0);
271 ip4_header_t * ip1 = vlib_buffer_get_current(b1);
Neale Ranns696e88d2017-03-16 07:34:55 -0400272 ip4_header_t * ip2 = vlib_buffer_get_current(b2);
273 ip4_header_t * ip3 = vlib_buffer_get_current(b3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000274 u32 checksum0;
275 u32 checksum1;
Neale Ranns696e88d2017-03-16 07:34:55 -0400276 u32 checksum2;
277 u32 checksum3;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000278
279 checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
280 checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
Neale Ranns696e88d2017-03-16 07:34:55 -0400281 checksum2 = ip2->checksum + clib_host_to_net_u16 (0x0100);
282 checksum3 = ip3->checksum + clib_host_to_net_u16 (0x0100);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000283
284 checksum0 += checksum0 >= 0xffff;
285 checksum1 += checksum1 >= 0xffff;
Neale Ranns696e88d2017-03-16 07:34:55 -0400286 checksum2 += checksum2 >= 0xffff;
287 checksum3 += checksum3 >= 0xffff;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000288
289 ip0->checksum = checksum0;
290 ip1->checksum = checksum1;
Neale Ranns696e88d2017-03-16 07:34:55 -0400291 ip2->checksum = checksum2;
292 ip3->checksum = checksum3;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000293
294 ip0->ttl -= 1;
295 ip1->ttl -= 1;
Neale Ranns696e88d2017-03-16 07:34:55 -0400296 ip2->ttl -= 1;
297 ip3->ttl -= 1;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000298
299 ttl1 = ip1->ttl;
300 ttl0 = ip0->ttl;
Neale Ranns696e88d2017-03-16 07:34:55 -0400301 ttl3 = ip3->ttl;
302 ttl2 = ip2->ttl;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000303 }
304 else if (payload_is_ip6)
305 {
306 /*
307 * decrement the TTL on ingress to the LSP
308 */
309 ip6_header_t * ip0 = vlib_buffer_get_current(b0);
310 ip6_header_t * ip1 = vlib_buffer_get_current(b1);
Neale Ranns696e88d2017-03-16 07:34:55 -0400311 ip6_header_t * ip2 = vlib_buffer_get_current(b2);
312 ip6_header_t * ip3 = vlib_buffer_get_current(b3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000313
314 ip0->hop_limit -= 1;
315 ip1->hop_limit -= 1;
Neale Ranns696e88d2017-03-16 07:34:55 -0400316 ip2->hop_limit -= 1;
317 ip3->hop_limit -= 1;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000318
319 ttl0 = ip0->hop_limit;
320 ttl1 = ip1->hop_limit;
Neale Ranns696e88d2017-03-16 07:34:55 -0400321 ttl2 = ip2->hop_limit;
322 ttl3 = ip3->hop_limit;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000323 }
Neale Rannsda78f952017-05-24 09:15:43 -0700324 else if (payload_is_ethernet)
325 {
326 /*
327 * nothing to chang ein the ethernet header
328 */
329 ttl0 = ttl1 = ttl2 = ttl3 = 255;
330 }
Neale Ranns9ca18c62016-12-10 21:08:09 +0000331 else
332 {
333 /*
334 * else, the packet to be encapped is an MPLS packet
335 */
336 if (PREDICT_TRUE(vnet_buffer(b0)->mpls.first))
337 {
338 /*
339 * The first label to be imposed on the packet. this is a label swap.
340 * in which case we stashed the TTL and EXP bits in the
341 * packet in the lookup node
342 */
343 ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
344
345 ttl0 = vnet_buffer(b0)->mpls.ttl - 1;
346 }
347 else
348 {
349 /*
350 * not the first label. implying we are recusring down a chain of
351 * output labels.
352 * Each layer is considered a new LSP - hence the TTL is reset.
353 */
354 ttl0 = 255;
355 }
356 if (PREDICT_TRUE(vnet_buffer(b1)->mpls.first))
357 {
358 ASSERT(1 != vnet_buffer (b1)->mpls.ttl);
359 ttl1 = vnet_buffer(b1)->mpls.ttl - 1;
360 }
361 else
362 {
363 ttl1 = 255;
364 }
Neale Ranns696e88d2017-03-16 07:34:55 -0400365 if (PREDICT_TRUE(vnet_buffer(b2)->mpls.first))
366 {
Neale Ranns71275e32017-05-25 12:38:58 -0700367 ASSERT(1 != vnet_buffer (b2)->mpls.ttl);
Neale Ranns696e88d2017-03-16 07:34:55 -0400368
369 ttl2 = vnet_buffer(b2)->mpls.ttl - 1;
370 }
371 else
372 {
373 ttl2 = 255;
374 }
375 if (PREDICT_TRUE(vnet_buffer(b3)->mpls.first))
376 {
377 ASSERT(1 != vnet_buffer (b3)->mpls.ttl);
378 ttl3 = vnet_buffer(b3)->mpls.ttl - 1;
379 }
380 else
381 {
382 ttl3 = 255;
383 }
Neale Ranns9ca18c62016-12-10 21:08:09 +0000384 }
385 vnet_buffer(b0)->mpls.first = 0;
386 vnet_buffer(b1)->mpls.first = 0;
Neale Ranns696e88d2017-03-16 07:34:55 -0400387 vnet_buffer(b2)->mpls.first = 0;
388 vnet_buffer(b3)->mpls.first = 0;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000389
390 /* Paint the MPLS header */
Neale Ranns696e88d2017-03-16 07:34:55 -0400391 hdr0 = mpls_label_paint(b0, mld0, ttl0);
392 hdr1 = mpls_label_paint(b1, mld1, ttl1);
393 hdr2 = mpls_label_paint(b2, mld2, ttl2);
394 hdr3 = mpls_label_paint(b3, mld3, ttl3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000395
396 next0 = mld0->mld_dpo.dpoi_next_node;
397 next1 = mld1->mld_dpo.dpoi_next_node;
Neale Ranns696e88d2017-03-16 07:34:55 -0400398 next2 = mld2->mld_dpo.dpoi_next_node;
399 next3 = mld3->mld_dpo.dpoi_next_node;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000400 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
401 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mld1->mld_dpo.dpoi_index;
Neale Ranns696e88d2017-03-16 07:34:55 -0400402 vnet_buffer(b2)->ip.adj_index[VLIB_TX] = mld2->mld_dpo.dpoi_index;
403 vnet_buffer(b3)->ip.adj_index[VLIB_TX] = mld3->mld_dpo.dpoi_index;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000404
405 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
406 {
407 mpls_label_imposition_trace_t *tr =
408 vlib_add_trace (vm, node, b0, sizeof (*tr));
409 tr->hdr = *hdr0;
410 }
411 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
412 {
413 mpls_label_imposition_trace_t *tr =
414 vlib_add_trace (vm, node, b1, sizeof (*tr));
415 tr->hdr = *hdr1;
416 }
Neale Ranns696e88d2017-03-16 07:34:55 -0400417 if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
418 {
419 mpls_label_imposition_trace_t *tr =
420 vlib_add_trace (vm, node, b2, sizeof (*tr));
421 tr->hdr = *hdr2;
422 }
423 if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
424 {
425 mpls_label_imposition_trace_t *tr =
426 vlib_add_trace (vm, node, b3, sizeof (*tr));
427 tr->hdr = *hdr3;
428 }
Neale Ranns9ca18c62016-12-10 21:08:09 +0000429
Neale Ranns696e88d2017-03-16 07:34:55 -0400430 vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next,
Neale Ranns9ca18c62016-12-10 21:08:09 +0000431 n_left_to_next,
Neale Ranns696e88d2017-03-16 07:34:55 -0400432 bi0, bi1, bi2, bi3,
433 next0, next1, next2, next3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000434 }
435
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100436 while (n_left_from > 0 && n_left_to_next > 0)
437 {
438 mpls_unicast_header_t *hdr0;
439 mpls_label_dpo_t *mld0;
440 vlib_buffer_t * b0;
441 u32 bi0, mldi0;
442 u32 next0;
Neale Rannsad422ed2016-11-02 14:20:04 +0000443 u8 ttl;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100444
445 bi0 = from[0];
446 to_next[0] = bi0;
447 from += 1;
448 to_next += 1;
449 n_left_from -= 1;
450 n_left_to_next -= 1;
451
452 b0 = vlib_get_buffer (vm, bi0);
453
454 /* dst lookup was done by ip4 lookup */
455 mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
456 mld0 = mpls_label_dpo_get(mldi0);
457
Neale Rannsad422ed2016-11-02 14:20:04 +0000458 if (payload_is_ip4)
459 {
460 /*
461 * decrement the TTL on ingress to the LSP
462 */
463 ip4_header_t * ip0 = vlib_buffer_get_current(b0);
464 u32 checksum0;
465
466 checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
467 checksum0 += checksum0 >= 0xffff;
468
469 ip0->checksum = checksum0;
470 ip0->ttl -= 1;
471 ttl = ip0->ttl;
472 }
473 else if (payload_is_ip6)
474 {
475 /*
476 * decrement the TTL on ingress to the LSP
477 */
478 ip6_header_t * ip0 = vlib_buffer_get_current(b0);
479
480 ip0->hop_limit -= 1;
481 ttl = ip0->hop_limit;
482 }
483 else
484 {
485 /*
486 * else, the packet to be encapped is an MPLS packet
487 */
488 if (vnet_buffer(b0)->mpls.first)
489 {
490 /*
491 * The first label to be imposed on the packet. this is a label swap.
492 * in which case we stashed the TTL and EXP bits in the
493 * packet in the lookup node
494 */
495 ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
496
497 ttl = vnet_buffer(b0)->mpls.ttl - 1;
498 }
499 else
500 {
501 /*
502 * not the first label. implying we are recusring down a chain of
503 * output labels.
504 * Each layer is considered a new LSP - hence the TTL is reset.
505 */
506 ttl = 255;
507 }
508 }
509 vnet_buffer(b0)->mpls.first = 0;
510
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100511 /* Paint the MPLS header */
Neale Ranns9ca18c62016-12-10 21:08:09 +0000512 vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100513 hdr0 = vlib_buffer_get_current(b0);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000514 clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
Neale Rannsad422ed2016-11-02 14:20:04 +0000515
516 /* fixup the TTL for the inner most label */
517 hdr0 = hdr0 + (mld0->mld_n_labels - 1);
518 ((char*)hdr0)[3] = ttl;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100519
520 next0 = mld0->mld_dpo.dpoi_next_node;
521 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
522
Neale Ranns9ca18c62016-12-10 21:08:09 +0000523 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100524 {
525 mpls_label_imposition_trace_t *tr =
526 vlib_add_trace (vm, node, b0, sizeof (*tr));
527 tr->hdr = *hdr0;
528 }
529
530 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
531 n_left_to_next, bi0, next0);
532 }
533 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
534 }
535 return from_frame->n_vectors;
536}
537
538static u8 *
539format_mpls_label_imposition_trace (u8 * s, va_list * args)
540{
541 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
542 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
543 mpls_label_imposition_trace_t * t;
544 mpls_unicast_header_t hdr;
545 uword indent;
546
547 t = va_arg (*args, mpls_label_imposition_trace_t *);
548 indent = format_get_indent (s);
549 hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
550
551 s = format (s, "%Umpls-header:%U",
552 format_white_space, indent,
553 format_mpls_header, hdr);
554 return (s);
555}
556
/**
 * @brief Node function: label imposition onto an MPLS payload
 * (all payload flags clear).
 */
static uword
mpls_label_imposition (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 0));
}

/* graph node registration; the sole next-node is the drop for errors */
VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
    .function = mpls_label_imposition,
    .name = "mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "mpls-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
                              mpls_label_imposition)
578
/**
 * @brief Node function: label imposition onto an IPv4 payload
 * (payload_is_ip4 set).
 */
static uword
ip4_mpls_label_imposition (vlib_main_t * vm,
                           vlib_node_runtime_t * node,
                           vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 1, 0, 0));
}

/* graph node registration; the sole next-node is the drop for errors */
VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
    .function = ip4_mpls_label_imposition,
    .name = "ip4-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip4-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
                              ip4_mpls_label_imposition)
600
/**
 * @brief Node function: label imposition onto an IPv6 payload
 * (payload_is_ip6 set).
 */
static uword
ip6_mpls_label_imposition (vlib_main_t * vm,
                           vlib_node_runtime_t * node,
                           vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 0, 1, 0));
}

/* graph node registration; the sole next-node is the drop for errors */
VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
    .function = ip6_mpls_label_imposition,
    .name = "ip6-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip6-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
                              ip6_mpls_label_imposition)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100622
/**
 * @brief Node function: label imposition onto an ethernet payload
 * (payload_is_ethernet set).
 */
static uword
ethernet_mpls_label_imposition (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 1));
}

/* graph node registration; the sole next-node is the drop for errors */
VLIB_REGISTER_NODE (ethernet_mpls_label_imposition_node) = {
    .function = ethernet_mpls_label_imposition,
    .name = "ethernet-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ethernet_mpls_label_imposition_node,
                              ethernet_mpls_label_imposition)
644
/**
 * @brief Report the MPLS label DPO pool usage to the FIB memory-show CLI.
 */
static void
mpls_label_dpo_mem_show (void)
{
    fib_show_memory_usage("MPLS label",
                          pool_elts(mpls_label_dpo_pool),
                          pool_len(mpls_label_dpo_pool),
                          sizeof(mpls_label_dpo_t));
}
653
/**
 * @brief Virtual function table registered for DPO_MPLS_LABEL objects.
 */
const static dpo_vft_t mld_vft = {
    .dv_lock = mpls_label_dpo_lock,
    .dv_unlock = mpls_label_dpo_unlock,
    .dv_format = format_mpls_label_dpo,
    .dv_mem_show = mpls_label_dpo_mem_show,
};
660
/*
 * per-payload-protocol graph node names for label imposition; NULL
 * terminated lists, indexed below by dpo_proto_t
 */
const static char* const mpls_label_imp_ip4_nodes[] =
{
    "ip4-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_ip6_nodes[] =
{
    "ip6-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_mpls_nodes[] =
{
    "mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_ethernet_nodes[] =
{
    "ethernet-mpls-label-imposition",
    NULL,
};

/* map from payload protocol to the node list used at DPO registration */
const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes,
    [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes,
    [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes,
    [DPO_PROTO_ETHERNET] = mpls_label_imp_ethernet_nodes,
};
689
690
/**
 * @brief Module init: register the MPLS label DPO type, its vft and the
 * per-protocol imposition nodes.
 */
void
mpls_label_dpo_module_init (void)
{
    dpo_register(DPO_MPLS_LABEL, &mld_vft, mpls_label_imp_nodes);
}