/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/ip.h>
#include <vnet/dpo/mpls_label_dpo.h>
#include <vnet/mpls/mpls.h>

/*
 * pool of all MPLS Label DPOs
 */
mpls_label_dpo_t *mpls_label_dpo_pool;

static mpls_label_dpo_t *
mpls_label_dpo_alloc (void)
{
    mpls_label_dpo_t *mld;

    pool_get_aligned(mpls_label_dpo_pool, mld, CLIB_CACHE_LINE_BYTES);
    memset(mld, 0, sizeof(*mld));

    dpo_reset(&mld->mld_dpo);

    return (mld);
}

static index_t
mpls_label_dpo_get_index (mpls_label_dpo_t *mld)
{
    return (mld - mpls_label_dpo_pool);
}
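
/*
 * The index of a label DPO is its offset into the pool above. This is the
 * value carried in a dpo_id_t's dpoi_index and used by the data-plane,
 * via mpls_label_dpo_get(), to recover the object.
 */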

index_t
mpls_label_dpo_create (mpls_label_t *label_stack,
                       mpls_eos_bit_t eos,
                       u8 ttl,
                       u8 exp,
                       dpo_proto_t payload_proto,
                       const dpo_id_t *dpo)
{
    mpls_label_dpo_t *mld;
    u32 ii;

    mld = mpls_label_dpo_alloc();
    mld->mld_n_labels = vec_len(label_stack);
    mld->mld_n_hdr_bytes = mld->mld_n_labels * sizeof(mld->mld_hdr[0]);
    mld->mld_payload_proto = payload_proto;

    /*
     * construct label rewrite headers for each value passed.
     * get the header in network byte order since we will paint it
     * on a packet in the data-plane
     */

    for (ii = 0; ii < mld->mld_n_labels-1; ii++)
    {
        vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
        vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, 255);
        vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, 0);
        vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, MPLS_NON_EOS);
        mld->mld_hdr[ii].label_exp_s_ttl =
            clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
    }

    /*
     * the innermost label
     */
    ii = mld->mld_n_labels-1;

    vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
    vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, ttl);
    vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, exp);
    vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos);
    mld->mld_hdr[ii].label_exp_s_ttl =
        clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);

    /*
     * stack this label object on its parent.
     */
    dpo_stack(DPO_MPLS_LABEL,
              mld->mld_payload_proto,
              &mld->mld_dpo,
              dpo);

    return (mpls_label_dpo_get_index(mld));
}
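
/*
 * Usage sketch (illustrative only: the label values, TTL/EXP and the parent
 * DPO are assumptions; in practice the FIB path code supplies them when it
 * stacks out-labels on a route):
 *
 *    mpls_label_t *stack = NULL;
 *    dpo_id_t parent = DPO_INVALID, dpo = DPO_INVALID;
 *
 *    vec_add1(stack, 16000);   // outer, non-EOS label
 *    vec_add1(stack, 16001);   // innermost, EOS label
 *    // ... resolve 'parent' to the DPO the labelled packet should follow ...
 *
 *    dpo_set(&dpo, DPO_MPLS_LABEL, DPO_PROTO_IP4,
 *            mpls_label_dpo_create(stack, MPLS_EOS, 64, 0,
 *                                  DPO_PROTO_IP4, &parent));
 *    vec_free(stack);
 */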

u8*
format_mpls_label_dpo (u8 *s, va_list *args)
{
    index_t index = va_arg (*args, index_t);
    u32 indent = va_arg (*args, u32);
    mpls_unicast_header_t hdr;
    mpls_label_dpo_t *mld;
    u32 ii;

    mld = mpls_label_dpo_get(index);

    s = format(s, "mpls-label:[%d]:", index);

    for (ii = 0; ii < mld->mld_n_labels; ii++)
    {
        hdr.label_exp_s_ttl =
            clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
        s = format(s, "%U", format_mpls_header, hdr);
    }

    s = format(s, "\n%U", format_white_space, indent);
    s = format(s, "%U", format_dpo_id, &mld->mld_dpo, indent+2);

    return (s);
}

static void
mpls_label_dpo_lock (dpo_id_t *dpo)
{
    mpls_label_dpo_t *mld;

    mld = mpls_label_dpo_get(dpo->dpoi_index);

    mld->mld_locks++;
}

static void
mpls_label_dpo_unlock (dpo_id_t *dpo)
{
    mpls_label_dpo_t *mld;

    mld = mpls_label_dpo_get(dpo->dpoi_index);

    mld->mld_locks--;

    if (0 == mld->mld_locks)
    {
        dpo_reset(&mld->mld_dpo);
        pool_put(mpls_label_dpo_pool, mld);
    }
}

/**
 * @brief A struct to hold tracing information for the MPLS label imposition
 * node.
 */
typedef struct mpls_label_imposition_trace_t_
{
    /**
     * The MPLS header imposed
     */
    mpls_unicast_header_t hdr;
} mpls_label_imposition_trace_t;

always_inline mpls_unicast_header_t *
mpls_label_paint (vlib_buffer_t * b0,
                  mpls_label_dpo_t *mld0,
                  u8 ttl0)
{
    mpls_unicast_header_t *hdr0;

    vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));

    hdr0 = vlib_buffer_get_current(b0);

    if (PREDICT_TRUE(1 == mld0->mld_n_labels))
    {
        /* optimise for the common case of one label */
        *hdr0 = mld0->mld_hdr[0];
    }
    else
    {
        clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
        hdr0 = hdr0 + (mld0->mld_n_labels - 1);
    }
    /* fixup the TTL for the innermost label */
    ((char*)hdr0)[3] = ttl0;

    return (hdr0);
}
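
/*
 * Note on the pointer arithmetic above: mld_hdr[] is stored outermost label
 * first, so after the copy hdr0 is advanced to the last (bottom-of-stack)
 * entry. Each entry is already in network byte order, hence the TTL is the
 * final byte of that 32-bit word, i.e. offset 3.
 */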

always_inline uword
mpls_label_imposition_inline (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              vlib_frame_t * from_frame,
                              u8 payload_is_ip4,
                              u8 payload_is_ip6)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from >= 8 && n_left_to_next >= 4)
        {
            u32 bi0, mldi0, bi1, mldi1, bi2, mldi2, bi3, mldi3;
            mpls_unicast_header_t *hdr0, *hdr1, *hdr2, *hdr3;
            mpls_label_dpo_t *mld0, *mld1, *mld2, *mld3;
            vlib_buffer_t *b0, *b1, *b2, *b3;
            u32 next0, next1, next2, next3;
            u8 ttl0, ttl1, ttl2, ttl3;

            bi0 = to_next[0] = from[0];
            bi1 = to_next[1] = from[1];
            bi2 = to_next[2] = from[2];
            bi3 = to_next[3] = from[3];

            /* Prefetch next iteration. */
            {
                vlib_buffer_t *p2, *p3, *p4, *p5;

                p2 = vlib_get_buffer (vm, from[2]);
                p3 = vlib_get_buffer (vm, from[3]);
                p4 = vlib_get_buffer (vm, from[4]);
                p5 = vlib_get_buffer (vm, from[5]);

                vlib_prefetch_buffer_header (p2, STORE);
                vlib_prefetch_buffer_header (p3, STORE);
                vlib_prefetch_buffer_header (p4, STORE);
                vlib_prefetch_buffer_header (p5, STORE);

                CLIB_PREFETCH (p2->data, sizeof (hdr0[0]), STORE);
                CLIB_PREFETCH (p3->data, sizeof (hdr0[0]), STORE);
                CLIB_PREFETCH (p4->data, sizeof (hdr0[0]), STORE);
                CLIB_PREFETCH (p5->data, sizeof (hdr0[0]), STORE);
            }

            from += 4;
            to_next += 4;
            n_left_from -= 4;
            n_left_to_next -= 4;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);
            b2 = vlib_get_buffer (vm, bi2);
            b3 = vlib_get_buffer (vm, bi3);

            /* dst lookup was done by ip4 lookup */
            mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            mldi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            mldi2 = vnet_buffer(b2)->ip.adj_index[VLIB_TX];
            mldi3 = vnet_buffer(b3)->ip.adj_index[VLIB_TX];
            mld0 = mpls_label_dpo_get(mldi0);
            mld1 = mpls_label_dpo_get(mldi1);
            mld2 = mpls_label_dpo_get(mldi2);
            mld3 = mpls_label_dpo_get(mldi3);

            if (payload_is_ip4)
            {
                /*
                 * decrement the TTL on ingress to the LSP
                 */
                ip4_header_t * ip0 = vlib_buffer_get_current(b0);
                ip4_header_t * ip1 = vlib_buffer_get_current(b1);
                ip4_header_t * ip2 = vlib_buffer_get_current(b2);
                ip4_header_t * ip3 = vlib_buffer_get_current(b3);
                u32 checksum0;
                u32 checksum1;
                u32 checksum2;
                u32 checksum3;

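                /*
                 * Standard incremental checksum update (cf. RFC 1624): the
                 * TTL is the high byte of the 16-bit word it shares with the
                 * protocol field, so decrementing the TTL by one decreases
                 * that word by 0x0100. Adding 0x0100 (in network order) to
                 * the one's-complement checksum and folding the carry
                 * compensates without re-summing the whole header.
                 */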
                checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
                checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
                checksum2 = ip2->checksum + clib_host_to_net_u16 (0x0100);
                checksum3 = ip3->checksum + clib_host_to_net_u16 (0x0100);

                checksum0 += checksum0 >= 0xffff;
                checksum1 += checksum1 >= 0xffff;
                checksum2 += checksum2 >= 0xffff;
                checksum3 += checksum3 >= 0xffff;

                ip0->checksum = checksum0;
                ip1->checksum = checksum1;
                ip2->checksum = checksum2;
                ip3->checksum = checksum3;

                ip0->ttl -= 1;
                ip1->ttl -= 1;
                ip2->ttl -= 1;
                ip3->ttl -= 1;

                ttl1 = ip1->ttl;
                ttl0 = ip0->ttl;
                ttl3 = ip3->ttl;
                ttl2 = ip2->ttl;
            }
            else if (payload_is_ip6)
            {
                /*
                 * decrement the TTL on ingress to the LSP
                 */
                ip6_header_t * ip0 = vlib_buffer_get_current(b0);
                ip6_header_t * ip1 = vlib_buffer_get_current(b1);
                ip6_header_t * ip2 = vlib_buffer_get_current(b2);
                ip6_header_t * ip3 = vlib_buffer_get_current(b3);

                ip0->hop_limit -= 1;
                ip1->hop_limit -= 1;
                ip2->hop_limit -= 1;
                ip3->hop_limit -= 1;

                ttl0 = ip0->hop_limit;
                ttl1 = ip1->hop_limit;
                ttl2 = ip2->hop_limit;
                ttl3 = ip3->hop_limit;
            }
            else
            {
                /*
                 * else, the packet to be encapped is an MPLS packet
                 */
                if (PREDICT_TRUE(vnet_buffer(b0)->mpls.first))
                {
                    /*
                     * The first label to be imposed on the packet, i.e. this
                     * is a label swap, in which case the TTL and EXP bits
                     * were stashed in the packet by the lookup node.
                     */
                    ASSERT(0 != vnet_buffer (b0)->mpls.ttl);

                    ttl0 = vnet_buffer(b0)->mpls.ttl - 1;
                }
                else
                {
                    /*
                     * Not the first label, implying we are recursing down a
                     * chain of output labels.
                     * Each layer is considered a new LSP - hence the TTL is reset.
                     */
                    ttl0 = 255;
                }
                if (PREDICT_TRUE(vnet_buffer(b1)->mpls.first))
                {
                    ASSERT(0 != vnet_buffer (b1)->mpls.ttl);
                    ttl1 = vnet_buffer(b1)->mpls.ttl - 1;
                }
                else
                {
                    ttl1 = 255;
                }
                if (PREDICT_TRUE(vnet_buffer(b2)->mpls.first))
                {
                    ASSERT(0 != vnet_buffer (b2)->mpls.ttl);

                    ttl2 = vnet_buffer(b2)->mpls.ttl - 1;
                }
                else
                {
                    ttl2 = 255;
                }
                if (PREDICT_TRUE(vnet_buffer(b3)->mpls.first))
                {
                    ASSERT(0 != vnet_buffer (b3)->mpls.ttl);
                    ttl3 = vnet_buffer(b3)->mpls.ttl - 1;
                }
                else
                {
                    ttl3 = 255;
                }
            }
            vnet_buffer(b0)->mpls.first = 0;
            vnet_buffer(b1)->mpls.first = 0;
            vnet_buffer(b2)->mpls.first = 0;
            vnet_buffer(b3)->mpls.first = 0;

            /* Paint the MPLS header */
            hdr0 = mpls_label_paint(b0, mld0, ttl0);
            hdr1 = mpls_label_paint(b1, mld1, ttl1);
            hdr2 = mpls_label_paint(b2, mld2, ttl2);
            hdr3 = mpls_label_paint(b3, mld3, ttl3);

            next0 = mld0->mld_dpo.dpoi_next_node;
            next1 = mld1->mld_dpo.dpoi_next_node;
            next2 = mld2->mld_dpo.dpoi_next_node;
            next3 = mld3->mld_dpo.dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
            vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mld1->mld_dpo.dpoi_index;
            vnet_buffer(b2)->ip.adj_index[VLIB_TX] = mld2->mld_dpo.dpoi_index;
            vnet_buffer(b3)->ip.adj_index[VLIB_TX] = mld3->mld_dpo.dpoi_index;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->hdr = *hdr0;
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b1, sizeof (*tr));
                tr->hdr = *hdr1;
            }
            if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b2, sizeof (*tr));
                tr->hdr = *hdr2;
            }
            if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b3, sizeof (*tr));
                tr->hdr = *hdr3;
            }

            vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next,
                                            n_left_to_next,
                                            bi0, bi1, bi2, bi3,
                                            next0, next1, next2, next3);
        }

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            mpls_unicast_header_t *hdr0;
            mpls_label_dpo_t *mld0;
            vlib_buffer_t * b0;
            u32 bi0, mldi0;
            u32 next0;
            u8 ttl;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            /* dst lookup was done by ip4 lookup */
            mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            mld0 = mpls_label_dpo_get(mldi0);

            if (payload_is_ip4)
            {
                /*
                 * decrement the TTL on ingress to the LSP
                 */
                ip4_header_t * ip0 = vlib_buffer_get_current(b0);
                u32 checksum0;

                checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
                checksum0 += checksum0 >= 0xffff;

                ip0->checksum = checksum0;
                ip0->ttl -= 1;
                ttl = ip0->ttl;
            }
            else if (payload_is_ip6)
            {
                /*
                 * decrement the TTL on ingress to the LSP
                 */
                ip6_header_t * ip0 = vlib_buffer_get_current(b0);

                ip0->hop_limit -= 1;
                ttl = ip0->hop_limit;
            }
            else
            {
                /*
                 * else, the packet to be encapped is an MPLS packet
                 */
                if (vnet_buffer(b0)->mpls.first)
                {
                    /*
                     * The first label to be imposed on the packet, i.e. this
                     * is a label swap, in which case the TTL and EXP bits
                     * were stashed in the packet by the lookup node.
                     */
                    ASSERT(0 != vnet_buffer (b0)->mpls.ttl);

                    ttl = vnet_buffer(b0)->mpls.ttl - 1;
                }
                else
                {
                    /*
                     * Not the first label, implying we are recursing down a
                     * chain of output labels.
                     * Each layer is considered a new LSP - hence the TTL is reset.
                     */
                    ttl = 255;
                }
            }
            vnet_buffer(b0)->mpls.first = 0;

            /* Paint the MPLS header */
            vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
            hdr0 = vlib_buffer_get_current(b0);
            clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);

            /* fixup the TTL for the innermost label */
            hdr0 = hdr0 + (mld0->mld_n_labels - 1);
            ((char*)hdr0)[3] = ttl;

            next0 = mld0->mld_dpo.dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->hdr = *hdr0;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}

static u8 *
format_mpls_label_imposition_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    mpls_label_imposition_trace_t * t;
    mpls_unicast_header_t hdr;
    uword indent;

    t = va_arg (*args, mpls_label_imposition_trace_t *);
    indent = format_get_indent (s);
    hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);

    s = format (s, "%Umpls-header:%U",
                format_white_space, indent,
                format_mpls_header, hdr);
    return (s);
}

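/*
 * The inline above is instantiated three times below with the payload type
 * fixed at compile time, so the IPv4, IPv6 and MPLS imposition nodes are
 * each specialised by the compiler rather than branching per packet.
 */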
static uword
mpls_label_imposition (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 0, 0));
}

VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
    .function = mpls_label_imposition,
    .name = "mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "mpls-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
                              mpls_label_imposition)

static uword
ip4_mpls_label_imposition (vlib_main_t * vm,
                           vlib_node_runtime_t * node,
                           vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 1, 0));
}

VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
    .function = ip4_mpls_label_imposition,
    .name = "ip4-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip4-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
                              ip4_mpls_label_imposition)

static uword
ip6_mpls_label_imposition (vlib_main_t * vm,
                           vlib_node_runtime_t * node,
                           vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 0, 1));
}

VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
    .function = ip6_mpls_label_imposition,
    .name = "ip6-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip6-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
                              ip6_mpls_label_imposition)

static void
mpls_label_dpo_mem_show (void)
{
    fib_show_memory_usage("MPLS label",
                          pool_elts(mpls_label_dpo_pool),
                          pool_len(mpls_label_dpo_pool),
                          sizeof(mpls_label_dpo_t));
}

const static dpo_vft_t mld_vft = {
    .dv_lock = mpls_label_dpo_lock,
    .dv_unlock = mpls_label_dpo_unlock,
    .dv_format = format_mpls_label_dpo,
    .dv_mem_show = mpls_label_dpo_mem_show,
};

const static char* const mpls_label_imp_ip4_nodes[] =
{
    "ip4-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_ip6_nodes[] =
{
    "ip6-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_mpls_nodes[] =
{
    "mpls-label-imposition",
    NULL,
};
const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes,
    [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes,
    [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes,
};


void
mpls_label_dpo_module_init (void)
{
    dpo_register(DPO_MPLS_LABEL, &mld_vft, mpls_label_imp_nodes);
}