blob: fa5177ab9ea1109f1d7127a2e6e59bee714a2e59 [file] [log] [blame]
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include <vnet/ip/ip.h>
17#include <vnet/dpo/mpls_label_dpo.h>
18#include <vnet/mpls/mpls.h>
Neale Rannsf363ebd2017-12-06 00:45:33 -080019#include <vnet/dpo/drop_dpo.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010020
/*
 * pool of all MPLS Label DPOs; indexed by the dpoi_index carried in
 * the dpo_id_t that references a label object
 */
mpls_label_dpo_t *mpls_label_dpo_pool;
25
26static mpls_label_dpo_t *
27mpls_label_dpo_alloc (void)
28{
29 mpls_label_dpo_t *mld;
30
31 pool_get_aligned(mpls_label_dpo_pool, mld, CLIB_CACHE_LINE_BYTES);
32 memset(mld, 0, sizeof(*mld));
33
34 dpo_reset(&mld->mld_dpo);
35
36 return (mld);
37}
38
39static index_t
40mpls_label_dpo_get_index (mpls_label_dpo_t *mld)
41{
42 return (mld - mpls_label_dpo_pool);
43}
44
45index_t
Neale Rannsad422ed2016-11-02 14:20:04 +000046mpls_label_dpo_create (mpls_label_t *label_stack,
Neale Ranns0bfe5d82016-08-25 15:29:12 +010047 mpls_eos_bit_t eos,
48 u8 ttl,
49 u8 exp,
Neale Rannsad422ed2016-11-02 14:20:04 +000050 dpo_proto_t payload_proto,
Neale Ranns0bfe5d82016-08-25 15:29:12 +010051 const dpo_id_t *dpo)
52{
53 mpls_label_dpo_t *mld;
Neale Rannsad422ed2016-11-02 14:20:04 +000054 u32 ii;
Neale Ranns0bfe5d82016-08-25 15:29:12 +010055
56 mld = mpls_label_dpo_alloc();
Neale Rannsf363ebd2017-12-06 00:45:33 -080057
58 if (MPLS_LABEL_DPO_MAX_N_LABELS < vec_len(label_stack))
59 {
60 clib_warning("Label stack size exceeded");
61 dpo_stack(DPO_MPLS_LABEL,
62 mld->mld_payload_proto,
63 &mld->mld_dpo,
64 drop_dpo_get(DPO_PROTO_MPLS));
65 return (mpls_label_dpo_get_index(mld));
66 }
67
Neale Rannsad422ed2016-11-02 14:20:04 +000068 mld->mld_n_labels = vec_len(label_stack);
Neale Ranns9ca18c62016-12-10 21:08:09 +000069 mld->mld_n_hdr_bytes = mld->mld_n_labels * sizeof(mld->mld_hdr[0]);
Neale Rannsad422ed2016-11-02 14:20:04 +000070 mld->mld_payload_proto = payload_proto;
Neale Ranns0bfe5d82016-08-25 15:29:12 +010071
72 /*
Neale Rannsad422ed2016-11-02 14:20:04 +000073 * construct label rewrite headers for each value value passed.
Neale Ranns0bfe5d82016-08-25 15:29:12 +010074 * get the header in network byte order since we will paint it
75 * on a packet in the data-plane
76 */
Neale Ranns0bfe5d82016-08-25 15:29:12 +010077
Neale Rannsad422ed2016-11-02 14:20:04 +000078 for (ii = 0; ii < mld->mld_n_labels-1; ii++)
79 {
80 vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
81 vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, 255);
82 vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, 0);
83 vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, MPLS_NON_EOS);
84 mld->mld_hdr[ii].label_exp_s_ttl =
85 clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
86 }
87
88 /*
89 * the inner most label
90 */
91 ii = mld->mld_n_labels-1;
92
93 vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
94 vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, ttl);
95 vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, exp);
96 vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos);
97 mld->mld_hdr[ii].label_exp_s_ttl =
98 clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
99
100 /*
101 * stack this label objct on its parent.
102 */
103 dpo_stack(DPO_MPLS_LABEL,
104 mld->mld_payload_proto,
105 &mld->mld_dpo,
106 dpo);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100107
108 return (mpls_label_dpo_get_index(mld));
109}
110
111u8*
112format_mpls_label_dpo (u8 *s, va_list *args)
113{
114 index_t index = va_arg (*args, index_t);
115 u32 indent = va_arg (*args, u32);
116 mpls_unicast_header_t hdr;
117 mpls_label_dpo_t *mld;
Neale Rannsad422ed2016-11-02 14:20:04 +0000118 u32 ii;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100119
Neale Ranns8fe8cc22016-11-01 10:05:08 +0000120 s = format(s, "mpls-label:[%d]:", index);
Neale Rannsad422ed2016-11-02 14:20:04 +0000121
Neale Ranns15002542017-09-10 04:39:11 -0700122 if (pool_is_free_index(mpls_label_dpo_pool, index))
123 {
124 /*
125 * the packet trace can be printed after the DPO has been deleted
126 */
127 return (s);
128 }
129
130 mld = mpls_label_dpo_get(index);
131
Neale Rannsad422ed2016-11-02 14:20:04 +0000132 for (ii = 0; ii < mld->mld_n_labels; ii++)
133 {
134 hdr.label_exp_s_ttl =
135 clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
136 s = format(s, "%U", format_mpls_header, hdr);
137 }
138
139 s = format(s, "\n%U", format_white_space, indent);
Neale Ranns8fe8cc22016-11-01 10:05:08 +0000140 s = format(s, "%U", format_dpo_id, &mld->mld_dpo, indent+2);
141
142 return (s);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100143}
144
145static void
146mpls_label_dpo_lock (dpo_id_t *dpo)
147{
148 mpls_label_dpo_t *mld;
149
150 mld = mpls_label_dpo_get(dpo->dpoi_index);
151
152 mld->mld_locks++;
153}
154
155static void
156mpls_label_dpo_unlock (dpo_id_t *dpo)
157{
158 mpls_label_dpo_t *mld;
159
160 mld = mpls_label_dpo_get(dpo->dpoi_index);
161
162 mld->mld_locks--;
163
164 if (0 == mld->mld_locks)
165 {
166 dpo_reset(&mld->mld_dpo);
167 pool_put(mpls_label_dpo_pool, mld);
168 }
169}
170
/**
 * @brief A struct to hold tracing information for the MPLS label imposition
 * node.
 */
typedef struct mpls_label_imposition_trace_t_
{
    /**
     * The MPLS header imposed (inner-most label, network byte order)
     */
    mpls_unicast_header_t hdr;
} mpls_label_imposition_trace_t;
182
Neale Ranns696e88d2017-03-16 07:34:55 -0400183always_inline mpls_unicast_header_t *
184mpls_label_paint (vlib_buffer_t * b0,
185 mpls_label_dpo_t *mld0,
186 u8 ttl0)
187{
188 mpls_unicast_header_t *hdr0;
189
190 vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
191
192 hdr0 = vlib_buffer_get_current(b0);
193
Pablo Camarillo5d73eec2017-04-24 17:51:56 +0200194 if (1 == mld0->mld_n_labels)
Neale Ranns696e88d2017-03-16 07:34:55 -0400195 {
196 /* optimise for the common case of one label */
197 *hdr0 = mld0->mld_hdr[0];
198 }
199 else
200 {
201 clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
202 hdr0 = hdr0 + (mld0->mld_n_labels - 1);
203 }
204 /* fixup the TTL for the inner most label */
205 ((char*)hdr0)[3] = ttl0;
206
207 return (hdr0);
208}
209
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100210always_inline uword
Neale Rannsad422ed2016-11-02 14:20:04 +0000211mpls_label_imposition_inline (vlib_main_t * vm,
212 vlib_node_runtime_t * node,
213 vlib_frame_t * from_frame,
214 u8 payload_is_ip4,
Neale Rannsda78f952017-05-24 09:15:43 -0700215 u8 payload_is_ip6,
216 u8 payload_is_ethernet)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100217{
218 u32 n_left_from, next_index, * from, * to_next;
219
220 from = vlib_frame_vector_args (from_frame);
221 n_left_from = from_frame->n_vectors;
222
223 next_index = node->cached_next_index;
224
225 while (n_left_from > 0)
226 {
227 u32 n_left_to_next;
228
229 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
230
Neale Ranns696e88d2017-03-16 07:34:55 -0400231 while (n_left_from >= 8 && n_left_to_next >= 4)
Neale Ranns9ca18c62016-12-10 21:08:09 +0000232 {
Neale Ranns696e88d2017-03-16 07:34:55 -0400233 u32 bi0, mldi0, bi1, mldi1, bi2, mldi2, bi3, mldi3;
234 mpls_unicast_header_t *hdr0, *hdr1, *hdr2, *hdr3;
235 mpls_label_dpo_t *mld0, *mld1, *mld2, *mld3;
236 vlib_buffer_t * b0, *b1, * b2, *b3;
237 u32 next0, next1, next2, next3;
238 u8 ttl0, ttl1,ttl2, ttl3 ;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000239
240 bi0 = to_next[0] = from[0];
241 bi1 = to_next[1] = from[1];
Neale Ranns696e88d2017-03-16 07:34:55 -0400242 bi2 = to_next[2] = from[2];
243 bi3 = to_next[3] = from[3];
Neale Ranns9ca18c62016-12-10 21:08:09 +0000244
245 /* Prefetch next iteration. */
246 {
Neale Ranns696e88d2017-03-16 07:34:55 -0400247 vlib_buffer_t * p2, * p3, *p4, *p5;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000248
249 p2 = vlib_get_buffer (vm, from[2]);
250 p3 = vlib_get_buffer (vm, from[3]);
Neale Ranns696e88d2017-03-16 07:34:55 -0400251 p4 = vlib_get_buffer (vm, from[4]);
252 p5 = vlib_get_buffer (vm, from[5]);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000253
254 vlib_prefetch_buffer_header (p2, STORE);
255 vlib_prefetch_buffer_header (p3, STORE);
Neale Ranns696e88d2017-03-16 07:34:55 -0400256 vlib_prefetch_buffer_header (p4, STORE);
257 vlib_prefetch_buffer_header (p5, STORE);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000258
259 CLIB_PREFETCH (p2->data, sizeof (hdr0[0]), STORE);
260 CLIB_PREFETCH (p3->data, sizeof (hdr0[0]), STORE);
Neale Ranns696e88d2017-03-16 07:34:55 -0400261 CLIB_PREFETCH (p4->data, sizeof (hdr0[0]), STORE);
262 CLIB_PREFETCH (p5->data, sizeof (hdr0[0]), STORE);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000263 }
264
Neale Ranns696e88d2017-03-16 07:34:55 -0400265 from += 4;
266 to_next += 4;
267 n_left_from -= 4;
268 n_left_to_next -= 4;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000269
270 b0 = vlib_get_buffer (vm, bi0);
271 b1 = vlib_get_buffer (vm, bi1);
Neale Ranns696e88d2017-03-16 07:34:55 -0400272 b2 = vlib_get_buffer (vm, bi2);
273 b3 = vlib_get_buffer (vm, bi3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000274
275 /* dst lookup was done by ip4 lookup */
276 mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
277 mldi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
Neale Ranns696e88d2017-03-16 07:34:55 -0400278 mldi2 = vnet_buffer(b2)->ip.adj_index[VLIB_TX];
279 mldi3 = vnet_buffer(b3)->ip.adj_index[VLIB_TX];
Neale Ranns9ca18c62016-12-10 21:08:09 +0000280 mld0 = mpls_label_dpo_get(mldi0);
281 mld1 = mpls_label_dpo_get(mldi1);
Neale Ranns696e88d2017-03-16 07:34:55 -0400282 mld2 = mpls_label_dpo_get(mldi2);
283 mld3 = mpls_label_dpo_get(mldi3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000284
285 if (payload_is_ip4)
286 {
287 /*
288 * decrement the TTL on ingress to the LSP
289 */
290 ip4_header_t * ip0 = vlib_buffer_get_current(b0);
291 ip4_header_t * ip1 = vlib_buffer_get_current(b1);
Neale Ranns696e88d2017-03-16 07:34:55 -0400292 ip4_header_t * ip2 = vlib_buffer_get_current(b2);
293 ip4_header_t * ip3 = vlib_buffer_get_current(b3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000294 u32 checksum0;
295 u32 checksum1;
Neale Ranns696e88d2017-03-16 07:34:55 -0400296 u32 checksum2;
297 u32 checksum3;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000298
299 checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
300 checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
Neale Ranns696e88d2017-03-16 07:34:55 -0400301 checksum2 = ip2->checksum + clib_host_to_net_u16 (0x0100);
302 checksum3 = ip3->checksum + clib_host_to_net_u16 (0x0100);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000303
304 checksum0 += checksum0 >= 0xffff;
305 checksum1 += checksum1 >= 0xffff;
Neale Ranns696e88d2017-03-16 07:34:55 -0400306 checksum2 += checksum2 >= 0xffff;
307 checksum3 += checksum3 >= 0xffff;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000308
309 ip0->checksum = checksum0;
310 ip1->checksum = checksum1;
Neale Ranns696e88d2017-03-16 07:34:55 -0400311 ip2->checksum = checksum2;
312 ip3->checksum = checksum3;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000313
314 ip0->ttl -= 1;
315 ip1->ttl -= 1;
Neale Ranns696e88d2017-03-16 07:34:55 -0400316 ip2->ttl -= 1;
317 ip3->ttl -= 1;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000318
319 ttl1 = ip1->ttl;
320 ttl0 = ip0->ttl;
Neale Ranns696e88d2017-03-16 07:34:55 -0400321 ttl3 = ip3->ttl;
322 ttl2 = ip2->ttl;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000323 }
324 else if (payload_is_ip6)
325 {
326 /*
327 * decrement the TTL on ingress to the LSP
328 */
329 ip6_header_t * ip0 = vlib_buffer_get_current(b0);
330 ip6_header_t * ip1 = vlib_buffer_get_current(b1);
Neale Ranns696e88d2017-03-16 07:34:55 -0400331 ip6_header_t * ip2 = vlib_buffer_get_current(b2);
332 ip6_header_t * ip3 = vlib_buffer_get_current(b3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000333
334 ip0->hop_limit -= 1;
335 ip1->hop_limit -= 1;
Neale Ranns696e88d2017-03-16 07:34:55 -0400336 ip2->hop_limit -= 1;
337 ip3->hop_limit -= 1;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000338
339 ttl0 = ip0->hop_limit;
340 ttl1 = ip1->hop_limit;
Neale Ranns696e88d2017-03-16 07:34:55 -0400341 ttl2 = ip2->hop_limit;
342 ttl3 = ip3->hop_limit;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000343 }
Neale Rannsda78f952017-05-24 09:15:43 -0700344 else if (payload_is_ethernet)
345 {
346 /*
347 * nothing to chang ein the ethernet header
348 */
349 ttl0 = ttl1 = ttl2 = ttl3 = 255;
350 }
Neale Ranns9ca18c62016-12-10 21:08:09 +0000351 else
352 {
353 /*
354 * else, the packet to be encapped is an MPLS packet
355 */
356 if (PREDICT_TRUE(vnet_buffer(b0)->mpls.first))
357 {
358 /*
359 * The first label to be imposed on the packet. this is a label swap.
360 * in which case we stashed the TTL and EXP bits in the
361 * packet in the lookup node
362 */
363 ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
364
365 ttl0 = vnet_buffer(b0)->mpls.ttl - 1;
366 }
367 else
368 {
369 /*
370 * not the first label. implying we are recusring down a chain of
371 * output labels.
372 * Each layer is considered a new LSP - hence the TTL is reset.
373 */
374 ttl0 = 255;
375 }
376 if (PREDICT_TRUE(vnet_buffer(b1)->mpls.first))
377 {
378 ASSERT(1 != vnet_buffer (b1)->mpls.ttl);
379 ttl1 = vnet_buffer(b1)->mpls.ttl - 1;
380 }
381 else
382 {
383 ttl1 = 255;
384 }
Neale Ranns696e88d2017-03-16 07:34:55 -0400385 if (PREDICT_TRUE(vnet_buffer(b2)->mpls.first))
386 {
Neale Ranns71275e32017-05-25 12:38:58 -0700387 ASSERT(1 != vnet_buffer (b2)->mpls.ttl);
Neale Ranns696e88d2017-03-16 07:34:55 -0400388
389 ttl2 = vnet_buffer(b2)->mpls.ttl - 1;
390 }
391 else
392 {
393 ttl2 = 255;
394 }
395 if (PREDICT_TRUE(vnet_buffer(b3)->mpls.first))
396 {
397 ASSERT(1 != vnet_buffer (b3)->mpls.ttl);
398 ttl3 = vnet_buffer(b3)->mpls.ttl - 1;
399 }
400 else
401 {
402 ttl3 = 255;
403 }
Neale Ranns9ca18c62016-12-10 21:08:09 +0000404 }
405 vnet_buffer(b0)->mpls.first = 0;
406 vnet_buffer(b1)->mpls.first = 0;
Neale Ranns696e88d2017-03-16 07:34:55 -0400407 vnet_buffer(b2)->mpls.first = 0;
408 vnet_buffer(b3)->mpls.first = 0;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000409
410 /* Paint the MPLS header */
Neale Ranns696e88d2017-03-16 07:34:55 -0400411 hdr0 = mpls_label_paint(b0, mld0, ttl0);
412 hdr1 = mpls_label_paint(b1, mld1, ttl1);
413 hdr2 = mpls_label_paint(b2, mld2, ttl2);
414 hdr3 = mpls_label_paint(b3, mld3, ttl3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000415
416 next0 = mld0->mld_dpo.dpoi_next_node;
417 next1 = mld1->mld_dpo.dpoi_next_node;
Neale Ranns696e88d2017-03-16 07:34:55 -0400418 next2 = mld2->mld_dpo.dpoi_next_node;
419 next3 = mld3->mld_dpo.dpoi_next_node;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000420 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
421 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mld1->mld_dpo.dpoi_index;
Neale Ranns696e88d2017-03-16 07:34:55 -0400422 vnet_buffer(b2)->ip.adj_index[VLIB_TX] = mld2->mld_dpo.dpoi_index;
423 vnet_buffer(b3)->ip.adj_index[VLIB_TX] = mld3->mld_dpo.dpoi_index;
Neale Ranns9ca18c62016-12-10 21:08:09 +0000424
425 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
426 {
427 mpls_label_imposition_trace_t *tr =
428 vlib_add_trace (vm, node, b0, sizeof (*tr));
429 tr->hdr = *hdr0;
430 }
431 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
432 {
433 mpls_label_imposition_trace_t *tr =
434 vlib_add_trace (vm, node, b1, sizeof (*tr));
435 tr->hdr = *hdr1;
436 }
Neale Ranns696e88d2017-03-16 07:34:55 -0400437 if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
438 {
439 mpls_label_imposition_trace_t *tr =
440 vlib_add_trace (vm, node, b2, sizeof (*tr));
441 tr->hdr = *hdr2;
442 }
443 if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
444 {
445 mpls_label_imposition_trace_t *tr =
446 vlib_add_trace (vm, node, b3, sizeof (*tr));
447 tr->hdr = *hdr3;
448 }
Neale Ranns9ca18c62016-12-10 21:08:09 +0000449
Neale Ranns696e88d2017-03-16 07:34:55 -0400450 vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next,
Neale Ranns9ca18c62016-12-10 21:08:09 +0000451 n_left_to_next,
Neale Ranns696e88d2017-03-16 07:34:55 -0400452 bi0, bi1, bi2, bi3,
453 next0, next1, next2, next3);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000454 }
455
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100456 while (n_left_from > 0 && n_left_to_next > 0)
457 {
458 mpls_unicast_header_t *hdr0;
459 mpls_label_dpo_t *mld0;
460 vlib_buffer_t * b0;
461 u32 bi0, mldi0;
462 u32 next0;
Neale Rannsad422ed2016-11-02 14:20:04 +0000463 u8 ttl;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100464
465 bi0 = from[0];
466 to_next[0] = bi0;
467 from += 1;
468 to_next += 1;
469 n_left_from -= 1;
470 n_left_to_next -= 1;
471
472 b0 = vlib_get_buffer (vm, bi0);
473
474 /* dst lookup was done by ip4 lookup */
475 mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
476 mld0 = mpls_label_dpo_get(mldi0);
477
Neale Rannsad422ed2016-11-02 14:20:04 +0000478 if (payload_is_ip4)
479 {
480 /*
481 * decrement the TTL on ingress to the LSP
482 */
483 ip4_header_t * ip0 = vlib_buffer_get_current(b0);
484 u32 checksum0;
485
486 checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
487 checksum0 += checksum0 >= 0xffff;
488
489 ip0->checksum = checksum0;
490 ip0->ttl -= 1;
491 ttl = ip0->ttl;
492 }
493 else if (payload_is_ip6)
494 {
495 /*
496 * decrement the TTL on ingress to the LSP
497 */
498 ip6_header_t * ip0 = vlib_buffer_get_current(b0);
499
500 ip0->hop_limit -= 1;
501 ttl = ip0->hop_limit;
502 }
503 else
504 {
505 /*
506 * else, the packet to be encapped is an MPLS packet
507 */
508 if (vnet_buffer(b0)->mpls.first)
509 {
510 /*
511 * The first label to be imposed on the packet. this is a label swap.
512 * in which case we stashed the TTL and EXP bits in the
513 * packet in the lookup node
514 */
515 ASSERT(0 != vnet_buffer (b0)->mpls.ttl);
516
517 ttl = vnet_buffer(b0)->mpls.ttl - 1;
518 }
519 else
520 {
521 /*
522 * not the first label. implying we are recusring down a chain of
523 * output labels.
524 * Each layer is considered a new LSP - hence the TTL is reset.
525 */
526 ttl = 255;
527 }
528 }
529 vnet_buffer(b0)->mpls.first = 0;
530
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100531 /* Paint the MPLS header */
Neale Ranns9ca18c62016-12-10 21:08:09 +0000532 vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100533 hdr0 = vlib_buffer_get_current(b0);
Neale Ranns9ca18c62016-12-10 21:08:09 +0000534 clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
Neale Rannsad422ed2016-11-02 14:20:04 +0000535
536 /* fixup the TTL for the inner most label */
537 hdr0 = hdr0 + (mld0->mld_n_labels - 1);
538 ((char*)hdr0)[3] = ttl;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100539
540 next0 = mld0->mld_dpo.dpoi_next_node;
541 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
542
Neale Ranns9ca18c62016-12-10 21:08:09 +0000543 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100544 {
545 mpls_label_imposition_trace_t *tr =
546 vlib_add_trace (vm, node, b0, sizeof (*tr));
547 tr->hdr = *hdr0;
548 }
549
550 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
551 n_left_to_next, bi0, next0);
552 }
553 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
554 }
555 return from_frame->n_vectors;
556}
557
558static u8 *
559format_mpls_label_imposition_trace (u8 * s, va_list * args)
560{
561 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
562 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
563 mpls_label_imposition_trace_t * t;
564 mpls_unicast_header_t hdr;
Christophe Fontained3c008d2017-10-02 18:10:54 +0200565 u32 indent;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100566
567 t = va_arg (*args, mpls_label_imposition_trace_t *);
568 indent = format_get_indent (s);
569 hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
570
571 s = format (s, "%Umpls-header:%U",
572 format_white_space, indent,
573 format_mpls_header, hdr);
574 return (s);
575}
576
Neale Rannsad422ed2016-11-02 14:20:04 +0000577static uword
578mpls_label_imposition (vlib_main_t * vm,
579 vlib_node_runtime_t * node,
580 vlib_frame_t * frame)
581{
Neale Rannsda78f952017-05-24 09:15:43 -0700582 return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 0));
Neale Rannsad422ed2016-11-02 14:20:04 +0000583}
584
/**
 * @brief Graph node imposing labels on an MPLS payload; the sole
 * static next is the MPLS drop node.
 */
VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
    .function = mpls_label_imposition,
    .name = "mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "mpls-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
                              mpls_label_imposition)
598
599static uword
600ip4_mpls_label_imposition (vlib_main_t * vm,
601 vlib_node_runtime_t * node,
602 vlib_frame_t * frame)
603{
Neale Rannsda78f952017-05-24 09:15:43 -0700604 return (mpls_label_imposition_inline(vm, node, frame, 1, 0, 0));
Neale Rannsad422ed2016-11-02 14:20:04 +0000605}
606
/**
 * @brief Graph node imposing labels on an IPv4 payload.
 */
VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
    .function = ip4_mpls_label_imposition,
    .name = "ip4-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip4-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
                              ip4_mpls_label_imposition)
620
621static uword
622ip6_mpls_label_imposition (vlib_main_t * vm,
623 vlib_node_runtime_t * node,
624 vlib_frame_t * frame)
625{
Neale Rannsda78f952017-05-24 09:15:43 -0700626 return (mpls_label_imposition_inline(vm, node, frame, 0, 1, 0));
Neale Rannsad422ed2016-11-02 14:20:04 +0000627}
628
/**
 * @brief Graph node imposing labels on an IPv6 payload.
 */
VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
    .function = ip6_mpls_label_imposition,
    .name = "ip6-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip6-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
                              ip6_mpls_label_imposition)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100642
Neale Rannsda78f952017-05-24 09:15:43 -0700643static uword
644ethernet_mpls_label_imposition (vlib_main_t * vm,
645 vlib_node_runtime_t * node,
646 vlib_frame_t * frame)
647{
648 return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 1));
649}
650
/**
 * @brief Graph node imposing labels on an ethernet payload.
 */
VLIB_REGISTER_NODE (ethernet_mpls_label_imposition_node) = {
    .function = ethernet_mpls_label_imposition,
    .name = "ethernet-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ethernet_mpls_label_imposition_node,
                              ethernet_mpls_label_imposition)
664
Neale Ranns6c3ebcc2016-10-02 21:20:15 +0100665static void
666mpls_label_dpo_mem_show (void)
667{
668 fib_show_memory_usage("MPLS label",
669 pool_elts(mpls_label_dpo_pool),
670 pool_len(mpls_label_dpo_pool),
671 sizeof(mpls_label_dpo_t));
672}
673
/**
 * @brief Virtual function table registered with the DPO infrastructure
 * for DPO_MPLS_LABEL objects.
 */
const static dpo_vft_t mld_vft = {
    .dv_lock = mpls_label_dpo_lock,
    .dv_unlock = mpls_label_dpo_unlock,
    .dv_format = format_mpls_label_dpo,
    .dv_mem_show = mpls_label_dpo_mem_show,
};
680
/* NULL-terminated node-name lists, one per payload protocol */
const static char* const mpls_label_imp_ip4_nodes[] =
{
    "ip4-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_ip6_nodes[] =
{
    "ip6-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_mpls_nodes[] =
{
    "mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_ethernet_nodes[] =
{
    "ethernet-mpls-label-imposition",
    NULL,
};

/**
 * @brief Per-payload-protocol graph nodes the DPO infra wires children
 * of a label DPO to.
 */
const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes,
    [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes,
    [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes,
    [DPO_PROTO_ETHERNET] = mpls_label_imp_ethernet_nodes,
};
709
710
/**
 * @brief Register the MPLS label DPO type with the DPO infrastructure;
 * called once at startup.
 */
void
mpls_label_dpo_module_init (void)
{
    dpo_register(DPO_MPLS_LABEL, &mld_vft, mpls_label_imp_nodes);
}