/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/ip.h>
#include <vnet/dpo/mpls_label_dpo.h>
#include <vnet/mpls/mpls.h>

/*
 * pool of all MPLS Label DPOs
 */
mpls_label_dpo_t *mpls_label_dpo_pool;

static mpls_label_dpo_t *
mpls_label_dpo_alloc (void)
{
    mpls_label_dpo_t *mld;

    pool_get_aligned(mpls_label_dpo_pool, mld, CLIB_CACHE_LINE_BYTES);
    memset(mld, 0, sizeof(*mld));

    dpo_reset(&mld->mld_dpo);

    return (mld);
}

static index_t
mpls_label_dpo_get_index (mpls_label_dpo_t *mld)
{
    return (mld - mpls_label_dpo_pool);
}

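/**
 * @brief Create an MPLS label DPO from a vector of label values.
 *  The outer labels get a default TTL (255), EXP (0) and the non-EOS bit;
 *  the supplied ttl, exp and eos values apply to the innermost label only.
 *  The new object is stacked on the parent 'dpo'.
 */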
index_t
mpls_label_dpo_create (mpls_label_t *label_stack,
                       mpls_eos_bit_t eos,
                       u8 ttl,
                       u8 exp,
                       dpo_proto_t payload_proto,
                       const dpo_id_t *dpo)
{
    mpls_label_dpo_t *mld;
    u32 ii;

    mld = mpls_label_dpo_alloc();
    mld->mld_n_labels = vec_len(label_stack);
    mld->mld_n_hdr_bytes = mld->mld_n_labels * sizeof(mld->mld_hdr[0]);
    mld->mld_payload_proto = payload_proto;

    /*
     * construct label rewrite headers for each value passed.
     * get the header in network byte order since we will paint it
     * on a packet in the data-plane
     */

    for (ii = 0; ii < mld->mld_n_labels-1; ii++)
    {
        vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
        vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, 255);
        vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, 0);
        vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, MPLS_NON_EOS);
        mld->mld_hdr[ii].label_exp_s_ttl =
            clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);
    }

    /*
     * the innermost label
     */
    ii = mld->mld_n_labels-1;

    vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]);
    vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, ttl);
    vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, exp);
    vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos);
    mld->mld_hdr[ii].label_exp_s_ttl =
        clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl);

    /*
     * stack this label object on its parent.
     */
    dpo_stack(DPO_MPLS_LABEL,
              mld->mld_payload_proto,
              &mld->mld_dpo,
              dpo);

    return (mpls_label_dpo_get_index(mld));
}
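
/*
 * Illustrative usage sketch (not a call site in this file; the FIB path
 * code is the real creator). The label values and 'parent_dpo' are
 * hypothetical:
 *
 *   mpls_label_t *stack = NULL;
 *   vec_add1(stack, 100);                // outer label
 *   vec_add1(stack, 200);                // innermost (EOS) label
 *   index_t mldi = mpls_label_dpo_create(stack, MPLS_EOS, 64, 0,
 *                                        DPO_PROTO_IP4, &parent_dpo);
 *   vec_free(stack);
 */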

u8*
format_mpls_label_dpo (u8 *s, va_list *args)
{
    index_t index = va_arg (*args, index_t);
    u32 indent = va_arg (*args, u32);
    mpls_unicast_header_t hdr;
    mpls_label_dpo_t *mld;
    u32 ii;

    s = format(s, "mpls-label:[%d]:", index);

    if (pool_is_free_index(mpls_label_dpo_pool, index))
    {
        /*
         * the packet trace can be printed after the DPO has been deleted
         */
        return (s);
    }

    mld = mpls_label_dpo_get(index);

    for (ii = 0; ii < mld->mld_n_labels; ii++)
    {
        hdr.label_exp_s_ttl =
            clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl);
        s = format(s, "%U", format_mpls_header, hdr);
    }

    s = format(s, "\n%U", format_white_space, indent);
    s = format(s, "%U", format_dpo_id, &mld->mld_dpo, indent+2);

    return (s);
}

static void
mpls_label_dpo_lock (dpo_id_t *dpo)
{
    mpls_label_dpo_t *mld;

    mld = mpls_label_dpo_get(dpo->dpoi_index);

    mld->mld_locks++;
}

144mpls_label_dpo_unlock (dpo_id_t *dpo)
145{
146 mpls_label_dpo_t *mld;
147
148 mld = mpls_label_dpo_get(dpo->dpoi_index);
149
150 mld->mld_locks--;
151
152 if (0 == mld->mld_locks)
153 {
154 dpo_reset(&mld->mld_dpo);
155 pool_put(mpls_label_dpo_pool, mld);
156 }
157}
158
159/**
160 * @brief A struct to hold tracing information for the MPLS label imposition
161 * node.
162 */
163typedef struct mpls_label_imposition_trace_t_
164{
165 /**
166 * The MPLS header imposed
167 */
168 mpls_unicast_header_t hdr;
169} mpls_label_imposition_trace_t;
170
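/**
 * @brief Prepend the DPO's pre-built label stack to the buffer and fix up
 *  the TTL of the innermost label. Returns a pointer to that innermost
 *  label header.
 */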
always_inline mpls_unicast_header_t *
mpls_label_paint (vlib_buffer_t * b0,
                  mpls_label_dpo_t *mld0,
                  u8 ttl0)
{
    mpls_unicast_header_t *hdr0;

    vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));

    hdr0 = vlib_buffer_get_current(b0);

    if (1 == mld0->mld_n_labels)
    {
        /* optimise for the common case of one label */
        *hdr0 = mld0->mld_hdr[0];
    }
    else
    {
        clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);
        hdr0 = hdr0 + (mld0->mld_n_labels - 1);
    }
    /* fix up the TTL for the innermost label */
    ((char*)hdr0)[3] = ttl0;

    return (hdr0);
}

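/**
 * @brief Shared imposition walk. Exactly one of the payload_is_* flags is
 *  set (or none, for an MPLS payload) per instantiation, and since they are
 *  compile-time constants the per-protocol TTL handling is resolved by the
 *  compiler in the specialised nodes below.
 */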
always_inline uword
mpls_label_imposition_inline (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              vlib_frame_t * from_frame,
                              u8 payload_is_ip4,
                              u8 payload_is_ip6,
                              u8 payload_is_ethernet)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from >= 8 && n_left_to_next >= 4)
        {
            u32 bi0, mldi0, bi1, mldi1, bi2, mldi2, bi3, mldi3;
            mpls_unicast_header_t *hdr0, *hdr1, *hdr2, *hdr3;
            mpls_label_dpo_t *mld0, *mld1, *mld2, *mld3;
            vlib_buffer_t *b0, *b1, *b2, *b3;
            u32 next0, next1, next2, next3;
            u8 ttl0, ttl1, ttl2, ttl3;

            bi0 = to_next[0] = from[0];
            bi1 = to_next[1] = from[1];
            bi2 = to_next[2] = from[2];
            bi3 = to_next[3] = from[3];

            /* Prefetch next iteration. */
            {
                vlib_buffer_t *p2, *p3, *p4, *p5;

                p2 = vlib_get_buffer (vm, from[2]);
                p3 = vlib_get_buffer (vm, from[3]);
                p4 = vlib_get_buffer (vm, from[4]);
                p5 = vlib_get_buffer (vm, from[5]);

                vlib_prefetch_buffer_header (p2, STORE);
                vlib_prefetch_buffer_header (p3, STORE);
                vlib_prefetch_buffer_header (p4, STORE);
                vlib_prefetch_buffer_header (p5, STORE);

                CLIB_PREFETCH (p2->data, sizeof (hdr0[0]), STORE);
                CLIB_PREFETCH (p3->data, sizeof (hdr0[0]), STORE);
                CLIB_PREFETCH (p4->data, sizeof (hdr0[0]), STORE);
                CLIB_PREFETCH (p5->data, sizeof (hdr0[0]), STORE);
            }

            from += 4;
            to_next += 4;
            n_left_from -= 4;
            n_left_to_next -= 4;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);
            b2 = vlib_get_buffer (vm, bi2);
            b3 = vlib_get_buffer (vm, bi3);

            /* dst lookup was done by ip4 lookup */
            mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            mldi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            mldi2 = vnet_buffer(b2)->ip.adj_index[VLIB_TX];
            mldi3 = vnet_buffer(b3)->ip.adj_index[VLIB_TX];
            mld0 = mpls_label_dpo_get(mldi0);
            mld1 = mpls_label_dpo_get(mldi1);
            mld2 = mpls_label_dpo_get(mldi2);
            mld3 = mpls_label_dpo_get(mldi3);

            if (payload_is_ip4)
            {
                /*
                 * decrement the TTL on ingress to the LSP
                 */
                ip4_header_t * ip0 = vlib_buffer_get_current(b0);
                ip4_header_t * ip1 = vlib_buffer_get_current(b1);
                ip4_header_t * ip2 = vlib_buffer_get_current(b2);
                ip4_header_t * ip3 = vlib_buffer_get_current(b3);
                u32 checksum0;
                u32 checksum1;
                u32 checksum2;
                u32 checksum3;

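                /*
                 * incremental checksum update (RFC 1624): the TTL is the
                 * high byte of its 16-bit header word, so a decrement of one
                 * is accounted for by adding 0x0100 to the one's-complement
                 * checksum and folding the carry.
                 */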
                checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
                checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
                checksum2 = ip2->checksum + clib_host_to_net_u16 (0x0100);
                checksum3 = ip3->checksum + clib_host_to_net_u16 (0x0100);

                checksum0 += checksum0 >= 0xffff;
                checksum1 += checksum1 >= 0xffff;
                checksum2 += checksum2 >= 0xffff;
                checksum3 += checksum3 >= 0xffff;

                ip0->checksum = checksum0;
                ip1->checksum = checksum1;
                ip2->checksum = checksum2;
                ip3->checksum = checksum3;

                ip0->ttl -= 1;
                ip1->ttl -= 1;
                ip2->ttl -= 1;
                ip3->ttl -= 1;

                ttl1 = ip1->ttl;
                ttl0 = ip0->ttl;
                ttl3 = ip3->ttl;
                ttl2 = ip2->ttl;
            }
            else if (payload_is_ip6)
            {
                /*
                 * decrement the TTL on ingress to the LSP
                 */
                ip6_header_t * ip0 = vlib_buffer_get_current(b0);
                ip6_header_t * ip1 = vlib_buffer_get_current(b1);
                ip6_header_t * ip2 = vlib_buffer_get_current(b2);
                ip6_header_t * ip3 = vlib_buffer_get_current(b3);

                ip0->hop_limit -= 1;
                ip1->hop_limit -= 1;
                ip2->hop_limit -= 1;
                ip3->hop_limit -= 1;

                ttl0 = ip0->hop_limit;
                ttl1 = ip1->hop_limit;
                ttl2 = ip2->hop_limit;
                ttl3 = ip3->hop_limit;
            }
            else if (payload_is_ethernet)
            {
                /*
                 * nothing to change in the ethernet header
                 */
                ttl0 = ttl1 = ttl2 = ttl3 = 255;
            }
            else
            {
                /*
                 * else, the packet to be encapped is an MPLS packet
                 */
                if (PREDICT_TRUE(vnet_buffer(b0)->mpls.first))
                {
                    /*
                     * The first label to be imposed on the packet, i.e. a
                     * label swap, in which case the TTL and EXP bits were
                     * stashed in the packet by the lookup node
                     */
                    ASSERT(0 != vnet_buffer (b0)->mpls.ttl);

                    ttl0 = vnet_buffer(b0)->mpls.ttl - 1;
                }
                else
                {
                    /*
                     * not the first label, implying we are recursing down a
                     * chain of output labels.
                     * Each layer is considered a new LSP - hence the TTL is
                     * reset.
                     */
                    ttl0 = 255;
                }
                if (PREDICT_TRUE(vnet_buffer(b1)->mpls.first))
                {
                    ASSERT(0 != vnet_buffer (b1)->mpls.ttl);
                    ttl1 = vnet_buffer(b1)->mpls.ttl - 1;
                }
                else
                {
                    ttl1 = 255;
                }
                if (PREDICT_TRUE(vnet_buffer(b2)->mpls.first))
                {
                    ASSERT(0 != vnet_buffer (b2)->mpls.ttl);

                    ttl2 = vnet_buffer(b2)->mpls.ttl - 1;
                }
                else
                {
                    ttl2 = 255;
                }
                if (PREDICT_TRUE(vnet_buffer(b3)->mpls.first))
                {
                    ASSERT(0 != vnet_buffer (b3)->mpls.ttl);
                    ttl3 = vnet_buffer(b3)->mpls.ttl - 1;
                }
                else
                {
                    ttl3 = 255;
                }
            }
            vnet_buffer(b0)->mpls.first = 0;
            vnet_buffer(b1)->mpls.first = 0;
            vnet_buffer(b2)->mpls.first = 0;
            vnet_buffer(b3)->mpls.first = 0;

            /* Paint the MPLS header */
            hdr0 = mpls_label_paint(b0, mld0, ttl0);
            hdr1 = mpls_label_paint(b1, mld1, ttl1);
            hdr2 = mpls_label_paint(b2, mld2, ttl2);
            hdr3 = mpls_label_paint(b3, mld3, ttl3);

            next0 = mld0->mld_dpo.dpoi_next_node;
            next1 = mld1->mld_dpo.dpoi_next_node;
            next2 = mld2->mld_dpo.dpoi_next_node;
            next3 = mld3->mld_dpo.dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;
            vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mld1->mld_dpo.dpoi_index;
            vnet_buffer(b2)->ip.adj_index[VLIB_TX] = mld2->mld_dpo.dpoi_index;
            vnet_buffer(b3)->ip.adj_index[VLIB_TX] = mld3->mld_dpo.dpoi_index;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->hdr = *hdr0;
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b1, sizeof (*tr));
                tr->hdr = *hdr1;
            }
            if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b2, sizeof (*tr));
                tr->hdr = *hdr2;
            }
            if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b3, sizeof (*tr));
                tr->hdr = *hdr3;
            }

            vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next,
                                            n_left_to_next,
                                            bi0, bi1, bi2, bi3,
                                            next0, next1, next2, next3);
        }

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            mpls_unicast_header_t *hdr0;
            mpls_label_dpo_t *mld0;
            vlib_buffer_t * b0;
            u32 bi0, mldi0;
            u32 next0;
            u8 ttl;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            /* dst lookup was done by ip4 lookup */
            mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            mld0 = mpls_label_dpo_get(mldi0);

            if (payload_is_ip4)
            {
                /*
                 * decrement the TTL on ingress to the LSP
                 */
                ip4_header_t * ip0 = vlib_buffer_get_current(b0);
                u32 checksum0;

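                /* incremental checksum update for the TTL decrement, as in
                 * the quad loop above */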
                checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
                checksum0 += checksum0 >= 0xffff;

                ip0->checksum = checksum0;
                ip0->ttl -= 1;
                ttl = ip0->ttl;
            }
            else if (payload_is_ip6)
            {
                /*
                 * decrement the TTL on ingress to the LSP
                 */
                ip6_header_t * ip0 = vlib_buffer_get_current(b0);

                ip0->hop_limit -= 1;
                ttl = ip0->hop_limit;
            }
            else
            {
                /*
                 * else, the packet to be encapped is an MPLS packet
                 */
                if (vnet_buffer(b0)->mpls.first)
                {
                    /*
                     * The first label to be imposed on the packet, i.e. a
                     * label swap, in which case the TTL and EXP bits were
                     * stashed in the packet by the lookup node
                     */
                    ASSERT(0 != vnet_buffer (b0)->mpls.ttl);

                    ttl = vnet_buffer(b0)->mpls.ttl - 1;
                }
                else
                {
                    /*
                     * not the first label, implying we are recursing down a
                     * chain of output labels.
                     * Each layer is considered a new LSP - hence the TTL is
                     * reset.
                     */
                    ttl = 255;
                }
            }
            vnet_buffer(b0)->mpls.first = 0;

            /* Paint the MPLS header */
            vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes));
            hdr0 = vlib_buffer_get_current(b0);
            clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes);

            /* fix up the TTL for the innermost label */
            hdr0 = hdr0 + (mld0->mld_n_labels - 1);
            ((char*)hdr0)[3] = ttl;

            next0 = mld0->mld_dpo.dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_imposition_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->hdr = *hdr0;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}

static u8 *
format_mpls_label_imposition_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    mpls_label_imposition_trace_t * t;
    mpls_unicast_header_t hdr;
    u32 indent;

    t = va_arg (*args, mpls_label_imposition_trace_t *);
    indent = format_get_indent (s);
    hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);

    s = format (s, "%Umpls-header:%U",
                format_white_space, indent,
                format_mpls_header, hdr);
    return (s);
}

static uword
mpls_label_imposition (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 0));
}

VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
    .function = mpls_label_imposition,
    .name = "mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "mpls-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
                              mpls_label_imposition)

static uword
ip4_mpls_label_imposition (vlib_main_t * vm,
                           vlib_node_runtime_t * node,
                           vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 1, 0, 0));
}

VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
    .function = ip4_mpls_label_imposition,
    .name = "ip4-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip4-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
                              ip4_mpls_label_imposition)

static uword
ip6_mpls_label_imposition (vlib_main_t * vm,
                           vlib_node_runtime_t * node,
                           vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 0, 1, 0));
}

VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
    .function = ip6_mpls_label_imposition,
    .name = "ip6-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip6-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
                              ip6_mpls_label_imposition)

static uword
ethernet_mpls_label_imposition (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
    return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 1));
}

VLIB_REGISTER_NODE (ethernet_mpls_label_imposition_node) = {
    .function = ethernet_mpls_label_imposition,
    .name = "ethernet-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ethernet_mpls_label_imposition_node,
                              ethernet_mpls_label_imposition)

static void
mpls_label_dpo_mem_show (void)
{
    fib_show_memory_usage("MPLS label",
                          pool_elts(mpls_label_dpo_pool),
                          pool_len(mpls_label_dpo_pool),
                          sizeof(mpls_label_dpo_t));
}

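/*
 * The virtual function table registered for DPO_MPLS_LABEL; the generic
 * DPO code uses it to lock, unlock, format and report memory usage for
 * these objects.
 */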
const static dpo_vft_t mld_vft = {
    .dv_lock = mpls_label_dpo_lock,
    .dv_unlock = mpls_label_dpo_unlock,
    .dv_format = format_mpls_label_dpo,
    .dv_mem_show = mpls_label_dpo_mem_show,
};

const static char* const mpls_label_imp_ip4_nodes[] =
{
    "ip4-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_ip6_nodes[] =
{
    "ip6-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_mpls_nodes[] =
{
    "mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_ethernet_nodes[] =
{
    "ethernet-mpls-label-imposition",
    NULL,
};

const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes,
    [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes,
    [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes,
    [DPO_PROTO_ETHERNET] = mpls_label_imp_ethernet_nodes,
};

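/**
 * @brief Register the MPLS label DPO type, its virtual function table and
 *  its per-payload-protocol imposition nodes with the DPO infrastructure.
 */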
void
mpls_label_dpo_module_init (void)
{
    dpo_register(DPO_MPLS_LABEL, &mld_vft, mpls_label_imp_nodes);
}
703}