Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2016 Cisco and/or its affiliates. |
| 3 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | * you may not use this file except in compliance with the License. |
| 5 | * You may obtain a copy of the License at: |
| 6 | * |
| 7 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | * |
| 9 | * Unless required by applicable law or agreed to in writing, software |
| 10 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | * See the License for the specific language governing permissions and |
| 13 | * limitations under the License. |
| 14 | */ |
| 15 | |
| 16 | #include <vnet/ip/ip.h> |
| 17 | #include <vnet/dpo/mpls_label_dpo.h> |
| 18 | #include <vnet/mpls/mpls.h> |
| 19 | |
/*
 * pool of all MPLS Label DPOs; indexed by the value returned from
 * mpls_label_dpo_create()
 */
mpls_label_dpo_t *mpls_label_dpo_pool;
| 24 | |
| 25 | static mpls_label_dpo_t * |
| 26 | mpls_label_dpo_alloc (void) |
| 27 | { |
| 28 | mpls_label_dpo_t *mld; |
| 29 | |
| 30 | pool_get_aligned(mpls_label_dpo_pool, mld, CLIB_CACHE_LINE_BYTES); |
| 31 | memset(mld, 0, sizeof(*mld)); |
| 32 | |
| 33 | dpo_reset(&mld->mld_dpo); |
| 34 | |
| 35 | return (mld); |
| 36 | } |
| 37 | |
| 38 | static index_t |
| 39 | mpls_label_dpo_get_index (mpls_label_dpo_t *mld) |
| 40 | { |
| 41 | return (mld - mpls_label_dpo_pool); |
| 42 | } |
| 43 | |
| 44 | index_t |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 45 | mpls_label_dpo_create (mpls_label_t *label_stack, |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 46 | mpls_eos_bit_t eos, |
| 47 | u8 ttl, |
| 48 | u8 exp, |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 49 | dpo_proto_t payload_proto, |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 50 | const dpo_id_t *dpo) |
| 51 | { |
| 52 | mpls_label_dpo_t *mld; |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 53 | u32 ii; |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 54 | |
| 55 | mld = mpls_label_dpo_alloc(); |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 56 | mld->mld_n_labels = vec_len(label_stack); |
Neale Ranns | 9ca18c6 | 2016-12-10 21:08:09 +0000 | [diff] [blame] | 57 | mld->mld_n_hdr_bytes = mld->mld_n_labels * sizeof(mld->mld_hdr[0]); |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 58 | mld->mld_payload_proto = payload_proto; |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 59 | |
| 60 | /* |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 61 | * construct label rewrite headers for each value value passed. |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 62 | * get the header in network byte order since we will paint it |
| 63 | * on a packet in the data-plane |
| 64 | */ |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 65 | |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 66 | for (ii = 0; ii < mld->mld_n_labels-1; ii++) |
| 67 | { |
| 68 | vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]); |
| 69 | vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, 255); |
| 70 | vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, 0); |
| 71 | vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, MPLS_NON_EOS); |
| 72 | mld->mld_hdr[ii].label_exp_s_ttl = |
| 73 | clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl); |
| 74 | } |
| 75 | |
| 76 | /* |
| 77 | * the inner most label |
| 78 | */ |
| 79 | ii = mld->mld_n_labels-1; |
| 80 | |
| 81 | vnet_mpls_uc_set_label(&mld->mld_hdr[ii].label_exp_s_ttl, label_stack[ii]); |
| 82 | vnet_mpls_uc_set_ttl(&mld->mld_hdr[ii].label_exp_s_ttl, ttl); |
| 83 | vnet_mpls_uc_set_exp(&mld->mld_hdr[ii].label_exp_s_ttl, exp); |
| 84 | vnet_mpls_uc_set_s(&mld->mld_hdr[ii].label_exp_s_ttl, eos); |
| 85 | mld->mld_hdr[ii].label_exp_s_ttl = |
| 86 | clib_host_to_net_u32(mld->mld_hdr[ii].label_exp_s_ttl); |
| 87 | |
| 88 | /* |
| 89 | * stack this label objct on its parent. |
| 90 | */ |
| 91 | dpo_stack(DPO_MPLS_LABEL, |
| 92 | mld->mld_payload_proto, |
| 93 | &mld->mld_dpo, |
| 94 | dpo); |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 95 | |
| 96 | return (mpls_label_dpo_get_index(mld)); |
| 97 | } |
| 98 | |
| 99 | u8* |
| 100 | format_mpls_label_dpo (u8 *s, va_list *args) |
| 101 | { |
| 102 | index_t index = va_arg (*args, index_t); |
| 103 | u32 indent = va_arg (*args, u32); |
| 104 | mpls_unicast_header_t hdr; |
| 105 | mpls_label_dpo_t *mld; |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 106 | u32 ii; |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 107 | |
| 108 | mld = mpls_label_dpo_get(index); |
| 109 | |
Neale Ranns | 8fe8cc2 | 2016-11-01 10:05:08 +0000 | [diff] [blame] | 110 | s = format(s, "mpls-label:[%d]:", index); |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 111 | |
| 112 | for (ii = 0; ii < mld->mld_n_labels; ii++) |
| 113 | { |
| 114 | hdr.label_exp_s_ttl = |
| 115 | clib_net_to_host_u32(mld->mld_hdr[ii].label_exp_s_ttl); |
| 116 | s = format(s, "%U", format_mpls_header, hdr); |
| 117 | } |
| 118 | |
| 119 | s = format(s, "\n%U", format_white_space, indent); |
Neale Ranns | 8fe8cc2 | 2016-11-01 10:05:08 +0000 | [diff] [blame] | 120 | s = format(s, "%U", format_dpo_id, &mld->mld_dpo, indent+2); |
| 121 | |
| 122 | return (s); |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 123 | } |
| 124 | |
| 125 | static void |
| 126 | mpls_label_dpo_lock (dpo_id_t *dpo) |
| 127 | { |
| 128 | mpls_label_dpo_t *mld; |
| 129 | |
| 130 | mld = mpls_label_dpo_get(dpo->dpoi_index); |
| 131 | |
| 132 | mld->mld_locks++; |
| 133 | } |
| 134 | |
| 135 | static void |
| 136 | mpls_label_dpo_unlock (dpo_id_t *dpo) |
| 137 | { |
| 138 | mpls_label_dpo_t *mld; |
| 139 | |
| 140 | mld = mpls_label_dpo_get(dpo->dpoi_index); |
| 141 | |
| 142 | mld->mld_locks--; |
| 143 | |
| 144 | if (0 == mld->mld_locks) |
| 145 | { |
| 146 | dpo_reset(&mld->mld_dpo); |
| 147 | pool_put(mpls_label_dpo_pool, mld); |
| 148 | } |
| 149 | } |
| 150 | |
/**
 * @brief A struct to hold tracing information for the MPLS label imposition
 * node.
 */
typedef struct mpls_label_imposition_trace_t_
{
    /**
     * The MPLS header imposed. Stored in network byte order, exactly as
     * written into the packet; the trace formatter converts it back.
     */
    mpls_unicast_header_t hdr;
} mpls_label_imposition_trace_t;
| 162 | |
| 163 | always_inline uword |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 164 | mpls_label_imposition_inline (vlib_main_t * vm, |
| 165 | vlib_node_runtime_t * node, |
| 166 | vlib_frame_t * from_frame, |
| 167 | u8 payload_is_ip4, |
| 168 | u8 payload_is_ip6) |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 169 | { |
| 170 | u32 n_left_from, next_index, * from, * to_next; |
| 171 | |
| 172 | from = vlib_frame_vector_args (from_frame); |
| 173 | n_left_from = from_frame->n_vectors; |
| 174 | |
| 175 | next_index = node->cached_next_index; |
| 176 | |
| 177 | while (n_left_from > 0) |
| 178 | { |
| 179 | u32 n_left_to_next; |
| 180 | |
| 181 | vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next); |
| 182 | |
Neale Ranns | 9ca18c6 | 2016-12-10 21:08:09 +0000 | [diff] [blame] | 183 | while (n_left_from >= 4 && n_left_to_next >= 2) |
| 184 | { |
| 185 | mpls_unicast_header_t *hdr0, *hdr1; |
| 186 | mpls_label_dpo_t *mld0, *mld1; |
| 187 | u32 bi0, mldi0, bi1, mldi1; |
| 188 | vlib_buffer_t * b0, *b1; |
| 189 | u32 next0, next1; |
| 190 | u8 ttl0, ttl1; |
| 191 | |
| 192 | bi0 = to_next[0] = from[0]; |
| 193 | bi1 = to_next[1] = from[1]; |
| 194 | |
| 195 | /* Prefetch next iteration. */ |
| 196 | { |
| 197 | vlib_buffer_t * p2, * p3; |
| 198 | |
| 199 | p2 = vlib_get_buffer (vm, from[2]); |
| 200 | p3 = vlib_get_buffer (vm, from[3]); |
| 201 | |
| 202 | vlib_prefetch_buffer_header (p2, STORE); |
| 203 | vlib_prefetch_buffer_header (p3, STORE); |
| 204 | |
| 205 | CLIB_PREFETCH (p2->data, sizeof (hdr0[0]), STORE); |
| 206 | CLIB_PREFETCH (p3->data, sizeof (hdr0[0]), STORE); |
| 207 | } |
| 208 | |
| 209 | from += 2; |
| 210 | to_next += 2; |
| 211 | n_left_from -= 2; |
| 212 | n_left_to_next -= 2; |
| 213 | |
| 214 | b0 = vlib_get_buffer (vm, bi0); |
| 215 | b1 = vlib_get_buffer (vm, bi1); |
| 216 | |
| 217 | /* dst lookup was done by ip4 lookup */ |
| 218 | mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; |
| 219 | mldi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX]; |
| 220 | mld0 = mpls_label_dpo_get(mldi0); |
| 221 | mld1 = mpls_label_dpo_get(mldi1); |
| 222 | |
| 223 | if (payload_is_ip4) |
| 224 | { |
| 225 | /* |
| 226 | * decrement the TTL on ingress to the LSP |
| 227 | */ |
| 228 | ip4_header_t * ip0 = vlib_buffer_get_current(b0); |
| 229 | ip4_header_t * ip1 = vlib_buffer_get_current(b1); |
| 230 | u32 checksum0; |
| 231 | u32 checksum1; |
| 232 | |
| 233 | checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100); |
| 234 | checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100); |
| 235 | |
| 236 | checksum0 += checksum0 >= 0xffff; |
| 237 | checksum1 += checksum1 >= 0xffff; |
| 238 | |
| 239 | ip0->checksum = checksum0; |
| 240 | ip1->checksum = checksum1; |
| 241 | |
| 242 | ip0->ttl -= 1; |
| 243 | ip1->ttl -= 1; |
| 244 | |
| 245 | ttl1 = ip1->ttl; |
| 246 | ttl0 = ip0->ttl; |
| 247 | } |
| 248 | else if (payload_is_ip6) |
| 249 | { |
| 250 | /* |
| 251 | * decrement the TTL on ingress to the LSP |
| 252 | */ |
| 253 | ip6_header_t * ip0 = vlib_buffer_get_current(b0); |
| 254 | ip6_header_t * ip1 = vlib_buffer_get_current(b1); |
| 255 | |
| 256 | |
| 257 | ip0->hop_limit -= 1; |
| 258 | ip1->hop_limit -= 1; |
| 259 | |
| 260 | ttl0 = ip0->hop_limit; |
| 261 | ttl1 = ip1->hop_limit; |
| 262 | } |
| 263 | else |
| 264 | { |
| 265 | /* |
| 266 | * else, the packet to be encapped is an MPLS packet |
| 267 | */ |
| 268 | if (PREDICT_TRUE(vnet_buffer(b0)->mpls.first)) |
| 269 | { |
| 270 | /* |
| 271 | * The first label to be imposed on the packet. this is a label swap. |
| 272 | * in which case we stashed the TTL and EXP bits in the |
| 273 | * packet in the lookup node |
| 274 | */ |
| 275 | ASSERT(0 != vnet_buffer (b0)->mpls.ttl); |
| 276 | |
| 277 | ttl0 = vnet_buffer(b0)->mpls.ttl - 1; |
| 278 | } |
| 279 | else |
| 280 | { |
| 281 | /* |
| 282 | * not the first label. implying we are recusring down a chain of |
| 283 | * output labels. |
| 284 | * Each layer is considered a new LSP - hence the TTL is reset. |
| 285 | */ |
| 286 | ttl0 = 255; |
| 287 | } |
| 288 | if (PREDICT_TRUE(vnet_buffer(b1)->mpls.first)) |
| 289 | { |
| 290 | ASSERT(1 != vnet_buffer (b1)->mpls.ttl); |
| 291 | ttl1 = vnet_buffer(b1)->mpls.ttl - 1; |
| 292 | } |
| 293 | else |
| 294 | { |
| 295 | ttl1 = 255; |
| 296 | } |
| 297 | } |
| 298 | vnet_buffer(b0)->mpls.first = 0; |
| 299 | vnet_buffer(b1)->mpls.first = 0; |
| 300 | |
| 301 | /* Paint the MPLS header */ |
| 302 | vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes)); |
| 303 | vlib_buffer_advance(b1, -(mld1->mld_n_hdr_bytes)); |
| 304 | |
| 305 | hdr0 = vlib_buffer_get_current(b0); |
| 306 | hdr1 = vlib_buffer_get_current(b1); |
| 307 | |
| 308 | clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes); |
| 309 | clib_memcpy(hdr1, mld1->mld_hdr, mld1->mld_n_hdr_bytes); |
| 310 | |
| 311 | /* fixup the TTL for the inner most label */ |
| 312 | hdr0 = hdr0 + (mld0->mld_n_labels - 1); |
| 313 | hdr1 = hdr1 + (mld1->mld_n_labels - 1); |
| 314 | ((char*)hdr0)[3] = ttl0; |
| 315 | ((char*)hdr1)[3] = ttl1; |
| 316 | |
| 317 | next0 = mld0->mld_dpo.dpoi_next_node; |
| 318 | next1 = mld1->mld_dpo.dpoi_next_node; |
| 319 | vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index; |
| 320 | vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mld1->mld_dpo.dpoi_index; |
| 321 | |
| 322 | if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) |
| 323 | { |
| 324 | mpls_label_imposition_trace_t *tr = |
| 325 | vlib_add_trace (vm, node, b0, sizeof (*tr)); |
| 326 | tr->hdr = *hdr0; |
| 327 | } |
| 328 | if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) |
| 329 | { |
| 330 | mpls_label_imposition_trace_t *tr = |
| 331 | vlib_add_trace (vm, node, b1, sizeof (*tr)); |
| 332 | tr->hdr = *hdr1; |
| 333 | } |
| 334 | |
| 335 | vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, |
| 336 | n_left_to_next, |
| 337 | bi0, bi1, next0, next1); |
| 338 | } |
| 339 | |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 340 | while (n_left_from > 0 && n_left_to_next > 0) |
| 341 | { |
| 342 | mpls_unicast_header_t *hdr0; |
| 343 | mpls_label_dpo_t *mld0; |
| 344 | vlib_buffer_t * b0; |
| 345 | u32 bi0, mldi0; |
| 346 | u32 next0; |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 347 | u8 ttl; |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 348 | |
| 349 | bi0 = from[0]; |
| 350 | to_next[0] = bi0; |
| 351 | from += 1; |
| 352 | to_next += 1; |
| 353 | n_left_from -= 1; |
| 354 | n_left_to_next -= 1; |
| 355 | |
| 356 | b0 = vlib_get_buffer (vm, bi0); |
| 357 | |
| 358 | /* dst lookup was done by ip4 lookup */ |
| 359 | mldi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; |
| 360 | mld0 = mpls_label_dpo_get(mldi0); |
| 361 | |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 362 | if (payload_is_ip4) |
| 363 | { |
| 364 | /* |
| 365 | * decrement the TTL on ingress to the LSP |
| 366 | */ |
| 367 | ip4_header_t * ip0 = vlib_buffer_get_current(b0); |
| 368 | u32 checksum0; |
| 369 | |
| 370 | checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100); |
| 371 | checksum0 += checksum0 >= 0xffff; |
| 372 | |
| 373 | ip0->checksum = checksum0; |
| 374 | ip0->ttl -= 1; |
| 375 | ttl = ip0->ttl; |
| 376 | } |
| 377 | else if (payload_is_ip6) |
| 378 | { |
| 379 | /* |
| 380 | * decrement the TTL on ingress to the LSP |
| 381 | */ |
| 382 | ip6_header_t * ip0 = vlib_buffer_get_current(b0); |
| 383 | |
| 384 | ip0->hop_limit -= 1; |
| 385 | ttl = ip0->hop_limit; |
| 386 | } |
| 387 | else |
| 388 | { |
| 389 | /* |
| 390 | * else, the packet to be encapped is an MPLS packet |
| 391 | */ |
| 392 | if (vnet_buffer(b0)->mpls.first) |
| 393 | { |
| 394 | /* |
| 395 | * The first label to be imposed on the packet. this is a label swap. |
| 396 | * in which case we stashed the TTL and EXP bits in the |
| 397 | * packet in the lookup node |
| 398 | */ |
| 399 | ASSERT(0 != vnet_buffer (b0)->mpls.ttl); |
| 400 | |
| 401 | ttl = vnet_buffer(b0)->mpls.ttl - 1; |
| 402 | } |
| 403 | else |
| 404 | { |
| 405 | /* |
| 406 | * not the first label. implying we are recusring down a chain of |
| 407 | * output labels. |
| 408 | * Each layer is considered a new LSP - hence the TTL is reset. |
| 409 | */ |
| 410 | ttl = 255; |
| 411 | } |
| 412 | } |
| 413 | vnet_buffer(b0)->mpls.first = 0; |
| 414 | |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 415 | /* Paint the MPLS header */ |
Neale Ranns | 9ca18c6 | 2016-12-10 21:08:09 +0000 | [diff] [blame] | 416 | vlib_buffer_advance(b0, -(mld0->mld_n_hdr_bytes)); |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 417 | hdr0 = vlib_buffer_get_current(b0); |
Neale Ranns | 9ca18c6 | 2016-12-10 21:08:09 +0000 | [diff] [blame] | 418 | clib_memcpy(hdr0, mld0->mld_hdr, mld0->mld_n_hdr_bytes); |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 419 | |
| 420 | /* fixup the TTL for the inner most label */ |
| 421 | hdr0 = hdr0 + (mld0->mld_n_labels - 1); |
| 422 | ((char*)hdr0)[3] = ttl; |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 423 | |
| 424 | next0 = mld0->mld_dpo.dpoi_next_node; |
| 425 | vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mld0->mld_dpo.dpoi_index; |
| 426 | |
Neale Ranns | 9ca18c6 | 2016-12-10 21:08:09 +0000 | [diff] [blame] | 427 | if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 428 | { |
| 429 | mpls_label_imposition_trace_t *tr = |
| 430 | vlib_add_trace (vm, node, b0, sizeof (*tr)); |
| 431 | tr->hdr = *hdr0; |
| 432 | } |
| 433 | |
| 434 | vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, |
| 435 | n_left_to_next, bi0, next0); |
| 436 | } |
| 437 | vlib_put_next_frame (vm, node, next_index, n_left_to_next); |
| 438 | } |
| 439 | return from_frame->n_vectors; |
| 440 | } |
| 441 | |
| 442 | static u8 * |
| 443 | format_mpls_label_imposition_trace (u8 * s, va_list * args) |
| 444 | { |
| 445 | CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); |
| 446 | CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); |
| 447 | mpls_label_imposition_trace_t * t; |
| 448 | mpls_unicast_header_t hdr; |
| 449 | uword indent; |
| 450 | |
| 451 | t = va_arg (*args, mpls_label_imposition_trace_t *); |
| 452 | indent = format_get_indent (s); |
| 453 | hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl); |
| 454 | |
| 455 | s = format (s, "%Umpls-header:%U", |
| 456 | format_white_space, indent, |
| 457 | format_mpls_header, hdr); |
| 458 | return (s); |
| 459 | } |
| 460 | |
Neale Ranns | ad422ed | 2016-11-02 14:20:04 +0000 | [diff] [blame] | 461 | static uword |
| 462 | mpls_label_imposition (vlib_main_t * vm, |
| 463 | vlib_node_runtime_t * node, |
| 464 | vlib_frame_t * frame) |
| 465 | { |
| 466 | return (mpls_label_imposition_inline(vm, node, frame, 0, 0)); |
| 467 | } |
| 468 | |
/**
 * Graph node registration for MPLS-payload label imposition.
 * Only error-drop is statically known; the real next node is taken at
 * run time from the stacked parent DPO.
 */
VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
    .function = mpls_label_imposition,
    .name = "mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
                              mpls_label_imposition)
| 482 | |
| 483 | static uword |
| 484 | ip4_mpls_label_imposition (vlib_main_t * vm, |
| 485 | vlib_node_runtime_t * node, |
| 486 | vlib_frame_t * frame) |
| 487 | { |
| 488 | return (mpls_label_imposition_inline(vm, node, frame, 1, 0)); |
| 489 | } |
| 490 | |
/**
 * Graph node registration for IPv4-payload label imposition.
 */
VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
    .function = ip4_mpls_label_imposition,
    .name = "ip4-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
                              ip4_mpls_label_imposition)
| 504 | |
| 505 | static uword |
| 506 | ip6_mpls_label_imposition (vlib_main_t * vm, |
| 507 | vlib_node_runtime_t * node, |
| 508 | vlib_frame_t * frame) |
| 509 | { |
| 510 | return (mpls_label_imposition_inline(vm, node, frame, 0, 1)); |
| 511 | } |
| 512 | |
/**
 * Graph node registration for IPv6-payload label imposition.
 */
VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
    .function = ip6_mpls_label_imposition,
    .name = "ip6-mpls-label-imposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_imposition_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    }
};
VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
                              ip6_mpls_label_imposition)
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 526 | |
/**
 * Report pool occupancy and element size for the MPLS label DPO pool
 * (invoked via the dv_mem_show virtual function).
 */
static void
mpls_label_dpo_mem_show (void)
{
    fib_show_memory_usage("MPLS label",
                          pool_elts(mpls_label_dpo_pool),
                          pool_len(mpls_label_dpo_pool),
                          sizeof(mpls_label_dpo_t));
}
| 535 | |
/**
 * Virtual function table registered for DPO_MPLS_LABEL objects
 */
const static dpo_vft_t mld_vft = {
    .dv_lock = mpls_label_dpo_lock,
    .dv_unlock = mpls_label_dpo_unlock,
    .dv_format = format_mpls_label_dpo,
    .dv_mem_show = mpls_label_dpo_mem_show,
};
| 542 | |
/*
 * NULL-terminated lists of the graph nodes that perform label
 * imposition, one list per payload protocol; indexed by dpo_proto_t
 * at registration time.
 */
const static char* const mpls_label_imp_ip4_nodes[] =
{
    "ip4-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_ip6_nodes[] =
{
    "ip6-mpls-label-imposition",
    NULL,
};
const static char* const mpls_label_imp_mpls_nodes[] =
{
    "mpls-label-imposition",
    NULL,
};
const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes,
    [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes,
    [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes,
};
| 564 | |
| 565 | |
/*
 * Module init: register the MPLS label DPO type with its virtual
 * function table and per-payload-protocol imposition nodes.
 */
void
mpls_label_dpo_module_init (void)
{
    dpo_register(DPO_MPLS_LABEL, &mld_vft, mpls_label_imp_nodes);
}