/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/mpls/mpls_lookup.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/replicate_dpo.h>

/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node
 */
#ifndef CLIB_MARCH_VARIANT
u32 mpls_lookup_to_replicate_edge;
#endif /* CLIB_MARCH_VARIANT */
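
/*
 * A lookup result with the MPLS_IS_REPLICATE bit set carries a replicate
 * DPO index rather than a load-balance index; such packets are sent down
 * this edge to the mpls-replicate node, with the flag masked off to
 * recover the index (see the lookup loops below).
 */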

typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 label_net_byte_order;
  u32 hash;
} mpls_lookup_trace_t;

static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
              "label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label(
                  clib_net_to_host_u32(t->label_net_byte_order)),
              vnet_mpls_uc_get_s(
                  clib_net_to_host_u32(t->label_net_byte_order)));
  return s;
}

VLIB_NODE_FN (mpls_lookup_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 thread_index = vlib_get_thread_index();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

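      /*
       * Quad-loop: process four packets per iteration while at least
       * eight remain, prefetching the buffer headers and MPLS headers
       * of the following four to hide memory latency.
       */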
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p4, *p5, *p6, *p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);
            vlib_prefetch_buffer_header (p6, STORE);
            vlib_prefetch_buffer_header (p7, STORE);

            CLIB_PREFETCH (p4->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p6->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p7->data, sizeof (h0[0]), LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);

          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;

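          /*
           * The flow hash is computed lazily below: it is only needed
           * when the result is a load-balance with more than one bucket.
           */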
          if (MPLS_IS_REPLICATE & lbi0)
            {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
                }
              next0 = dpo0->dpoi_next_node;

              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
            }
          if (MPLS_IS_REPLICATE & lbi1)
            {
              next1 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
                  (lbi1 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb1 = load_balance_get(lbi1);
              ASSERT (lb1->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb1->lb_n_buckets));

              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
                {
                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
                  dpo1 = load_balance_get_fwd_bucket
                      (lb1,
                       (hash_c1 & (lb1->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo1 = load_balance_get_bucket_i (lb1, 0);
                }
              next1 = dpo1->dpoi_next_node;

              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi1, 1,
                   vlib_buffer_length_in_chain (vm, b1));
            }
          if (MPLS_IS_REPLICATE & lbi2)
            {
              next2 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
                  (lbi2 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb2 = load_balance_get(lbi2);
              ASSERT (lb2->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb2->lb_n_buckets));

              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
                {
                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
                  dpo2 = load_balance_get_fwd_bucket
                      (lb2,
                       (hash_c2 & (lb2->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo2 = load_balance_get_bucket_i (lb2, 0);
                }
              next2 = dpo2->dpoi_next_node;

              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi2, 1,
                   vlib_buffer_length_in_chain (vm, b2));
            }
          if (MPLS_IS_REPLICATE & lbi3)
            {
              next3 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
                  (lbi3 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb3 = load_balance_get(lbi3);
              ASSERT (lb3->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb3->lb_n_buckets));

              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
                {
                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
                  dpo3 = load_balance_get_fwd_bucket
                      (lb3,
                       (hash_c3 & (lb3->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo3 = load_balance_get_bucket_i (lb3, 0);
                }
              next3 = dpo3->dpoi_next_node;

              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi3, 1,
                   vlib_buffer_length_in_chain (vm, b3));
            }

          /*
           * Before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order.
           * The last byte is the TTL.
           * Bits 2 to 4 inclusive of the third byte are the EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
            }

          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
            }

          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
            }

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }

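      /*
       * Single-loop: handle any remaining packets one at a time.
       */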
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;

          if (MPLS_IS_REPLICATE & lbi0)
            {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
                }
              next0 = dpo0->dpoi_next_node;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
            }

          /*
           * Before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order.
           * The last byte is the TTL.
           * Bits 2 to 4 inclusive of the third byte are the EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, mm->mpls_lookup_node_index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}

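/*
 * The error strings are generated from error.def via the X-macro pattern:
 * each mpls_error(n,s) entry expands to just its message string s.
 */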
static char * mpls_error_strings[] = {
#define mpls_error(n,s) s,
#include "error.def"
#undef mpls_error
};

VLIB_REGISTER_NODE (mpls_lookup_node) = {
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_strings = mpls_error_strings,

  .sibling_of = "mpls-load-balance",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};

typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;

static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash %d",
              t->next_index, t->lb_index, t->hash);
  return s;
}
495
Filip Tehlar17fcd982019-03-05 04:32:11 -0800496VLIB_NODE_FN (mpls_load_balance_node) (vlib_main_t * vm,
Neale Ranns2be95c12016-11-19 13:50:04 +0000497 vlib_node_runtime_t * node,
498 vlib_frame_t * frame)
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100499{
500 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
501 u32 n_left_from, n_left_to_next, * from, * to_next;
Damjan Marion586afd72017-04-05 19:18:20 +0200502 u32 thread_index = vlib_get_thread_index();
Neale Ranns2be95c12016-11-19 13:50:04 +0000503 u32 next;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100504
505 from = vlib_frame_vector_args (frame);
506 n_left_from = frame->n_vectors;
507 next = node->cached_next_index;
508
509 while (n_left_from > 0)
510 {
511 vlib_get_next_frame (vm, node, next,
Neale Ranns2be95c12016-11-19 13:50:04 +0000512 to_next, n_left_to_next);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100513
Neale Ranns2be95c12016-11-19 13:50:04 +0000514
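      /*
       * Dual-loop: process two packets per iteration and prefetch the
       * buffers and MPLS headers of the next two.
       */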
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), LOAD);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), LOAD);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);

          /*
           * This node is for via-FIBs, so we can re-use the flow hash
           * stored in the buffer by the previous node in the graph, if
           * present. The hash is shifted at each level of recursion so
           * that the same value is not used at every level, which would
           * lead to polarisation.
           */
          hc0 = hc1 = 0;

          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = vnet_buffer(p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
                }
              dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
            }
          else
            {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
                {
                  hc1 = vnet_buffer(p1)->ip.flow_hash = vnet_buffer(p1)->ip.flow_hash >> 1;
                }
              else
                {
                  hc1 = vnet_buffer(p1)->ip.flow_hash = mpls_compute_flow_hash(mpls1, hc1);
                }
              dpo1 = load_balance_get_fwd_bucket(lb1, (hc1 & lb1->lb_n_buckets_minus_1));
            }
          else
            {
              dpo1 = load_balance_get_bucket_i (lb1, 0);
            }

          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
              (cm, thread_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, p1));

          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
            }
          /* Trace the second packet of the pair too, not just the first. */
          if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->hash = hc1;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }

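      /*
       * Single-loop: handle any remaining packets one at a time.
       */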
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_to_next -= 1;
          n_left_from -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);

          hc0 = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = vnet_buffer(p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
                }
              dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
            }
          else
            {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
            }

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));

          /* Trace single-loop packets too, matching the dual-loop above. */
          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .format_trace = format_mpls_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes =
  {
      [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
  },
};

#ifndef CLIB_MARCH_VARIANT
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
  mpls_main_t *mm = &mpls_main;
  clib_error_t * error;
  vlib_node_t *node = vlib_get_node_by_name (vm, (u8*)"mpls-lookup");

  mm->mpls_lookup_node_index = node->index;

  if ((error = vlib_call_init_function (vm, mpls_init)))
    return error;

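  /*
   * Create the arc to mpls-replicate once at init time and cache the
   * edge index so the forwarding path need not look it up by name.
   */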
  mpls_lookup_to_replicate_edge =
      vlib_node_add_named_next(vm,
                               mm->mpls_lookup_node_index,
                               "mpls-replicate");

  return (NULL);
}

VLIB_INIT_FUNCTION (mpls_lookup_init);
#endif /* CLIB_MARCH_VARIANT */