/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16#include <vnet/buffer.h>
17#include <vnet/vnet.h>
18
19#include <vnet/bier/bier_fmask.h>
20#include <vnet/bier/bier_hdr_inlines.h>
21#include <vnet/bier/bier_table.h>
22#include <vnet/bier/bier_fmask.h>
23
24/**
Paul Vinciguerra8feeaff2019-03-27 11:25:48 -070025 * Struct maintaining the per-worker thread data for BIER lookups
Neale Rannsd792d9c2017-10-21 10:53:20 -070026 */
27typedef struct bier_lookup_main_t_
28{
29 /* per-cpu vector of cloned packets */
30 u32 **blm_clones;
31 /* per-cpu vector of BIER fmasks */
32 u32 **blm_fmasks;
33} bier_lookup_main_t;
34
35/**
36 * Single instance of the lookup main
37 */
38static bier_lookup_main_t bier_lookup_main;
39
40static char * bier_lookup_error_strings[] = {
41#define bier_error(n,s) s,
42#include <vnet/bier/bier_lookup_error.def>
43#undef bier_error
44};
45
46/*
Paul Vinciguerrae6eefb62019-05-13 15:56:41 -040047 * Keep these values semantically the same as BIER lookup
Neale Rannsd792d9c2017-10-21 10:53:20 -070048 */
49#define foreach_bier_lookup_next \
50 _(DROP, "bier-drop") \
51 _(OUTPUT, "bier-output")
52
53typedef enum {
54#define _(s,n) BIER_LOOKUP_NEXT_##s,
55 foreach_bier_lookup_next
56#undef _
57 BIER_LOOKUP_N_NEXT,
58} bier_lookup_next_t;
59
/* Error/counter indices, generated from the same .def file as the
 * error strings so names and strings stay aligned. */
typedef enum {
#define bier_error(n,s) BIER_LOOKUP_ERROR_##n,
#include <vnet/bier/bier_lookup_error.def>
#undef bier_error
    BIER_LOOKUP_N_ERROR,
} bier_lookup_error_t;
66
/* Forward declaration; the registration itself is at the bottom of
 * the file and is referenced by the node function for counters. */
vlib_node_registration_t bier_lookup_node;

/**
 * @brief Packet trace record for a BIER lookup
 */
typedef struct bier_lookup_trace_t_
{
    /* next-node index chosen for this packet/clone */
    u32 next_index;
    /* index of the BIER table the lookup was performed in */
    index_t bt_index;
    /* index of the fmask the clone was sent to (~0 on drop) */
    index_t bfm_index;
} bier_lookup_trace_t;
78
79static uword
80bier_lookup (vlib_main_t * vm,
81 vlib_node_runtime_t * node,
82 vlib_frame_t * from_frame)
83{
84 u32 n_left_from, next_index, * from, * to_next;
85 bier_lookup_main_t *blm = &bier_lookup_main;
86 u32 thread_index = vlib_get_thread_index();
Neale Rannsf0510722018-01-31 11:35:41 -080087 bier_bit_mask_bucket_t buckets_copy[BIER_HDR_BUCKETS_4096];
Neale Rannsd792d9c2017-10-21 10:53:20 -070088
89 from = vlib_frame_vector_args (from_frame);
90 n_left_from = from_frame->n_vectors;
91 next_index = BIER_LOOKUP_NEXT_DROP;
92
93 while (n_left_from > 0)
94 {
95 u32 n_left_to_next;
96
97 vlib_get_next_frame (vm, node, next_index,
98 to_next, n_left_to_next);
99
100 while (n_left_from > 0 && n_left_to_next > 0)
101 {
Neale Rannsd792d9c2017-10-21 10:53:20 -0700102 u32 next0, bi0, n_bytes, bti0, bfmi0;
103 const bier_fmask_t *bfm0;
104 const bier_table_t *bt0;
105 u16 index, num_buckets;
106 const bier_hdr_t *bh0;
107 bier_bit_string_t bbs;
108 vlib_buffer_t *b0;
109 bier_bp_t fbs;
110 int bucket;
111
112 bi0 = from[0];
113 from += 1;
114 n_left_from -= 1;
115
116 b0 = vlib_get_buffer (vm, bi0);
117 bh0 = vlib_buffer_get_current (b0);
118 bti0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
119
120 /*
121 * default to drop so that if no bits are matched then
122 * that is where we go - DROP.
123 */
124 next0 = BIER_LOOKUP_NEXT_DROP;
125
126 /*
127 * At the imposition or input node,
128 * we stored the BIER Table index in the TX adjacency
129 */
130 bt0 = bier_table_get(vnet_buffer(b0)->ip.adj_index[VLIB_TX]);
131
132 /*
133 * we should only forward via one for the ECMP tables
134 */
135 ASSERT(!bier_table_is_main(bt0));
136
137 /*
138 * number of integer sized buckets
139 */
140 n_bytes = bier_hdr_len_id_to_num_buckets(bt0->bt_id.bti_hdr_len);
Neale Ranns91286372017-12-05 13:24:04 -0800141 vnet_buffer(b0)->mpls.bier.n_bytes = n_bytes;
Neale Rannsd792d9c2017-10-21 10:53:20 -0700142 vnet_buffer(b0)->sw_if_index[VLIB_TX] = ~0;
143 num_buckets = n_bytes / sizeof(int);
144 bier_bit_string_init(&bbs,
145 bt0->bt_id.bti_hdr_len,
146 buckets_copy);
147 memcpy(bbs.bbs_buckets, bh0->bh_bit_string, bbs.bbs_len);
148
149 /*
Benoît Ganne8a4bfda2019-07-22 14:21:46 +0200150 * reset the fmask storage vector
Neale Rannsd792d9c2017-10-21 10:53:20 -0700151 */
152 vec_reset_length (blm->blm_fmasks[thread_index]);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700153
154 /*
155 * Loop through the buckets in the header
156 */
157 for (index = 0; index < num_buckets; index++) {
158 /*
159 * loop through each bit in the bucket
160 */
161 bucket = ((int*)bbs.bbs_buckets)[index];
162
163 while (bucket) {
164 fbs = bier_find_first_bit_string_set(bucket);
165 fbs += (((num_buckets - 1) - index) *
166 BIER_BIT_MASK_BITS_PER_INT);
167
168 bfmi0 = bier_table_fwd_lookup(bt0, fbs);
169
170 /*
171 * whatever happens, the bit we just looked for
172 * MUST be cleared from the packet
173 * otherwise we could be in this loop a while ...
174 */
175 bier_bit_string_clear_bit(&bbs, fbs);
176
177 if (PREDICT_TRUE(INDEX_INVALID != bfmi0))
178 {
179 bfm0 = bier_fmask_get(bfmi0);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700180
181 /*
182 * use the bit-string on the fmask to reset
183 * the bits in the header we are walking
184 */
185 bier_bit_string_clear_string(
186 &bfm0->bfm_bits.bfmb_input_reset_string,
187 &bbs);
188 bucket = ((int*)bbs.bbs_buckets)[index];
189
190 /*
191 * the fmask is resolved so replicate a
192 * packet its way
193 */
194 next0 = BIER_LOOKUP_NEXT_OUTPUT;
195
196 vec_add1 (blm->blm_fmasks[thread_index], bfmi0);
197 } else {
198 /*
199 * go to the next bit-position set
200 */
Neale Rannsf0510722018-01-31 11:35:41 -0800201 vlib_node_increment_counter(
202 vm, node->node_index,
203 BIER_LOOKUP_ERROR_FMASK_UNRES, 1);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700204 bucket = ((int*)bbs.bbs_buckets)[index];
205 continue;
206 }
207 }
208 }
209
210 /*
211 * Full mask now processed.
212 * Create the number of clones we need based on the number
213 * of fmasks we are sending to.
214 */
Neale Rannsf0510722018-01-31 11:35:41 -0800215 u16 num_cloned, clone;
Neale Rannsd792d9c2017-10-21 10:53:20 -0700216 u32 n_clones;
217
218 n_clones = vec_len(blm->blm_fmasks[thread_index]);
219
220 if (PREDICT_TRUE(0 != n_clones))
221 {
Benoît Ganne77100ef2020-04-16 12:40:54 +0200222 vec_set_len(blm->blm_clones[thread_index], n_clones);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700223 num_cloned = vlib_buffer_clone(vm, bi0,
224 blm->blm_clones[thread_index],
Neale Rannsf0510722018-01-31 11:35:41 -0800225 n_clones,
Damjan Marionbd0da972018-10-31 10:59:02 +0100226 VLIB_BUFFER_CLONE_HEAD_SIZE);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700227
Benoît Ganne8a4bfda2019-07-22 14:21:46 +0200228
229 if (num_cloned != n_clones)
Neale Rannsd792d9c2017-10-21 10:53:20 -0700230 {
Benoît Ganne77100ef2020-04-16 12:40:54 +0200231 vec_set_len(blm->blm_clones[thread_index], num_cloned);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700232 vlib_node_increment_counter
233 (vm, node->node_index,
234 BIER_LOOKUP_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
235 }
236
237 for (clone = 0; clone < num_cloned; clone++)
238 {
239 vlib_buffer_t *c0;
240 u32 ci0;
241
242 ci0 = blm->blm_clones[thread_index][clone];
243 c0 = vlib_get_buffer(vm, ci0);
Neale Ranns91286372017-12-05 13:24:04 -0800244 vnet_buffer(c0)->ip.adj_index[VLIB_TX] =
245 blm->blm_fmasks[thread_index][clone];
Neale Rannsd792d9c2017-10-21 10:53:20 -0700246
247 to_next[0] = ci0;
248 to_next += 1;
249 n_left_to_next -= 1;
250
251 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
252 {
253 bier_lookup_trace_t *tr;
254
Neale Rannsd792d9c2017-10-21 10:53:20 -0700255 tr = vlib_add_trace (vm, node, c0, sizeof (*tr));
256 tr->bt_index = bti0;
257 tr->bfm_index = blm->blm_fmasks[thread_index][clone];
Neale Rannsd792d9c2017-10-21 10:53:20 -0700258 }
259
260 vlib_validate_buffer_enqueue_x1(vm, node, next_index,
261 to_next, n_left_to_next,
262 ci0, next0);
263
264 /*
265 * After the enqueue it is possible that we over-flow the
266 * frame of the to-next node. When this happens we need to
267 * 'put' that full frame to the node and get a fresh empty
268 * one. Note that these are macros with side effects that
269 * change to_next & n_left_to_next
270 */
271 if (PREDICT_FALSE(0 == n_left_to_next))
272 {
273 vlib_put_next_frame (vm, node, next_index,
274 n_left_to_next);
275 vlib_get_next_frame (vm, node, next_index,
276 to_next, n_left_to_next);
277 }
278 }
279 }
280 else
281 {
282 /*
283 * no clones/replications required. drop this packet
284 */
285 next0 = BIER_LOOKUP_NEXT_DROP;
286 to_next[0] = bi0;
287 to_next += 1;
288 n_left_to_next -= 1;
289
290 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
291 {
292 bier_lookup_trace_t *tr;
293
294 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
295
296 tr->bt_index = bti0;
297 tr->bfm_index = ~0;
298 }
299
300 vlib_validate_buffer_enqueue_x1(vm, node, next_index,
301 to_next, n_left_to_next,
302 bi0, next0);
303 }
304 }
305
Neale Rannsf0510722018-01-31 11:35:41 -0800306 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700307 }
308
Neale Rannsf0510722018-01-31 11:35:41 -0800309 vlib_node_increment_counter(vm, bier_lookup_node.index,
310 BIER_LOOKUP_ERROR_NONE,
311 from_frame->n_vectors);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700312 return (from_frame->n_vectors);
313}
314
315static u8 *
316format_bier_lookup_trace (u8 * s, va_list * args)
317{
318 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
319 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
320 bier_lookup_trace_t * t = va_arg (*args, bier_lookup_trace_t *);
321
322 s = format (s, "BIER: next [%d], tbl:%d BFM:%d",
323 t->next_index,
324 t->bt_index,
325 t->bfm_index);
326 return s;
327}
328
/* Graph-node registration: one u32 buffer index per vector slot,
 * error strings from the .def file, and next nodes matching
 * bier_lookup_next_t. */
VLIB_REGISTER_NODE (bier_lookup_node) = {
    .function = bier_lookup,
    .name = "bier-lookup",
    /* Takes a vector of packets. */
    .vector_size = sizeof (u32),

    .n_errors = BIER_LOOKUP_N_ERROR,
    .error_strings = bier_lookup_error_strings,

    .format_trace = format_bier_lookup_trace,
    .n_next_nodes = BIER_LOOKUP_N_NEXT,
    .next_nodes = {
        [BIER_LOOKUP_NEXT_DROP] = "bier-drop",
        [BIER_LOOKUP_NEXT_OUTPUT] = "bier-output",
    },
};
345
346clib_error_t *
347bier_lookup_module_init (vlib_main_t * vm)
348{
349 bier_lookup_main_t *blm = &bier_lookup_main;
350 u32 thread_index;
351
352 vec_validate (blm->blm_clones, vlib_num_workers());
353 vec_validate (blm->blm_fmasks, vlib_num_workers());
354
355 for (thread_index = 0;
356 thread_index <= vlib_num_workers();
357 thread_index++)
358 {
359 /*
Neale Rannsf0510722018-01-31 11:35:41 -0800360 * 1024 is the most we will ever need to support
361 * a Bit-Mask length of 1024
Neale Rannsd792d9c2017-10-21 10:53:20 -0700362 */
Neale Rannsf0510722018-01-31 11:35:41 -0800363 vec_validate(blm->blm_fmasks[thread_index], 1023);
364 vec_validate(blm->blm_clones[thread_index], 1023);
Neale Rannsd792d9c2017-10-21 10:53:20 -0700365 }
366
367 return 0;
368}
369
370VLIB_INIT_FUNCTION (bier_lookup_module_init);