/* SPDX-License-Identifier: Apache-2.0
 * Copyright(c) 2021 Cisco Systems, Inc.
 */

#include <vppinfra/clib.h>
#include <vlib/vlib.h>
#include <vppinfra/vector_funcs.h>

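/* Extract, from the (buffers, nexts) arrays, all buffers whose next index
 * equals next_index, append them to the frame(s) going to that node, and
 * mark the extracted slots in used_elt_bmp. Returns the number of buffers
 * still left to extract. */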
static_always_inline u32
enqueue_one (vlib_main_t *vm, vlib_node_runtime_t *node, u64 *used_elt_bmp,
	     u16 next_index, u32 *buffers, u16 *nexts, u32 n_buffers,
	     u32 n_left, u32 *tmp)
{
  u64 match_bmp[VLIB_FRAME_SIZE / 64];
  vlib_frame_t *f;
  u32 n_extracted, n_free;
  u32 *to;

  f = vlib_get_next_frame_internal (vm, node, next_index, 0);

  n_free = VLIB_FRAME_SIZE - f->n_vectors;

  /* if the frame has enough space for the worst case, we can avoid
   * use of tmp */
  if (n_free >= n_left)
    to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
  else
    to = tmp;

  clib_mask_compare_u16 (next_index, nexts, match_bmp, n_buffers);

  n_extracted = clib_compress_u32 (to, buffers, match_bmp, n_buffers);

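  /* remember which elements were consumed so the caller can skip them */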
  for (int i = 0; i < ARRAY_LEN (match_bmp); i++)
    used_elt_bmp[i] |= match_bmp[i];

  if (to != tmp)
    {
      /* indices already written to frame, just close it */
      vlib_put_next_frame (vm, node, next_index, n_free - n_extracted);
    }
  else if (n_free >= n_extracted)
    {
      /* enough space in the existing frame */
      to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
      vlib_buffer_copy_indices (to, tmp, n_extracted);
      vlib_put_next_frame (vm, node, next_index, n_free - n_extracted);
    }
  else
    {
      /* full frame */
      to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
      vlib_buffer_copy_indices (to, tmp, n_free);
      vlib_put_next_frame (vm, node, next_index, 0);

      /* second frame */
      u32 n_2nd_frame = n_extracted - n_free;
      f = vlib_get_next_frame_internal (vm, node, next_index, 1);
      to = vlib_frame_vector_args (f);
      vlib_buffer_copy_indices (to, tmp + n_free, n_2nd_frame);
      vlib_put_next_frame (vm, node, next_index,
			   VLIB_FRAME_SIZE - n_2nd_frame);
    }

  return n_left - n_extracted;
}

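/* Sort buffers into per-next-index frames. The vector is processed in
 * chunks of VLIB_FRAME_SIZE; within each chunk, we repeatedly pick the next
 * index of the first not-yet-used element and extract all buffers going to
 * that node. */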
void __clib_section (".vlib_buffer_enqueue_to_next_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts,
 uword count)
{
  u32 tmp[VLIB_FRAME_SIZE];
  u32 n_left;
  u16 next_index;

  while (count >= VLIB_FRAME_SIZE)
    {
      u64 used_elt_bmp[VLIB_FRAME_SIZE / 64] = {};
      n_left = VLIB_FRAME_SIZE;
      u32 off = 0;

      next_index = nexts[0];
      n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers,
			    nexts, VLIB_FRAME_SIZE, n_left, tmp);

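      /* while buffers remain, scan the used-element bitmap for the first
       * unprocessed slot and enqueue towards its next index */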
      while (n_left)
	{
	  while (PREDICT_FALSE (used_elt_bmp[off] == ~0))
	    off++;

	  next_index =
	    nexts[off * 64 + count_trailing_zeros (~used_elt_bmp[off])];
	  n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers,
				nexts, VLIB_FRAME_SIZE, n_left, tmp);
	}

      buffers += VLIB_FRAME_SIZE;
      nexts += VLIB_FRAME_SIZE;
      count -= VLIB_FRAME_SIZE;
    }

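  /* same processing for the remaining partial chunk */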
  if (count)
    {
      u64 used_elt_bmp[VLIB_FRAME_SIZE / 64] = {};
      next_index = nexts[0];
      n_left = count;
      u32 off = 0;

      n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers,
			    nexts, count, n_left, tmp);

      while (n_left)
	{
	  while (PREDICT_FALSE (used_elt_bmp[off] == ~0))
	    off++;

	  next_index =
	    nexts[off * 64 + count_trailing_zeros (~used_elt_bmp[off])];
	  n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers,
				nexts, count, n_left, tmp);
	}
    }
}

CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_next_fn);
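
/* A minimal usage sketch (hypothetical node code; callers normally go
 * through the vlib_buffer_enqueue_to_next() inline wrapper in
 * vlib/buffer_node.h, which dispatches here via vlib_buffer_func_main):
 *
 *   u32 buffers[VLIB_FRAME_SIZE];
 *   u16 nexts[VLIB_FRAME_SIZE];
 *   // ... fill buffers[] with buffer indices and nexts[] with the
 *   // per-packet next node index ...
 *   vlib_buffer_enqueue_to_next (vm, node, buffers, nexts, n_packets);
 */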
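
/* Enqueue all buffers to a single next node, spilling into as many
 * additional frames as needed. */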
void __clib_section (".vlib_buffer_enqueue_to_single_next_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_single_next_fn)
(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index,
 u32 count)
{
  u32 *to_next, n_left_to_next, n_enq;

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (PREDICT_TRUE (n_left_to_next >= count))
    {
      vlib_buffer_copy_indices (to_next, buffers, count);
      n_left_to_next -= count;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      return;
    }

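  /* not enough space in the current frame: copy what fits, then keep
   * requesting fresh frames until every index is enqueued */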
  n_enq = n_left_to_next;
next:
  vlib_buffer_copy_indices (to_next, buffers, n_enq);
  n_left_to_next -= n_enq;

  if (PREDICT_FALSE (count > n_enq))
    {
      count -= n_enq;
      buffers += n_enq;

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      n_enq = clib_min (n_left_to_next, count);
      goto next;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_single_next_fn);

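/* Hand buffers off to other threads via the frame queue identified by
 * frame_queue_index. With drop_on_congestion set, buffers destined to a
 * congested thread are dropped instead of queued. Returns the number of
 * buffers actually enqueued. */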
u32 __clib_section (".vlib_buffer_enqueue_to_thread_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_thread_fn)
(vlib_main_t *vm, vlib_node_runtime_t *node, u32 frame_queue_index,
 u32 *buffer_indices, u16 *thread_indices, u32 n_packets,
 int drop_on_congestion)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_per_thread_data_t *ptd;
  u32 n_left = n_packets;
  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
  vlib_frame_queue_elt_t *hf = 0;
  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
  u32 next_thread_index, current_thread_index = ~0;
  int i;

  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);

  while (n_left)
    {
      next_thread_index = thread_indices[0];

      if (next_thread_index != current_thread_index)
	{
	  if (drop_on_congestion &&
	      is_vlib_frame_queue_congested (
		frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
		ptd->congested_handoff_queue_by_thread_index))
	    {
	      dbi[0] = buffer_indices[0];
	      dbi++;
	      n_drop++;
	      goto next;
	    }

	  if (hf)
	    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

	  hf = vlib_get_worker_handoff_queue_elt (
	    frame_queue_index, next_thread_index,
	    ptd->handoff_queue_elt_by_thread_index);

	  n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
	  to_next_thread = &hf->buffer_index[hf->n_vectors];
	  current_thread_index = next_thread_index;
	}

      to_next_thread[0] = buffer_indices[0];
      to_next_thread++;
      n_left_to_next_thread--;

      if (n_left_to_next_thread == 0)
	{
	  hf->n_vectors = VLIB_FRAME_SIZE;
	  vlib_put_frame_queue_elt (hf);
	  vlib_get_main_by_index (current_thread_index)->check_frame_queues =
	    1;
	  current_thread_index = ~0;
	  ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
	  hf = 0;
	}

      /* next */
    next:
      thread_indices += 1;
      buffer_indices += 1;
      n_left -= 1;
    }

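  /* record how many indices landed in the last, possibly partial, element */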
  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

  /* Ship frames to the thread nodes */
  for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
    {
      if (ptd->handoff_queue_elt_by_thread_index[i])
	{
	  hf = ptd->handoff_queue_elt_by_thread_index[i];
	  /*
	   * It works better to let the handoff node
	   * rate-adapt, always ship the handoff queue element.
	   */
	  if (1 || hf->n_vectors == hf->last_n_vectors)
	    {
	      vlib_put_frame_queue_elt (hf);
	      vlib_get_main_by_index (i)->check_frame_queues = 1;
	      ptd->handoff_queue_elt_by_thread_index[i] = 0;
	    }
	  else
	    hf->last_n_vectors = hf->n_vectors;
	}
      ptd->congested_handoff_queue_by_thread_index[i] =
	(vlib_frame_queue_t *) (~0);
    }

  if (drop_on_congestion && n_drop)
    vlib_buffer_free (vm, drop_list, n_drop);

  return n_packets - n_drop;
}

CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_thread_fn);

/*
 * Check the frame queue to see if any frames are available.
 * If so, pull the packets off the frames and put them to
 * the handoff node.
 */
u32 __clib_section (".vlib_frame_queue_dequeue_fn")
CLIB_MULTIARCH_FN (vlib_frame_queue_dequeue_fn)
(vlib_main_t *vm, vlib_frame_queue_main_t *fqm)
{
  u32 thread_id = vm->thread_index;
  vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
  vlib_frame_queue_elt_t *elt;
  u32 *from, *to;
  vlib_frame_t *f;
  int msg_type;
  int processed = 0;
  u32 vectors = 0;

  ASSERT (fq);
  ASSERT (vm == vlib_global_main.vlib_mains[thread_id]);

  if (PREDICT_FALSE (fqm->node_index == ~0))
    return 0;
  /*
   * Gather trace data for frame queues
   */
  if (PREDICT_FALSE (fq->trace))
    {
      frame_queue_trace_t *fqt;
      frame_queue_nelt_counter_t *fqh;
      u32 elix;

      fqt = &fqm->frame_queue_traces[thread_id];

      fqt->nelts = fq->nelts;
      fqt->head = fq->head;
      fqt->head_hint = fq->head_hint;
      fqt->tail = fq->tail;
      fqt->threshold = fq->vector_threshold;
      fqt->n_in_use = fqt->tail - fqt->head;
      if (fqt->n_in_use >= fqt->nelts)
	{
	  // if beyond max then use max
	  fqt->n_in_use = fqt->nelts - 1;
	}

      /* Record the number of elements in use in the histogram */
      fqh = &fqm->frame_queue_histogram[thread_id];
      fqh->count[fqt->n_in_use]++;

      /* Record a snapshot of the elements in use */
      for (elix = 0; elix < fqt->nelts; elix++)
	{
	  elt = fq->elts + ((fq->head + 1 + elix) & (fq->nelts - 1));
	  if (1 || elt->valid)
	    {
	      fqt->n_vectors[elix] = elt->n_vectors;
	    }
	}
      fqt->written = 1;
    }

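  /* drain valid elements from the ring head, copying each into a fresh
   * frame for the handoff node, until the queue is empty or the vector
   * threshold is reached */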
  while (1)
    {
      vlib_buffer_t *b;
      if (fq->head == fq->tail)
	{
	  fq->head_hint = fq->head;
	  return processed;
	}

      elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));

      if (!elt->valid)
	{
	  fq->head_hint = fq->head;
	  return processed;
	}

      from = elt->buffer_index;
      msg_type = elt->msg_type;

      ASSERT (msg_type == VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME);
      ASSERT (elt->n_vectors <= VLIB_FRAME_SIZE);

      f = vlib_get_frame_to_node (vm, fqm->node_index);

      /* If the first vector is traced, set the frame trace flag */
      b = vlib_get_buffer (vm, from[0]);
      if (b->flags & VLIB_BUFFER_IS_TRACED)
	f->frame_flags |= VLIB_NODE_FLAG_TRACE;

      to = vlib_frame_vector_args (f);

      vlib_buffer_copy_indices (to, from, elt->n_vectors);

      vectors += elt->n_vectors;
      f->n_vectors = elt->n_vectors;
      vlib_put_frame_to_node (vm, fqm->node_index, f);

      elt->valid = 0;
      elt->n_vectors = 0;
      elt->msg_type = 0xfefefefe;
      CLIB_MEMORY_BARRIER ();
      fq->head++;
      processed++;

      /*
       * Limit the number of packets pushed into the graph
       */
      if (vectors >= fq->vector_threshold)
	{
	  fq->head_hint = fq->head;
	  return processed;
	}
    }
  ASSERT (0);
  return processed;
}
CLIB_MARCH_FN_REGISTRATION (vlib_frame_queue_dequeue_fn);

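/* In the default (non-march-variant) build, publish pointers to the best
 * march variant of each function so callers can dispatch indirectly. */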
#ifndef CLIB_MARCH_VARIANT
vlib_buffer_func_main_t vlib_buffer_func_main;

static clib_error_t *
vlib_buffer_funcs_init (vlib_main_t *vm)
{
  vlib_buffer_func_main_t *bfm = &vlib_buffer_func_main;
  bfm->buffer_enqueue_to_next_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_next_fn);
  bfm->buffer_enqueue_to_single_next_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_single_next_fn);
  bfm->buffer_enqueue_to_thread_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_thread_fn);
  bfm->frame_queue_dequeue_fn =
    CLIB_MARCH_FN_POINTER (vlib_frame_queue_dequeue_fn);
  return 0;
}

VLIB_INIT_FUNCTION (vlib_buffer_funcs_init);
#endif