/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_node.h: VLIB buffer handling node helper macros/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_node_h
#define included_vlib_buffer_node_h

/** \file
    vlib buffer/node functions
*/

/** \brief Finish enqueueing two buffers forward in the graph.
 Standard dual loop boilerplate element. This is a MACRO,
 with MULTIPLE SIDE EFFECTS. In the ideal case,
 <code>next_index == next0 == next1</code>,
 which means that the speculative enqueue at the top of the dual loop
 has correctly dealt with both packets. In that case, the macro does
 nothing at all.

 @param vm vlib_main_t pointer, varies by thread
 @param node current node vlib_node_runtime_t pointer
 @param next_index speculated next index used for both packets
 @param to_next speculated vector pointer used for both packets
 @param n_left_to_next number of slots left in speculated vector
 @param bi0 first buffer index
 @param bi1 second buffer index
 @param next0 actual next index to be used for the first packet
 @param next1 actual next index to be used for the second packet

 @return @c next_index -- speculative next index to be used for future packets
 @return @c to_next -- speculative frame to be used for future packets
 @return @c n_left_to_next -- number of slots left in speculative frame
*/

#define vlib_validate_buffer_enqueue_x2(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,next0,next1) \
do {                                                                    \
  ASSERT (bi0 != 0);                                                    \
  ASSERT (bi1 != 0);                                                    \
  int enqueue_code = (next0 != next_index) + 2*(next1 != next_index);   \
                                                                        \
  if (PREDICT_FALSE (enqueue_code != 0))                                \
    {                                                                   \
      switch (enqueue_code)                                             \
        {                                                               \
        case 1:                                                         \
          /* A B A */                                                   \
          to_next[-2] = bi1;                                            \
          to_next -= 1;                                                 \
          n_left_to_next += 1;                                          \
          vlib_set_next_frame_buffer (vm, node, next0, bi0);            \
          break;                                                        \
                                                                        \
        case 2:                                                         \
          /* A A B */                                                   \
          to_next -= 1;                                                 \
          n_left_to_next += 1;                                          \
          vlib_set_next_frame_buffer (vm, node, next1, bi1);            \
          break;                                                        \
                                                                        \
        case 3:                                                         \
          /* A B B or A B C */                                          \
          to_next -= 2;                                                 \
          n_left_to_next += 2;                                          \
          vlib_set_next_frame_buffer (vm, node, next0, bi0);            \
          vlib_set_next_frame_buffer (vm, node, next1, bi1);            \
          if (next0 == next1)                                           \
            {                                                           \
              vlib_put_next_frame (vm, node, next_index,                \
                                   n_left_to_next);                     \
              next_index = next1;                                       \
              vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); \
            }                                                           \
        }                                                               \
    }                                                                   \
} while (0)
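
/*
 * Illustrative dual-loop usage (a sketch, not part of this header; the
 * same pattern appears in generic_buffer_node_inline below). The loop
 * enqueues speculatively to next_index, then lets the macro repair any
 * mis-speculation:
 *
 *   vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 *   while (n_left_from >= 4 && n_left_to_next >= 2)
 *     {
 *       // speculative enqueue: assume both packets go to next_index
 *       pi0 = to_next[0] = from[0];
 *       pi1 = to_next[1] = from[1];
 *       from += 2; to_next += 2; n_left_from -= 2; n_left_to_next -= 2;
 *       // ... node-specific work computes next0 and next1 ...
 *       vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
 *                                        n_left_to_next, pi0, pi1,
 *                                        next0, next1);
 *     }
 *   vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 */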


/** \brief Finish enqueueing four buffers forward in the graph.
 Standard quad loop boilerplate element. This is a MACRO,
 with MULTIPLE SIDE EFFECTS. In the ideal case,
 <code>next_index == next0 == next1 == next2 == next3</code>,
 which means that the speculative enqueue at the top of the quad loop
 has correctly dealt with all four packets. In that case, the macro does
 nothing at all.

 @param vm vlib_main_t pointer, varies by thread
 @param node current node vlib_node_runtime_t pointer
 @param next_index speculated next index used for all four packets
 @param to_next speculated vector pointer used for all four packets
 @param n_left_to_next number of slots left in speculated vector
 @param bi0 first buffer index
 @param bi1 second buffer index
 @param bi2 third buffer index
 @param bi3 fourth buffer index
 @param next0 actual next index to be used for the first packet
 @param next1 actual next index to be used for the second packet
 @param next2 actual next index to be used for the third packet
 @param next3 actual next index to be used for the fourth packet

 @return @c next_index -- speculative next index to be used for future packets
 @return @c to_next -- speculative frame to be used for future packets
 @return @c n_left_to_next -- number of slots left in speculative frame
*/

#define vlib_validate_buffer_enqueue_x4(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,bi2,bi3,next0,next1,next2,next3) \
do {                                                                    \
  ASSERT (bi0 != 0);                                                    \
  ASSERT (bi1 != 0);                                                    \
  ASSERT (bi2 != 0);                                                    \
  ASSERT (bi3 != 0);                                                    \
  /* After the fact: check the [speculative] enqueue to "next" */       \
  u32 fix_speculation = (next_index ^ next0) | (next_index ^ next1)     \
    | (next_index ^ next2) | (next_index ^ next3);                      \
  if (PREDICT_FALSE (fix_speculation))                                  \
    {                                                                   \
      /* rewind... */                                                   \
      to_next -= 4;                                                     \
      n_left_to_next += 4;                                              \
                                                                        \
      /* If bi0 belongs to "next", send it there */                     \
      if (next_index == next0)                                          \
        {                                                               \
          to_next[0] = bi0;                                             \
          to_next++;                                                    \
          n_left_to_next--;                                             \
        }                                                               \
      else              /* send it where it needs to go */              \
        vlib_set_next_frame_buffer (vm, node, next0, bi0);              \
                                                                        \
      if (next_index == next1)                                          \
        {                                                               \
          to_next[0] = bi1;                                             \
          to_next++;                                                    \
          n_left_to_next--;                                             \
        }                                                               \
      else                                                              \
        vlib_set_next_frame_buffer (vm, node, next1, bi1);              \
                                                                        \
      if (next_index == next2)                                          \
        {                                                               \
          to_next[0] = bi2;                                             \
          to_next++;                                                    \
          n_left_to_next--;                                             \
        }                                                               \
      else                                                              \
        vlib_set_next_frame_buffer (vm, node, next2, bi2);              \
                                                                        \
      if (next_index == next3)                                          \
        {                                                               \
          to_next[0] = bi3;                                             \
          to_next++;                                                    \
          n_left_to_next--;                                             \
        }                                                               \
      else                                                              \
        {                                                               \
          vlib_set_next_frame_buffer (vm, node, next3, bi3);            \
                                                                        \
          /* Change speculation: last 2 packets went to the same node */ \
          if (next2 == next3)                                           \
            {                                                           \
              vlib_put_next_frame (vm, node, next_index, n_left_to_next); \
              next_index = next3;                                       \
              vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); \
            }                                                           \
        }                                                               \
    }                                                                   \
} while (0)
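
/*
 * Note on the speculation check above: (next_index ^ nextN) is nonzero
 * exactly when nextN differs from the speculated next_index, so OR-ing
 * the four XOR terms yields zero only when all four packets matched the
 * speculation. For example, next_index = 2 with next0..next3 = 2, 2, 3, 2
 * gives fix_speculation = 0 | 0 | 1 | 0 != 0, taking the rewind path.
 */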

/** \brief Finish enqueueing one buffer forward in the graph.
 Standard single loop boilerplate element. This is a MACRO,
 with MULTIPLE SIDE EFFECTS. In the ideal case,
 <code>next_index == next0</code>,
 which means that the speculative enqueue at the top of the single loop
 has correctly dealt with the packet in hand. In that case, the macro does
 nothing at all.

 @param vm vlib_main_t pointer, varies by thread
 @param node current node vlib_node_runtime_t pointer
 @param next_index speculated next index used for the packet
 @param to_next speculated vector pointer used for the packet
 @param n_left_to_next number of slots left in speculated vector
 @param bi0 buffer index
 @param next0 actual next index to be used for the packet

 @return @c next_index -- speculative next index to be used for future packets
 @return @c to_next -- speculative frame to be used for future packets
 @return @c n_left_to_next -- number of slots left in speculative frame
*/
#define vlib_validate_buffer_enqueue_x1(vm,node,next_index,to_next,n_left_to_next,bi0,next0) \
do {                                                                    \
  ASSERT (bi0 != 0);                                                    \
  if (PREDICT_FALSE (next0 != next_index))                              \
    {                                                                   \
      vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);   \
      next_index = next0;                                               \
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); \
                                                                        \
      to_next[0] = bi0;                                                 \
      to_next += 1;                                                     \
      n_left_to_next -= 1;                                              \
    }                                                                   \
} while (0)
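
/*
 * Illustrative single-loop usage (a sketch; the classification that
 * produces next0 is node-specific and elided here):
 *
 *   while (n_left_from > 0 && n_left_to_next > 0)
 *     {
 *       pi0 = to_next[0] = from[0];
 *       from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1;
 *       // ... node-specific work computes next0 ...
 *       vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
 *                                        n_left_to_next, pi0, next0);
 *     }
 */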

always_inline uword
generic_buffer_node_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame,
                            uword sizeof_trace,
                            void *opaque1,
                            uword opaque2,
                            void (*two_buffers) (vlib_main_t * vm,
                                                 void *opaque1,
                                                 uword opaque2,
                                                 vlib_buffer_t * b0,
                                                 vlib_buffer_t * b1,
                                                 u32 * next0, u32 * next1),
                            void (*one_buffer) (vlib_main_t * vm,
                                                void *opaque1, uword opaque2,
                                                vlib_buffer_t * b0,
                                                u32 * next0))
{
  u32 n_left_from, *from, *to_next;
  u32 next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
                                   /* stride */ 1, sizeof_trace);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *p0, *p1;
          u32 pi0, next0;
          u32 pi1, next1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 64, LOAD);
            CLIB_PREFETCH (p3->data, 64, LOAD);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          two_buffers (vm, opaque1, opaque2, p0, p1, &next0, &next1);

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *p0;
          u32 pi0, next0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          one_buffer (vm, opaque1, opaque2, p0, &next0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
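
/*
 * Illustrative caller of generic_buffer_node_inline (a sketch;
 * my_node_fn, my_trace_t, my_main, my_two_buffers and my_one_buffer are
 * hypothetical names). A node function forwards its frame plus two
 * per-node classification callbacks:
 *
 *   static uword
 *   my_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 *               vlib_frame_t * frame)
 *   {
 *     return generic_buffer_node_inline (vm, node, frame,
 *                                        sizeof (my_trace_t),
 *                                        &my_main, 0, // opaque2 unused
 *                                        my_two_buffers, my_one_buffer);
 *   }
 */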

static_always_inline void
vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
                             u32 * buffers, u16 * nexts, uword count)
{
  u32 *to_next, n_left_to_next, max;
  u16 next_index;

  next_index = nexts[0];
  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
  max = clib_min (n_left_to_next, count);

  while (count)
    {
      u32 n_enqueued;
      if ((nexts[0] != next_index) || n_left_to_next == 0)
        {
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
          next_index = nexts[0];
          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
          max = clib_min (n_left_to_next, count);
        }
#if defined(CLIB_HAVE_VEC512)
      u16x32 next32 = CLIB_MEM_OVERFLOW_LOAD (u16x32_load_unaligned, nexts);
      next32 = (next32 == u16x32_splat (next32[0]));
      u64 bitmap = u16x32_msb_mask (next32);
      n_enqueued = count_trailing_zeros (~bitmap);
#elif defined(CLIB_HAVE_VEC256)
      u16x16 next16 = CLIB_MEM_OVERFLOW_LOAD (u16x16_load_unaligned, nexts);
      next16 = (next16 == u16x16_splat (next16[0]));
      u64 bitmap = u8x32_msb_mask ((u8x32) next16);
      n_enqueued = count_trailing_zeros (~bitmap) / 2;
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
      u16x8 next8 = CLIB_MEM_OVERFLOW_LOAD (u16x8_load_unaligned, nexts);
      next8 = (next8 == u16x8_splat (next8[0]));
      u64 bitmap = u8x16_msb_mask ((u8x16) next8);
      n_enqueued = count_trailing_zeros (~bitmap) / 2;
#else
      u16 x = 0;
      if (count + 3 < max)
        {
          x |= next_index ^ nexts[1];
          x |= next_index ^ nexts[2];
          x |= next_index ^ nexts[3];
          n_enqueued = (x == 0) ? 4 : 1;
        }
      else
        n_enqueued = 1;
#endif

      if (PREDICT_FALSE (n_enqueued > max))
        n_enqueued = max;

#ifdef CLIB_HAVE_VEC512
      if (n_enqueued >= 32)
        {
          vlib_buffer_copy_indices (to_next, buffers, 32);
          nexts += 32;
          to_next += 32;
          buffers += 32;
          n_left_to_next -= 32;
          count -= 32;
          max -= 32;
          continue;
        }
#endif

#ifdef CLIB_HAVE_VEC256
      if (n_enqueued >= 16)
        {
          vlib_buffer_copy_indices (to_next, buffers, 16);
          nexts += 16;
          to_next += 16;
          buffers += 16;
          n_left_to_next -= 16;
          count -= 16;
          max -= 16;
          continue;
        }
#endif

#ifdef CLIB_HAVE_VEC128
      if (n_enqueued >= 8)
        {
          vlib_buffer_copy_indices (to_next, buffers, 8);
          nexts += 8;
          to_next += 8;
          buffers += 8;
          n_left_to_next -= 8;
          count -= 8;
          max -= 8;
          continue;
        }
#endif

      if (n_enqueued >= 4)
        {
          vlib_buffer_copy_indices (to_next, buffers, 4);
          nexts += 4;
          to_next += 4;
          buffers += 4;
          n_left_to_next -= 4;
          count -= 4;
          max -= 4;
          continue;
        }

      /* copy */
      to_next[0] = buffers[0];

      /* next */
      nexts += 1;
      to_next += 1;
      buffers += 1;
      n_left_to_next -= 1;
      count -= 1;
      max -= 1;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
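
/*
 * Illustrative caller (a sketch): newer nodes typically classify the
 * whole vector first, filling parallel buffers[] / nexts[] arrays, and
 * then enqueue in a single call instead of using the speculative
 * macros above:
 *
 *   u32 buffers[VLIB_FRAME_SIZE];
 *   u16 nexts[VLIB_FRAME_SIZE];
 *   // ... fill buffers[i] and nexts[i] for each packet ...
 *   vlib_buffer_enqueue_to_next (vm, node, buffers, nexts,
 *                                frame->n_vectors);
 *
 * Internally, each SIMD path compares a vector of nexts[] entries with
 * nexts[0]; count_trailing_zeros (~bitmap) on the resulting mask is the
 * length of the leading run of equal next indices, which is then copied
 * to the current frame in one batch.
 */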

static_always_inline void
vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
                                    vlib_node_runtime_t * node, u32 * buffers,
                                    u16 next_index, u32 count)
{
  u32 *to_next, n_left_to_next, n_enq;

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (PREDICT_TRUE (n_left_to_next >= count))
    {
      vlib_buffer_copy_indices (to_next, buffers, count);
      n_left_to_next -= count;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      return;
    }

  n_enq = n_left_to_next;
next:
  vlib_buffer_copy_indices (to_next, buffers, n_enq);
  n_left_to_next -= n_enq;

  if (PREDICT_FALSE (count > n_enq))
    {
      count -= n_enq;
      buffers += n_enq;

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      n_enq = clib_min (n_left_to_next, count);
      goto next;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
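
/*
 * Illustrative use (a sketch; MY_NEXT_DROP and n_pkts are hypothetical
 * names): when every buffer in a batch goes to the same next node, for
 * example an error path dropping everything, this avoids per-packet
 * next[] bookkeeping entirely:
 *
 *   vlib_buffer_enqueue_to_single_next (vm, node, buffers,
 *                                       MY_NEXT_DROP, n_pkts);
 */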

static_always_inline u32
vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
                               u32 * buffer_indices, u16 * thread_indices,
                               u32 n_packets, int drop_on_congestion)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_per_thread_data_t *ptd;
  u32 n_left = n_packets;
  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
  vlib_frame_queue_elt_t *hf = 0;
  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
  u32 next_thread_index, current_thread_index = ~0;
  int i;

  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);

  while (n_left)
    {
      next_thread_index = thread_indices[0];

      if (next_thread_index != current_thread_index)
        {
          if (drop_on_congestion &&
              is_vlib_frame_queue_congested
              (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
               ptd->congested_handoff_queue_by_thread_index))
            {
              dbi[0] = buffer_indices[0];
              dbi++;
              n_drop++;
              goto next;
            }

          if (hf)
            hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

          hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
                                                  next_thread_index,
                                                  ptd->handoff_queue_elt_by_thread_index);

          n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
          to_next_thread = &hf->buffer_index[hf->n_vectors];
          current_thread_index = next_thread_index;
        }

      to_next_thread[0] = buffer_indices[0];
      to_next_thread++;
      n_left_to_next_thread--;

      if (n_left_to_next_thread == 0)
        {
          hf->n_vectors = VLIB_FRAME_SIZE;
          vlib_put_frame_queue_elt (hf);
          vlib_mains[current_thread_index]->check_frame_queues = 1;
          current_thread_index = ~0;
          ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
          hf = 0;
        }

      /* next */
    next:
      thread_indices += 1;
      buffer_indices += 1;
      n_left -= 1;
    }

  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

  /* Ship frames to the thread nodes */
  for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
    {
      if (ptd->handoff_queue_elt_by_thread_index[i])
        {
          hf = ptd->handoff_queue_elt_by_thread_index[i];
          /*
           * It works better to let the handoff node
           * rate-adapt, always ship the handoff queue element.
           */
          if (1 || hf->n_vectors == hf->last_n_vectors)
            {
              vlib_put_frame_queue_elt (hf);
              vlib_mains[i]->check_frame_queues = 1;
              ptd->handoff_queue_elt_by_thread_index[i] = 0;
            }
          else
            hf->last_n_vectors = hf->n_vectors;
        }
      ptd->congested_handoff_queue_by_thread_index[i] =
        (vlib_frame_queue_t *) (~0);
    }

  if (drop_on_congestion && n_drop)
    vlib_buffer_free (vm, drop_list, n_drop);

  return n_packets - n_drop;
}
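
/*
 * Illustrative handoff usage (a sketch; the thread selection and error
 * counter name are hypothetical). A handoff node computes a destination
 * thread per packet, then ships the whole batch; the return value is
 * the number actually enqueued, so congestion drops can be counted:
 *
 *   u16 thread_indices[VLIB_FRAME_SIZE];
 *   // ... compute thread_indices[i] for each buffer, e.g. from a flow hash ...
 *   n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from,
 *                                          thread_indices,
 *                                          frame->n_vectors,
 *                                          1); // drop_on_congestion
 *   if (n_enq < frame->n_vectors)
 *     vlib_node_increment_counter (vm, node->node_index,
 *                                  MY_ERROR_CONGESTION_DROP,
 *                                  frame->n_vectors - n_enq);
 */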

#endif /* included_vlib_buffer_node_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */