   1/*
2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15#include <vnet/vnet.h>
16#include <vppinfra/vec.h>
17#include <vppinfra/error.h>
18#include <vppinfra/format.h>
19#include <vppinfra/xxhash.h>
20
21#include <vnet/ethernet/ethernet.h>
22#include <vnet/devices/dpdk/dpdk.h>
23#include <vnet/classify/vnet_classify.h>
24#include <vnet/mpls-gre/packet.h>
25
26#include "dpdk_priv.h"
27
28#ifndef MAX
29#define MAX(a,b) ((a) < (b) ? (b) : (a))
30#endif
31
32#ifndef MIN
33#define MIN(a,b) ((a) < (b) ? (a) : (b))
34#endif
35
36/*
 37 * At least in certain versions of ESXi, VMware e1000s don't honor the
 38 * "strip rx CRC" bit. Set this flag to work around that bug FOR UNIT TEST ONLY.
 39 *
 40 * If Wireshark complains like so:
 41 *
 42 * "Frame check sequence: 0x00000000 [incorrect, should be <hex-num>]"
 43 * and you're using ESXi-emulated e1000s, set this flag FOR UNIT TEST ONLY.
 44 *
 45 * Note: do NOT check in this file with this workaround enabled! You'll lose
 46 * actual data from e.g. 10xGE interfaces. The extra 4 bytes annoy
 47 * Wireshark, but they're harmless...
48 */
49#define VMWARE_LENGTH_BUG_WORKAROUND 0
50
51typedef struct {
52 u32 cached_next_index;
53
54 /* convenience variables */
55 vlib_main_t * vlib_main;
56 vnet_main_t * vnet_main;
57} handoff_dispatch_main_t;
58
59typedef struct {
60 u32 buffer_index;
61 u32 next_index;
62 u32 sw_if_index;
63} handoff_dispatch_trace_t;
64
65/* packet trace format function */
66static u8 * format_handoff_dispatch_trace (u8 * s, va_list * args)
67{
68 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
69 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
70 handoff_dispatch_trace_t * t = va_arg (*args, handoff_dispatch_trace_t *);
71
72 s = format (s, "HANDOFF_DISPATCH: sw_if_index %d next_index %d buffer 0x%x",
73 t->sw_if_index,
74 t->next_index,
75 t->buffer_index);
76 return s;
77}
78
79handoff_dispatch_main_t handoff_dispatch_main;
80
81vlib_node_registration_t handoff_dispatch_node;
82
83#define foreach_handoff_dispatch_error \
84_(EXAMPLE, "example packets")
85
86typedef enum {
87#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
88 foreach_handoff_dispatch_error
89#undef _
90 HANDOFF_DISPATCH_N_ERROR,
91} handoff_dispatch_error_t;
92
93static char * handoff_dispatch_error_strings[] = {
94#define _(sym,string) string,
95 foreach_handoff_dispatch_error
96#undef _
97};
98
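/*
 * Publish a filled handoff element to its consumer: the memory barrier
 * guarantees the frame contents are visible before the valid flag is set.
 */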
99static inline
100void vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
101{
102 CLIB_MEMORY_BARRIER();
103 hf->valid = 1;
104}
105
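/*
 * handoff-dispatch node function: re-enqueue buffers that were handed off by
 * an IO thread to the next node recorded in
 * vnet_buffer(b)->io_handoff.next_index, using the standard dual/single-loop
 * speculative enqueue pattern.
 */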
106static uword
107handoff_dispatch_node_fn (vlib_main_t * vm,
108 vlib_node_runtime_t * node,
109 vlib_frame_t * frame)
110{
111 u32 n_left_from, * from, * to_next;
112 dpdk_rx_next_t next_index;
113
114 from = vlib_frame_vector_args (frame);
115 n_left_from = frame->n_vectors;
116 next_index = node->cached_next_index;
117
118 while (n_left_from > 0)
119 {
120 u32 n_left_to_next;
121
122 vlib_get_next_frame (vm, node, next_index,
123 to_next, n_left_to_next);
124
125 while (n_left_from >= 4 && n_left_to_next >= 2)
126 {
127 u32 bi0, bi1;
128 vlib_buffer_t * b0, * b1;
129 u32 next0, next1;
130 u32 sw_if_index0, sw_if_index1;
131
132 /* Prefetch next iteration. */
133 {
134 vlib_buffer_t * p2, * p3;
135
136 p2 = vlib_get_buffer (vm, from[2]);
137 p3 = vlib_get_buffer (vm, from[3]);
138
139 vlib_prefetch_buffer_header (p2, LOAD);
140 vlib_prefetch_buffer_header (p3, LOAD);
141 }
142
143 /* speculatively enqueue b0 and b1 to the current next frame */
144 to_next[0] = bi0 = from[0];
145 to_next[1] = bi1 = from[1];
146 from += 2;
147 to_next += 2;
148 n_left_from -= 2;
149 n_left_to_next -= 2;
150
151 b0 = vlib_get_buffer (vm, bi0);
152 b1 = vlib_get_buffer (vm, bi1);
153
154 next0 = vnet_buffer(b0)->io_handoff.next_index;
155 next1 = vnet_buffer(b1)->io_handoff.next_index;
156
157 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
158 {
159 vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
160 handoff_dispatch_trace_t *t =
161 vlib_add_trace (vm, node, b0, sizeof (*t));
162 sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
163 t->sw_if_index = sw_if_index0;
164 t->next_index = next0;
165 t->buffer_index = bi0;
166 }
167 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
168 {
169 vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */ 0);
170 handoff_dispatch_trace_t *t =
171 vlib_add_trace (vm, node, b1, sizeof (*t));
172 sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
173 t->sw_if_index = sw_if_index1;
174 t->next_index = next1;
175 t->buffer_index = bi1;
176 }
177
178 /* verify speculative enqueues, maybe switch current next frame */
179 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
180 to_next, n_left_to_next,
181 bi0, bi1, next0, next1);
182 }
183
184 while (n_left_from > 0 && n_left_to_next > 0)
185 {
186 u32 bi0;
187 vlib_buffer_t * b0;
188 u32 next0;
189 u32 sw_if_index0;
190
191 /* speculatively enqueue b0 to the current next frame */
192 bi0 = from[0];
193 to_next[0] = bi0;
194 from += 1;
195 to_next += 1;
196 n_left_from -= 1;
197 n_left_to_next -= 1;
198
199 b0 = vlib_get_buffer (vm, bi0);
200
201 next0 = vnet_buffer(b0)->io_handoff.next_index;
202
203 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
204 {
205 vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
206 handoff_dispatch_trace_t *t =
207 vlib_add_trace (vm, node, b0, sizeof (*t));
208 sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
209 t->sw_if_index = sw_if_index0;
210 t->next_index = next0;
211 t->buffer_index = bi0;
212 }
213
214 /* verify speculative enqueue, maybe switch current next frame */
215 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
216 to_next, n_left_to_next,
217 bi0, next0);
218 }
219
220 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
221 }
222
223 return frame->n_vectors;
224}
225
226VLIB_REGISTER_NODE (handoff_dispatch_node) = {
227 .function = handoff_dispatch_node_fn,
228 .name = "handoff-dispatch",
229 .vector_size = sizeof (u32),
230 .format_trace = format_handoff_dispatch_trace,
231 .type = VLIB_NODE_TYPE_INTERNAL,
232 .flags = VLIB_NODE_FLAG_IS_HANDOFF,
233
234 .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
235 .error_strings = handoff_dispatch_error_strings,
236
237 .n_next_nodes = DPDK_RX_N_NEXT,
238
239 .next_nodes = {
240 [DPDK_RX_NEXT_DROP] = "error-drop",
241 [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
242 [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input",
243 [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
244 [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
245 },
246};
247
248clib_error_t *handoff_dispatch_init (vlib_main_t *vm)
249{
250 handoff_dispatch_main_t * mp = &handoff_dispatch_main;
251
252 mp->vlib_main = vm;
253 mp->vnet_main = &vnet_main;
254
255 return 0;
256}
257
258VLIB_INIT_FUNCTION (handoff_dispatch_init);
259
260u32 dpdk_get_handoff_node_index (void)
261{
262 return handoff_dispatch_node.index;
263}
264
265static char * dpdk_error_strings[] = {
266#define _(n,s) s,
267 foreach_dpdk_error
268#undef _
269};
270
271typedef struct {
272 u32 buffer_index;
273 u16 device_index;
274 u16 queue_index;
275 struct rte_mbuf mb;
276 vlib_buffer_t buffer; /* Copy of VLIB buffer; pkt data stored in pre_data. */
277} dpdk_rx_dma_trace_t;
278
279static u8 * format_dpdk_rx_dma_trace (u8 * s, va_list * va)
280{
281 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
282 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
283 CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main();
284 dpdk_rx_dma_trace_t * t = va_arg (*va, dpdk_rx_dma_trace_t *);
285 dpdk_main_t * dm = &dpdk_main;
286 dpdk_device_t * xd = vec_elt_at_index (dm->devices, t->device_index);
287 format_function_t * f;
288 uword indent = format_get_indent (s);
289 vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
290
291 s = format (s, "%U rx queue %d",
292 format_vnet_sw_interface_name, vnm, sw,
293 t->queue_index);
294
295 s = format (s, "\n%Ubuffer 0x%x: %U",
296 format_white_space, indent,
297 t->buffer_index,
298 format_vlib_buffer, &t->buffer);
299
300#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
301 s = format (s, "\n%U%U",
302 format_white_space, indent,
303 format_dpdk_rx_rte_mbuf, &t->mb);
304#else
305 s = format (s, "\n%U%U",
306 format_white_space, indent,
307 format_dpdk_rte_mbuf, &t->mb);
308#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
309 f = node->format_buffer;
310 if (!f)
311 f = format_hex_bytes;
312 s = format (s, "\n%U%U", format_white_space, indent,
313 f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
314
315 return s;
316}
317
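/*
 * Map DPDK mbuf offload flags (and, on DPDK >= 2.1, the packet_type field)
 * to a next-node index and a per-packet error code: checksum/FCS errors are
 * sent to error-drop, otherwise the packet is steered to the ip4/ip6/mpls/
 * ethernet input node that matches its parsed L3 type.
 */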
318always_inline void
319dpdk_rx_next_and_error_from_mb_flags_x1 (dpdk_device_t *xd, struct rte_mbuf *mb,
320 vlib_buffer_t *b0,
321 u8 * next0, u8 * error0)
322{
323 u8 is0_ip4, is0_ip6, is0_mpls, n0;
324 uint16_t mb_flags = mb->ol_flags;
325
326 if (PREDICT_FALSE(mb_flags & (
327#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
328 PKT_EXT_RX_PKT_ERROR | PKT_EXT_RX_BAD_FCS |
329#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
330 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
331 )))
332 {
333 /* some error was flagged. determine the drop reason */
334 n0 = DPDK_RX_NEXT_DROP;
335 *error0 =
336#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
337 (mb_flags & PKT_EXT_RX_PKT_ERROR) ? DPDK_ERROR_RX_PACKET_ERROR :
338 (mb_flags & PKT_EXT_RX_BAD_FCS) ? DPDK_ERROR_RX_BAD_FCS :
339#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
340 (mb_flags & PKT_RX_IP_CKSUM_BAD) ? DPDK_ERROR_IP_CHECKSUM_ERROR :
341 (mb_flags & PKT_RX_L4_CKSUM_BAD) ? DPDK_ERROR_L4_CHECKSUM_ERROR :
342 DPDK_ERROR_NONE;
343 }
344 else
345 {
346 *error0 = DPDK_ERROR_NONE;
347 if (xd->per_interface_next_index != ~0)
348 n0 = xd->per_interface_next_index;
349 else if (mb_flags & PKT_RX_VLAN_PKT)
350 n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
351 else
352 {
353 n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
354#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
355 is0_ip4 = (mb->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4_EXT)) != 0;
356#else
357 is0_ip4 = (mb_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) != 0;
358#endif
359
360 if (PREDICT_TRUE(is0_ip4))
361 n0 = DPDK_RX_NEXT_IP4_INPUT;
362 else
363 {
364#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
365 is0_ip6 =
366 (mb->packet_type & (RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV6_EXT)) != 0;
367#else
368 is0_ip6 =
369 (mb_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) != 0;
370#endif
371 if (PREDICT_TRUE(is0_ip6))
372 n0 = DPDK_RX_NEXT_IP6_INPUT;
373 else
374 {
375 ethernet_header_t *h0 = (ethernet_header_t *) b0->data;
376 is0_mpls = (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST));
377 n0 = is0_mpls ? DPDK_RX_NEXT_MPLS_INPUT : n0;
378 }
379 }
380 }
381 }
382 *next0 = n0;
383}
384
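/*
 * Add an rx trace record for each buffer in the vector: every record captures
 * the device and queue, a copy of the rte_mbuf header, and the vlib buffer
 * metadata plus the first bytes of packet data (saved in pre_data).
 */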
385void dpdk_rx_trace (dpdk_main_t * dm,
386 vlib_node_runtime_t * node,
387 dpdk_device_t * xd,
388 u16 queue_id,
389 u32 * buffers,
390 uword n_buffers)
391{
392 vlib_main_t * vm = vlib_get_main();
393 u32 * b, n_left;
394 u8 next0;
395
396 n_left = n_buffers;
397 b = buffers;
398
399 while (n_left >= 1)
400 {
401 u32 bi0;
402 vlib_buffer_t * b0;
403 dpdk_rx_dma_trace_t * t0;
404 struct rte_mbuf *mb;
405 u8 error0;
406
407 bi0 = b[0];
408 n_left -= 1;
409
410 b0 = vlib_get_buffer (vm, bi0);
411 mb = ((struct rte_mbuf *)b0) - 1;
412 dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
413 &next0, &error0);
414 vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
415 t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
416 t0->queue_index = queue_id;
417 t0->device_index = xd->device_index;
418 t0->buffer_index = bi0;
419
420 memcpy (&t0->mb, mb, sizeof (t0->mb));
421 memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
422 memcpy (t0->buffer.pre_data, b0->data, sizeof (t0->buffer.pre_data));
423
424#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
425 /*
426 * Clear overloaded TX offload flags when a DPDK driver
427 * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
428 */
429 mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
430#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
431
432 b += 1;
433 }
434}
435
436/*
437 * dpdk_efd_update_counters()
438 * Update EFD (early-fast-discard) counters
439 */
440void dpdk_efd_update_counters (dpdk_device_t *xd,
441 u32 n_buffers,
442 u16 enabled)
443{
444 if (enabled & DPDK_EFD_MONITOR_ENABLED)
445 {
446 u64 now = clib_cpu_time_now();
447 if (xd->efd_agent.last_poll_time > 0)
448 {
449 u64 elapsed_time = (now - xd->efd_agent.last_poll_time);
450 if (elapsed_time > xd->efd_agent.max_poll_delay)
451 xd->efd_agent.max_poll_delay = elapsed_time;
452 }
453 xd->efd_agent.last_poll_time = now;
454 }
455
456 xd->efd_agent.total_packet_cnt += n_buffers;
457 xd->efd_agent.last_burst_sz = n_buffers;
458
459 if (n_buffers > xd->efd_agent.max_burst_sz)
460 xd->efd_agent.max_burst_sz = n_buffers;
461
462 if (PREDICT_FALSE(n_buffers == VLIB_FRAME_SIZE))
463 {
464 xd->efd_agent.full_frames_cnt++;
465 xd->efd_agent.consec_full_frames_cnt++;
466 }
467 else
468 {
469 xd->efd_agent.consec_full_frames_cnt = 0;
470 }
471}
472
473/* is_efd_discardable()
474 * returns a non-zero DPDK error code if the packet meets the early-fast-discard
475 * criteria, zero otherwise
476 */
477u32 is_efd_discardable (vlib_thread_main_t *tm,
478 vlib_buffer_t * b0,
479 struct rte_mbuf *mb)
480{
481 ethernet_header_t *eh = (ethernet_header_t *) b0->data;
482
483 if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
484 {
485 ip4_header_t *ipv4 =
486 (ip4_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
487 u8 pkt_prec = (ipv4->tos >> 5);
488
489 return (tm->efd.ip_prec_bitmap & (1 << pkt_prec) ?
490 DPDK_ERROR_IPV4_EFD_DROP_PKTS : DPDK_ERROR_NONE);
491 }
492 else if (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_IP6))
493 {
494 ip6_header_t *ipv6 =
495 (ip6_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
496 u8 pkt_tclass =
497 ((ipv6->ip_version_traffic_class_and_flow_label >> 20) & 0xff);
498
499 return (tm->efd.ip_prec_bitmap & (1 << pkt_tclass) ?
500 DPDK_ERROR_IPV6_EFD_DROP_PKTS : DPDK_ERROR_NONE);
501 }
502 else if (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_MPLS_UNICAST))
503 {
504 mpls_unicast_header_t *mpls =
505 (mpls_unicast_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
506 u8 pkt_exp = ((mpls->label_exp_s_ttl >> 9) & 0x07);
507
508 return (tm->efd.mpls_exp_bitmap & (1 << pkt_exp) ?
509 DPDK_ERROR_MPLS_EFD_DROP_PKTS : DPDK_ERROR_NONE);
510 }
511 else if ((eh->type == clib_net_to_host_u16(ETHERNET_TYPE_VLAN)) ||
512 (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_DOT1AD)))
513 {
514 ethernet_vlan_header_t *vlan =
515 (ethernet_vlan_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
516 u8 pkt_cos = ((vlan->priority_cfi_and_id >> 13) & 0x07);
517
518 return (tm->efd.vlan_cos_bitmap & (1 << pkt_cos) ?
519 DPDK_ERROR_VLAN_EFD_DROP_PKTS : DPDK_ERROR_NONE);
520 }
521
522 return DPDK_ERROR_NONE;
523}
524
525/*
526 * This function is used when there are no worker threads.
527 * The main thread performs IO and forwards the packets.
528 */
529static inline u32 dpdk_device_input ( dpdk_main_t * dm,
530 dpdk_device_t * xd,
531 vlib_node_runtime_t * node,
532 u32 cpu_index,
533 u16 queue_id)
534{
535 u32 n_buffers;
536 u32 next_index = DPDK_RX_NEXT_ETHERNET_INPUT;
537 u32 n_left_to_next, * to_next;
538 u32 mb_index;
539 vlib_main_t * vm = vlib_get_main();
540 uword n_rx_bytes = 0;
541 u32 n_trace, trace_cnt __attribute__((unused));
542 vlib_buffer_free_list_t * fl;
543 u8 efd_discard_burst = 0;
544
545 if (xd->admin_up == 0)
546 return 0;
547
548 n_buffers = dpdk_rx_burst(dm, xd, queue_id);
549
550 if (n_buffers == 0)
551 {
552 /* check if EFD (dpdk) is enabled */
553 if (PREDICT_FALSE(dm->efd.enabled))
554 {
555 /* reset a few stats */
556 xd->efd_agent.last_poll_time = 0;
557 xd->efd_agent.last_burst_sz = 0;
558 }
559 return 0;
560 }
561
562 vec_reset_length (xd->d_trace_buffers);
563 trace_cnt = n_trace = vlib_get_trace_count (vm, node);
564
565 fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
566
567 /*
568 * DAW-FIXME: VMXNET3 device stop/start doesn't work,
569 * therefore fake the stop in the dpdk driver by
570 * silently dropping all of the incoming pkts instead of
571 * stopping the driver / hardware.
572 */
573 if (PREDICT_FALSE(xd->admin_up != 1))
574 {
575 for (mb_index = 0; mb_index < n_buffers; mb_index++)
576 rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
577
578 return 0;
579 }
580
581 /* Check for congestion if EFD (Early-Fast-Discard) is enabled
582 * in any mode (e.g. dpdk, monitor, or drop_all)
583 */
584 if (PREDICT_FALSE(dm->efd.enabled))
585 {
586 /* update EFD counters */
587 dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
588
589 if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
590 {
591 /* discard all received packets */
592 for (mb_index = 0; mb_index < n_buffers; mb_index++)
593 rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);
594
595 xd->efd_agent.discard_cnt += n_buffers;
596 increment_efd_drop_counter(vm,
597 DPDK_ERROR_VLAN_EFD_DROP_PKTS,
598 n_buffers);
599
600 return 0;
601 }
602
603 if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
604 dm->efd.consec_full_frames_hi_thresh))
605 {
606 u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
607 queue_id);
608 if (device_queue_sz >= dm->efd.queue_hi_thresh)
609 {
610 /* dpdk device queue has reached the critical threshold */
611 xd->efd_agent.congestion_cnt++;
612
613 /* apply EFD to packets from the burst */
614 efd_discard_burst = 1;
615 }
616 }
617 }
618
619 mb_index = 0;
620
621 while (n_buffers > 0)
622 {
623 u32 bi0;
624 u8 next0, error0;
625 u32 l3_offset0;
626 vlib_buffer_t * b0, * b_seg, * b_chain = 0;
627 u32 cntr_type;
628
629 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
630
631 while (n_buffers > 0 && n_left_to_next > 0)
632 {
633 u8 nb_seg = 1;
634 struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
635 struct rte_mbuf *mb_seg = mb->next;
636
637 if (PREDICT_TRUE(n_buffers > 2))
638 {
639 struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
640 vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
641 CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, STORE);
642 CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
643 }
644
645 ASSERT(mb);
646
647 b0 = (vlib_buffer_t *)(mb+1);
648
649 /* check whether EFD is looking for packets to discard */
650 if (PREDICT_FALSE(efd_discard_burst))
651 {
652 vlib_thread_main_t * tm = vlib_get_thread_main();
653
654 if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
655 {
656 rte_pktmbuf_free(mb);
657 xd->efd_agent.discard_cnt++;
658 increment_efd_drop_counter(vm,
659 cntr_type,
660 1);
661 n_buffers--;
662 mb_index++;
663 continue;
664 }
665 }
666
667 /* Prefetch one next segment if it exists. */
668 if (PREDICT_FALSE(mb->nb_segs > 1))
669 {
670 struct rte_mbuf *pfmb = mb->next;
671 vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
672 CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
673 CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
674 b_chain = b0;
675 }
676
677 vlib_buffer_init_for_free_list (b0, fl);
678 b0->clone_count = 0;
679
680 bi0 = vlib_get_buffer_index (vm, b0);
681
682 to_next[0] = bi0;
683 to_next++;
684 n_left_to_next--;
685
686 dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
687 &next0, &error0);
688#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
689 /*
690 * Clear overloaded TX offload flags when a DPDK driver
691 * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
692 */
693
694 if (PREDICT_TRUE(trace_cnt == 0))
695 mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
696 else
697 trace_cnt--;
698#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
699
700 b0->error = node->errors[error0];
701
702 l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
703 next0 == DPDK_RX_NEXT_IP6_INPUT ||
704 next0 == DPDK_RX_NEXT_MPLS_INPUT) ?
705 sizeof (ethernet_header_t) : 0);
706
707 b0->current_data = l3_offset0;
708 b0->current_length = mb->data_len - l3_offset0;
709 b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
710
711 if (VMWARE_LENGTH_BUG_WORKAROUND)
712 b0->current_length -= 4;
713
714 vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
715 vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
716 n_rx_bytes += mb->pkt_len;
717
718 /* Process subsequent segments of multi-segment packets */
719 while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
720 {
721 ASSERT(mb_seg != 0);
722
723 b_seg = (vlib_buffer_t *)(mb_seg+1);
724 vlib_buffer_init_for_free_list (b_seg, fl);
725 b_seg->clone_count = 0;
726
727 ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
728 ASSERT(b_seg->current_data == 0);
729
730 /*
731 * The driver (e.g. virtio) may not put the packet data at the start
732 * of the segment, so don't assume b_seg->current_data == 0 is correct.
733 */
734 b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
735
736 b_seg->current_length = mb_seg->data_len;
737 b0->total_length_not_including_first_buffer +=
738 mb_seg->data_len;
739
740 b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
741 b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
742
743 b_chain = b_seg;
744 mb_seg = mb_seg->next;
745 nb_seg++;
746 }
747
748 /*
749 * Turn this on if you run into
750 * "bad monkey" contexts, and you want to know exactly
751 * which nodes they've visited... See main.c...
752 */
753 VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
754
755 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
756 to_next, n_left_to_next,
757 bi0, next0);
758 if (PREDICT_FALSE (n_trace > mb_index))
759 vec_add1 (xd->d_trace_buffers, bi0);
760 n_buffers--;
761 mb_index++;
762 }
763 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
764 }
765
766 if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
767 {
768 dpdk_rx_trace (dm, node, xd, queue_id, xd->d_trace_buffers,
769 vec_len (xd->d_trace_buffers));
770 vlib_set_trace_count (vm, node, n_trace - vec_len (xd->d_trace_buffers));
771 }
772
773 vlib_increment_combined_counter
774 (vnet_get_main()->interface_main.combined_sw_if_counters
775 + VNET_INTERFACE_COUNTER_RX,
776 cpu_index,
777 xd->vlib_sw_if_index,
778 mb_index, n_rx_bytes);
779
780 dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
781 dw->aggregate_rx_packets += mb_index;
782
783 return mb_index;
784}
785
786#if VIRL > 0
787#define VIRL_SPEED_LIMIT() \
788 /* Limit the input rate to 1000 vectors / sec */ \
789 { \
790 struct timespec ts, tsrem; \
791 \
792 ts.tv_sec = 0; \
793 ts.tv_nsec = 1000*1000; /* 1ms */ \
794 \
795 while (nanosleep(&ts, &tsrem) < 0) \
796 { \
797 ts = tsrem; \
798 } \
799 }
800#else
801#define VIRL_SPEED_LIMIT()
802#endif
803
804
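/*
 * dpdk-input node function: poll every device assigned to this cpu (queue 0
 * only) and process the packets inline via dpdk_device_input().
 */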
805static uword
806dpdk_input (vlib_main_t * vm,
807 vlib_node_runtime_t * node,
808 vlib_frame_t * f)
809{
810 dpdk_main_t * dm = &dpdk_main;
811 dpdk_device_t * xd;
812 uword n_rx_packets = 0;
813 dpdk_device_and_queue_t * dq;
814 u32 cpu_index = os_get_cpu_number();
815
816 /*
817 * Poll all devices on this cpu for input/interrupts.
818 */
819 vec_foreach (dq, dm->devices_by_cpu[cpu_index])
820 {
821 xd = vec_elt_at_index(dm->devices, dq->device);
822 ASSERT(dq->queue_id == 0);
823 n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0);
824 }
825
826 VIRL_SPEED_LIMIT()
827
828 return n_rx_packets;
829}
830
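/*
 * RSS variant of dpdk-input: identical to dpdk_input() except that each
 * devices_by_cpu entry carries an explicit rx queue id to poll.
 */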
831uword
832dpdk_input_rss (vlib_main_t * vm,
833 vlib_node_runtime_t * node,
834 vlib_frame_t * f)
835{
836 dpdk_main_t * dm = &dpdk_main;
837 dpdk_device_t * xd;
838 uword n_rx_packets = 0;
839 dpdk_device_and_queue_t * dq;
840 u32 cpu_index = os_get_cpu_number();
841
842 /*
843 * Poll all devices on this cpu for input/interrupts.
844 */
845 vec_foreach (dq, dm->devices_by_cpu[cpu_index])
846 {
847 xd = vec_elt_at_index(dm->devices, dq->device);
848 n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id);
849 }
850
851 VIRL_SPEED_LIMIT()
852
853 return n_rx_packets;
854}
855
856VLIB_REGISTER_NODE (dpdk_input_node) = {
857 .function = dpdk_input,
858 .type = VLIB_NODE_TYPE_INPUT,
859 .name = "dpdk-input",
860
861 /* Will be enabled if/when hardware is detected. */
862 .state = VLIB_NODE_STATE_DISABLED,
863
864 .format_buffer = format_ethernet_header_with_length,
865 .format_trace = format_dpdk_rx_dma_trace,
866
867 .n_errors = DPDK_N_ERROR,
868 .error_strings = dpdk_error_strings,
869
870 .n_next_nodes = DPDK_RX_N_NEXT,
871 .next_nodes = {
872 [DPDK_RX_NEXT_DROP] = "error-drop",
873 [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
874 [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
875 [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
876 [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
877 },
878};
879
880/*
881 * Override the next nodes for the dpdk input nodes.
882 * Must be invoked prior to VLIB_INIT_FUNCTION calls.
883 */
884void dpdk_set_next_node (dpdk_rx_next_t next, char *name)
885{
886 vlib_node_registration_t *r = &dpdk_input_node;
887 vlib_node_registration_t *r_io = &dpdk_io_input_node;
888 vlib_node_registration_t *r_handoff = &handoff_dispatch_node;
889
890 switch (next)
891 {
892 case DPDK_RX_NEXT_IP4_INPUT:
893 case DPDK_RX_NEXT_IP6_INPUT:
894 case DPDK_RX_NEXT_MPLS_INPUT:
895 case DPDK_RX_NEXT_ETHERNET_INPUT:
896 r->next_nodes[next] = name;
897 r_io->next_nodes[next] = name;
898 r_handoff->next_nodes[next] = name;
899 break;
900
901 default:
902 clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
903 break;
904 }
905}
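
/*
 * Usage sketch (the feature node name below is hypothetical): a plugin that
 * wants to see all IPv4 packets before ip4-input could call, from its init
 * function:
 *
 *   dpdk_set_next_node (DPDK_RX_NEXT_IP4_INPUT, "my-ip4-feature");
 */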
906
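/*
 * Reserve the next slot in a worker's handoff frame queue: atomically bump
 * the tail, wait until the ring has room and the slot's previous contents
 * have been consumed (elt->valid cleared), then initialize the element.
 */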
907inline vlib_frame_queue_elt_t *
908vlib_get_handoff_queue_elt (u32 vlib_worker_index)
909{
910 vlib_frame_queue_t *fq;
911 vlib_frame_queue_elt_t *elt;
912 u64 new_tail;
913
914 fq = vlib_frame_queues[vlib_worker_index];
915 ASSERT (fq);
916
917 new_tail = __sync_add_and_fetch (&fq->tail, 1);
918
919 /* Wait until a ring slot is available */
920 while (new_tail >= fq->head_hint + fq->nelts)
921 vlib_worker_thread_barrier_check ();
922
923 elt = fq->elts + (new_tail & (fq->nelts-1));
924
925 /* this would be very bad... */
926 while (elt->valid)
927 ;
928
929 elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
930 elt->last_n_vectors = elt->n_vectors = 0;
931
932 return elt;
933}
934
935inline vlib_frame_queue_elt_t *
936dpdk_get_handoff_queue_elt (
937 u32 vlib_worker_index,
938 vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index)
939{
940 vlib_frame_queue_elt_t *elt;
941
942 if (handoff_queue_elt_by_worker_index [vlib_worker_index])
943 return handoff_queue_elt_by_worker_index [vlib_worker_index];
944
945 elt = vlib_get_handoff_queue_elt (vlib_worker_index);
946
947 handoff_queue_elt_by_worker_index [vlib_worker_index] = elt;
948
949 return elt;
950}
951
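/*
 * Return the worker's frame queue if its depth has crossed queue_hi_thresh
 * (caching the result in handoff_queue_by_worker_index so later packets in
 * the burst skip the check), or NULL if the queue is not congested.
 */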
952static inline vlib_frame_queue_t *
953is_vlib_handoff_queue_congested (
954 u32 vlib_worker_index,
955 u32 queue_hi_thresh,
956 vlib_frame_queue_t ** handoff_queue_by_worker_index)
957{
958 vlib_frame_queue_t *fq;
959
960 fq = handoff_queue_by_worker_index [vlib_worker_index];
961 if (fq != (vlib_frame_queue_t *)(~0))
962 return fq;
963
964 fq = vlib_frame_queues[vlib_worker_index];
965 ASSERT (fq);
966
967 if (PREDICT_FALSE(fq->tail >= (fq->head_hint + queue_hi_thresh))) {
968 /* a valid entry in the array will indicate the queue has reached
969 * the specified threshold and is congested
970 */
971 handoff_queue_by_worker_index [vlib_worker_index] = fq;
972 fq->enqueue_full_events++;
973 return fq;
974 }
975
976 return NULL;
977}
978
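/*
 * Flow-hash key extractors used to spread handed-off packets across worker
 * threads: the 64-bit key is fed to clib_xxhash() and the result selects the
 * worker.
 */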
979static inline u64 ipv4_get_key (ip4_header_t *ip)
980{
981 u64 hash_key;
982
983 hash_key = *((u64*)(&ip->address_pair)) ^ ip->protocol;
984
985 return hash_key;
986}
987
988static inline u64 ipv6_get_key (ip6_header_t *ip)
989{
990 u64 hash_key;
991
992 hash_key = ip->src_address.as_u64[0] ^
993 ip->src_address.as_u64[1] ^
994 ip->dst_address.as_u64[0] ^
995 ip->dst_address.as_u64[1] ^
996 ip->protocol;
997
998 return hash_key;
999}
1000
1001
1002#define MPLS_BOTTOM_OF_STACK_BIT_MASK 0x00000100U
1003#define MPLS_LABEL_MASK 0xFFFFF000U
1004
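/*
 * For MPLS, walk up to five labels looking for the bottom-of-stack bit and
 * hash on the encapsulated IPv4/IPv6 header; if the bottom of the stack is
 * not found, fall back to the last label examined.
 */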
1005static inline u64 mpls_get_key (mpls_unicast_header_t *m)
1006{
1007 u64 hash_key;
1008 u8 ip_ver;
1009
1010
1011 /* find the bottom of the MPLS label stack. */
1012 if (PREDICT_TRUE(m->label_exp_s_ttl &
1013 clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
1014 goto bottom_lbl_found;
1015 }
1016 m++;
1017
1018 if (PREDICT_TRUE(m->label_exp_s_ttl &
1019 clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
1020 goto bottom_lbl_found;
1021 }
1022 m++;
1023
1024 if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
1025 goto bottom_lbl_found;
1026 }
1027 m++;
1028
1029 if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
1030 goto bottom_lbl_found;
1031 }
1032 m++;
1033
1034 if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
1035 goto bottom_lbl_found;
1036 }
1037
1038 /* the bottom label was not found - use the last label */
1039 hash_key = m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
1040
1041 return hash_key;
1042
1043
1044bottom_lbl_found:
1045 m++;
1046 ip_ver = (*((u8 *)m) >> 4);
1047
1048 /* find out if it is IPV4 or IPV6 header */
1049 if (PREDICT_TRUE(ip_ver == 4)) {
1050 hash_key = ipv4_get_key((ip4_header_t *)m);
1051 } else if (PREDICT_TRUE(ip_ver == 6)) {
1052 hash_key = ipv6_get_key((ip6_header_t *)m);
1053 } else {
1054 /* use the bottom label */
1055 hash_key = (m-1)->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
1056 }
1057
1058 return hash_key;
1059
1060}
1061
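/*
 * Top-level key extractor: dispatch on the ethertype (handling one or two
 * VLAN tags) to the IPv4/IPv6/MPLS extractors above; unrecognized types fall
 * back to the inner ethertype (or zero), so such packets are not spread
 * per-flow.
 */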
1062static inline u64 eth_get_key (ethernet_header_t *h0)
1063{
1064 u64 hash_key;
1065
1066
1067 if (PREDICT_TRUE(h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
1068 hash_key = ipv4_get_key((ip4_header_t *)(h0+1));
1069 } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6)) {
1070 hash_key = ipv6_get_key((ip6_header_t *)(h0+1));
1071 } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
1072 hash_key = mpls_get_key((mpls_unicast_header_t *)(h0+1));
1073 } else if ((h0->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ||
1074 (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD))) {
1075 ethernet_vlan_header_t * outer = (ethernet_vlan_header_t *)(h0 + 1);
1076
1077 outer = (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ?
1078 outer+1 : outer;
1079 if (PREDICT_TRUE(outer->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
1080 hash_key = ipv4_get_key((ip4_header_t *)(outer+1));
1081 } else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)) {
1082 hash_key = ipv6_get_key((ip6_header_t *)(outer+1));
1083 } else if (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
1084 hash_key = mpls_get_key((mpls_unicast_header_t *)(outer+1));
1085 } else {
1086 hash_key = outer->type;
1087 }
1088 } else {
1089 hash_key = 0;
1090 }
1091
1092 return hash_key;
1093}
1094
1095/*
1096 * This function is used when dedicated IO threads feed the worker threads.
1097 *
1098 * Devices are allocated to this thread based on instances and instance_id.
1099 * If instances==0 then the function automatically determines the number
1100 * of instances of this thread, and allocates devices between them.
1101 * If instances != 0, then instance_id must be in the range 0..instances-1.
1102 * The function allocates devices among the specified number of instances,
1103 * with this thread having the given instance id. This option is used for
1104 * splitting devices among differently named "io"-type threads.
1105 */
1106void dpdk_io_thread (vlib_worker_thread_t * w,
1107 u32 instances,
1108 u32 instance_id,
1109 char *worker_name,
1110 dpdk_io_thread_callback_t callback)
1111{
1112 vlib_main_t * vm = vlib_get_main();
1113 vlib_thread_main_t * tm = vlib_get_thread_main();
1114 vlib_thread_registration_t * tr;
1115 dpdk_main_t * dm = &dpdk_main;
1116 char *io_name = w->registration->name;
1117 dpdk_device_t * xd;
1118 dpdk_device_t ** my_devices = 0;
1119 vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index = 0;
1120 vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
1121 vlib_frame_queue_elt_t * hf = 0;
1122 int i;
1123 u32 n_left_to_next_worker = 0, * to_next_worker = 0;
1124 u32 next_worker_index = 0;
1125 u32 current_worker_index = ~0;
1126 u32 cpu_index = os_get_cpu_number();
1127 u32 num_workers = 0;
1128 u32 num_devices = 0;
1129 uword * p;
1130 u16 queue_id = 0;
1131 vlib_node_runtime_t * node_trace;
1132 u32 first_worker_index = 0;
1133
1134 /* Wait until the dpdk init sequence is complete */
1135 while (dm->io_thread_release == 0)
1136 vlib_worker_thread_barrier_check();
1137
1138 clib_time_init (&vm->clib_time);
1139
1140 p = hash_get_mem (tm->thread_registrations_by_name, worker_name);
1141 ASSERT (p);
1142 tr = (vlib_thread_registration_t *) p[0];
1143 if (tr)
1144 {
1145 num_workers = tr->count;
1146 first_worker_index = tr->first_index;
1147 }
1148
1149 /* Allocate devices to this thread */
1150 if (instances == 0)
1151 {
1152 /* auto-assign */
1153 instance_id = w->instance_id;
1154
1155 p = hash_get_mem (tm->thread_registrations_by_name, io_name);
1156 tr = (vlib_thread_registration_t *) p[0];
1157 /* Otherwise, how did we get here */
1158 ASSERT (tr && tr->count);
1159 instances = tr->count;
1160 }
1161 else
1162 {
1163 /* manually assign */
1164 ASSERT (instance_id < instances);
1165 }
1166
1167 vec_validate (handoff_queue_elt_by_worker_index,
1168 first_worker_index + num_workers - 1);
1169
1170 vec_validate_init_empty (congested_handoff_queue_by_worker_index,
1171 first_worker_index + num_workers - 1,
1172 (vlib_frame_queue_t *)(~0));
1173
1174 /* packet tracing is triggered on the dpdk-input node for ease-of-use */
1175 node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);
1176
1177 /* And handle them... */
1178 while (1)
1179 {
1180 u32 n_buffers;
1181 u32 mb_index;
1182 uword n_rx_bytes = 0;
1183 u32 n_trace, trace_cnt __attribute__((unused));
1184 vlib_buffer_free_list_t * fl;
1185 u32 hash;
1186 u64 hash_key;
1187 u8 efd_discard_burst;
1188
1189 vlib_worker_thread_barrier_check ();
1190
1191 /* Invoke callback if supplied */
1192 if (PREDICT_FALSE(callback != NULL))
1193 callback(vm);
1194
1195 if (PREDICT_FALSE(vec_len(dm->devices) != num_devices))
1196 {
1197 vec_reset_length(my_devices);
1198 vec_foreach (xd, dm->devices)
1199 {
1200 if (((xd - dm->devices) % tr->count) == instance_id)
1201 {
1202 fprintf(stderr, "i/o thread %d (cpu %d) takes port %d\n",
1203 instance_id, (int) os_get_cpu_number(), (int) (xd - dm->devices));
1204 vec_add1 (my_devices, xd);
1205 }
1206 }
1207 num_devices = vec_len(dm->devices);
1208 }
1209
1210 for (i = 0; i < vec_len (my_devices); i++)
1211 {
1212 xd = my_devices[i];
1213
1214 if (!xd->admin_up)
1215 continue;
1216
1217 n_buffers = dpdk_rx_burst(dm, xd, 0 /* queue_id */);
1218
1219 if (n_buffers == 0)
1220 {
1221 /* check if EFD (dpdk) is enabled */
1222 if (PREDICT_FALSE(dm->efd.enabled))
1223 {
1224 /* reset a few stats */
1225 xd->efd_agent.last_poll_time = 0;
1226 xd->efd_agent.last_burst_sz = 0;
1227 }
1228 continue;
1229 }
1230
1231 vec_reset_length (xd->d_trace_buffers);
1232 trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
1233
1234 /*
1235 * DAW-FIXME: VMXNET3 device stop/start doesn't work,
1236 * therefore fake the stop in the dpdk driver by
1237 * silently dropping all of the incoming pkts instead of
1238 * stopping the driver / hardware.
1239 */
1240 if (PREDICT_FALSE(xd->admin_up != 1))
1241 {
1242 for (mb_index = 0; mb_index < n_buffers; mb_index++)
1243 rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
1244 continue;
1245 }
1246
1247 /* reset EFD action for the burst */
1248 efd_discard_burst = 0;
1249
1250 /* Check for congestion if EFD (Early-Fast-Discard) is enabled
1251 * in any mode (e.g. dpdk, monitor, or drop_all)
1252 */
1253 if (PREDICT_FALSE(dm->efd.enabled))
1254 {
1255 /* update EFD counters */
1256 dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
1257
1258 if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
1259 {
1260 /* drop all received packets */
1261 for (mb_index = 0; mb_index < n_buffers; mb_index++)
1262 rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);
1263
1264 xd->efd_agent.discard_cnt += n_buffers;
1265 increment_efd_drop_counter(vm,
1266 DPDK_ERROR_VLAN_EFD_DROP_PKTS,
1267 n_buffers);
1268
1269 continue;
1270 }
1271
1272 if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
1273 dm->efd.consec_full_frames_hi_thresh))
1274 {
1275 u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
1276 queue_id);
1277 if (device_queue_sz >= dm->efd.queue_hi_thresh)
1278 {
1279 /* dpdk device queue has reached the critical threshold */
1280 xd->efd_agent.congestion_cnt++;
1281
1282 /* apply EFD to packets from the burst */
1283 efd_discard_burst = 1;
1284 }
1285 }
1286 }
1287
1288 fl = vlib_buffer_get_free_list
1289 (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
1290
1291 mb_index = 0;
1292
1293 while (n_buffers > 0)
1294 {
1295 u32 bi0;
1296 u8 next0, error0;
1297 u32 l3_offset0;
1298 vlib_buffer_t * b0, * b_seg, * b_chain = 0;
1299 ethernet_header_t * h0;
1300 u8 nb_seg = 1;
1301 struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
1302 struct rte_mbuf *mb_seg = mb->next;
1303
1304 if (PREDICT_TRUE(n_buffers > 2))
1305 {
1306 struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
1307 vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1308 CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1309 CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1310 CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
1311 }
1312
1313 b0 = (vlib_buffer_t *)(mb+1);
1314
1315 /* check whether EFD is looking for packets to discard */
1316 if (PREDICT_FALSE(efd_discard_burst))
1317 {
1318 u32 cntr_type;
1319 if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
1320 {
1321 rte_pktmbuf_free(mb);
1322 xd->efd_agent.discard_cnt++;
1323 increment_efd_drop_counter(vm,
1324 cntr_type,
1325 1);
1326
1327 n_buffers--;
1328 mb_index++;
1329 continue;
1330 }
1331 }
1332
1333 /* Prefetch one next segment if it exists */
1334 if (PREDICT_FALSE(mb->nb_segs > 1))
1335 {
1336 struct rte_mbuf *pfmb = mb->next;
1337 vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1338 CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1339 CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1340 b_chain = b0;
1341 }
1342
1343 bi0 = vlib_get_buffer_index (vm, b0);
1344 vlib_buffer_init_for_free_list (b0, fl);
1345 b0->clone_count = 0;
1346
1347 dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
1348 &next0, &error0);
1349#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
1350 /*
1351 * Clear overloaded TX offload flags when a DPDK driver
1352 * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
1353 */
1354 if (PREDICT_TRUE(trace_cnt == 0))
1355 mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
1356 else
1357 trace_cnt--;
1358#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
1359
1360 if (error0)
1361 clib_warning ("bi %d error %d", bi0, error0);
1362
1363 b0->error = 0;
1364
1365 l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
1366 next0 == DPDK_RX_NEXT_IP6_INPUT ||
1367 next0 == DPDK_RX_NEXT_MPLS_INPUT) ?
1368 sizeof (ethernet_header_t) : 0);
1369
1370 b0->current_data = l3_offset0;
1371 b0->current_length = mb->data_len - l3_offset0;
1372
1373 b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
1374
1375 if (VMWARE_LENGTH_BUG_WORKAROUND)
1376 b0->current_length -= 4;
1377
1378 vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1379 vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
1380 vnet_buffer(b0)->io_handoff.next_index = next0;
1381 n_rx_bytes += mb->pkt_len;
1382
1383 /* Process subsequent segments of multi-segment packets */
1384 while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
1385 {
1386 ASSERT(mb_seg != 0);
1387
1388 b_seg = (vlib_buffer_t *)(mb_seg+1);
1389 vlib_buffer_init_for_free_list (b_seg, fl);
1390 b_seg->clone_count = 0;
1391
1392 ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
1393 ASSERT(b_seg->current_data == 0);
1394
1395 /*
1396 * The driver (e.g. virtio) may not put the packet data at the start
1397 * of the segment, so don't assume b_seg->current_data == 0 is correct.
1398 */
1399 b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
1400
1401 b_seg->current_length = mb_seg->data_len;
1402 b0->total_length_not_including_first_buffer +=
1403 mb_seg->data_len;
1404
1405 b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
1406 b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
1407
1408 b_chain = b_seg;
1409 mb_seg = mb_seg->next;
1410 nb_seg++;
1411 }
1412
1413 /*
1414 * Turn this on if you run into
1415 * "bad monkey" contexts, and you want to know exactly
1416 * which nodes they've visited... See main.c...
1417 */
1418 VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
1419
1420 if (PREDICT_FALSE (n_trace > mb_index))
1421 vec_add1 (xd->d_trace_buffers, bi0);
1422
1423 next_worker_index = first_worker_index;
1424
1425 /*
1426 * Force unknown traffic onto worker 0,
1427 * and into ethernet-input. $$$$ add more hashes.
1428 */
1429 h0 = (ethernet_header_t *) b0->data;
1430
1431 /* Compute ingress LB hash */
1432 hash_key = eth_get_key(h0);
1433 hash = (u32)clib_xxhash(hash_key);
1434
1435 if (PREDICT_TRUE (is_pow2(num_workers)))
1436 next_worker_index += hash & (num_workers - 1);
1437 else
1438 next_worker_index += hash % num_workers;
1439
1440 /* if EFD is enabled and not already discarding from dpdk,
1441 * check the worker ring/queue for congestion
1442 */
1443 if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
1444 {
1445 vlib_frame_queue_t *fq;
1446
1447 /* fq will be valid if the ring is congested */
1448 fq = is_vlib_handoff_queue_congested(
1449 next_worker_index, tm->efd.queue_hi_thresh,
1450 congested_handoff_queue_by_worker_index);
1451
1452 if (PREDICT_FALSE(fq != NULL))
1453 {
1454 u32 cntr_type;
1455 if (PREDICT_TRUE(cntr_type =
1456 is_efd_discardable(tm, b0, mb)))
1457 {
1458 /* discard the packet */
1459 fq->enqueue_efd_discards++;
1460 increment_efd_drop_counter(vm, cntr_type, 1);
1461 rte_pktmbuf_free(mb);
1462 n_buffers--;
1463 mb_index++;
1464 continue;
1465 }
1466 }
1467 }
1468
1469 if (next_worker_index != current_worker_index)
1470 {
1471 if (hf)
1472 hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1473
1474 hf = dpdk_get_handoff_queue_elt(
1475 next_worker_index,
1476 handoff_queue_elt_by_worker_index);
1477
1478 n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
1479 to_next_worker = &hf->buffer_index[hf->n_vectors];
1480 current_worker_index = next_worker_index;
1481 }
1482
1483 /* enqueue to correct worker thread */
1484 to_next_worker[0] = bi0;
1485 to_next_worker++;
1486 n_left_to_next_worker--;
1487
1488 if (n_left_to_next_worker == 0)
1489 {
1490 hf->n_vectors = VLIB_FRAME_SIZE;
1491 vlib_put_handoff_queue_elt(hf);
1492 current_worker_index = ~0;
1493 handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1494 hf = 0;
1495 }
1496
1497 n_buffers--;
1498 mb_index++;
1499 }
1500
1501 if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
1502 {
1503 /* credit the trace to the trace node */
1504 dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
1505 vec_len (xd->d_trace_buffers));
1506 vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
1507 }
1508
1509 vlib_increment_combined_counter
1510 (vnet_get_main()->interface_main.combined_sw_if_counters
1511 + VNET_INTERFACE_COUNTER_RX,
1512 cpu_index,
1513 xd->vlib_sw_if_index,
1514 mb_index, n_rx_bytes);
1515
1516 dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
1517 dw->aggregate_rx_packets += mb_index;
1518 }
1519
1520 if (hf)
1521 hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1522
1523 /* Ship frames to the worker nodes */
1524 for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
1525 {
1526 if (handoff_queue_elt_by_worker_index[i])
1527 {
1528 hf = handoff_queue_elt_by_worker_index[i];
1529 /*
1530 * It works better to let the handoff node
1531 * rate-adapt, always ship the handoff queue element.
1532 */
1533 if (1 || hf->n_vectors == hf->last_n_vectors)
1534 {
1535 vlib_put_handoff_queue_elt(hf);
1536 handoff_queue_elt_by_worker_index[i] = 0;
1537 }
1538 else
1539 hf->last_n_vectors = hf->n_vectors;
1540 }
1541 congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
1542 }
1543 hf = 0;
1544 current_worker_index = ~0;
1545
1546 vlib_increment_main_loop_counter (vm);
1547 }
1548}
1549
1550/*
1551 * This function is used when the main thread performs IO and feeds the
1552 * worker threads.
1553 */
1554static uword
1555dpdk_io_input (vlib_main_t * vm,
1556 vlib_node_runtime_t * node,
1557 vlib_frame_t * f)
1558{
1559 dpdk_main_t * dm = &dpdk_main;
1560 dpdk_device_t * xd;
1561 vlib_thread_main_t * tm = vlib_get_thread_main();
1562 uword n_rx_packets = 0;
1563 static vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index;
1564 static vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
1565 vlib_frame_queue_elt_t * hf = 0;
1566 int i;
1567 u32 n_left_to_next_worker = 0, * to_next_worker = 0;
1568 u32 next_worker_index = 0;
1569 u32 current_worker_index = ~0;
1570 u32 cpu_index = os_get_cpu_number();
1571 static int num_workers_set;
1572 static u32 num_workers;
1573 u16 queue_id = 0;
1574 vlib_node_runtime_t * node_trace;
1575 static u32 first_worker_index;
1576
1577 if (PREDICT_FALSE(num_workers_set == 0))
1578 {
1579 uword * p;
1580 vlib_thread_registration_t * tr;
1581 /* Only the standard vnet worker threads are supported */
1582 p = hash_get_mem (tm->thread_registrations_by_name, "workers");
1583 tr = (vlib_thread_registration_t *) p[0];
1584 if (tr)
1585 {
1586 num_workers = tr->count;
1587 first_worker_index = tr->first_index;
1588 }
1589 num_workers_set = 1;
1590 }
1591
1592 if (PREDICT_FALSE(handoff_queue_elt_by_worker_index == 0))
1593 {
1594 vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
1595
1596 vec_validate_init_empty (congested_handoff_queue_by_worker_index,
1597 first_worker_index + num_workers - 1,
1598 (vlib_frame_queue_t *)(~0));
1599 }
1600
1601 /* packet tracing is triggered on the dpdk-input node for ease-of-use */
1602 node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);
1603
1604 vec_foreach (xd, dm->devices)
1605 {
1606 u32 n_buffers;
1607 u32 mb_index;
1608 uword n_rx_bytes = 0;
1609 u32 n_trace, trace_cnt __attribute__((unused));
1610 vlib_buffer_free_list_t * fl;
1611 u32 hash;
1612 u64 hash_key;
1613 u8 efd_discard_burst = 0;
1614
1615 if (!xd->admin_up)
1616 continue;
1617
1618 n_buffers = dpdk_rx_burst(dm, xd, queue_id );
1619
1620 if (n_buffers == 0)
1621 {
1622 /* check if EFD (dpdk) is enabled */
1623 if (PREDICT_FALSE(dm->efd.enabled))
1624 {
1625 /* reset a few stats */
1626 xd->efd_agent.last_poll_time = 0;
1627 xd->efd_agent.last_burst_sz = 0;
1628 }
1629 continue;
1630 }
1631
1632 vec_reset_length (xd->d_trace_buffers);
1633 trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
1634
1635 /*
1636 * DAW-FIXME: VMXNET3 device stop/start doesn't work,
1637 * therefore fake the stop in the dpdk driver by
1638 * silently dropping all of the incoming pkts instead of
1639 * stopping the driver / hardware.
1640 */
1641 if (PREDICT_FALSE(xd->admin_up != 1))
1642 {
1643 for (mb_index = 0; mb_index < n_buffers; mb_index++)
1644 rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
1645 continue;
1646 }
1647
1648 /* Check for congestion if EFD (Early-Fast-Discard) is enabled
1649 * in any mode (e.g. dpdk, monitor, or drop_all)
1650 */
1651 if (PREDICT_FALSE(dm->efd.enabled))
1652 {
1653 /* update EFD counters */
1654 dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
1655
1656 if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
1657 {
1658 /* discard all received packets */
1659 for (mb_index = 0; mb_index < n_buffers; mb_index++)
1660 rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);
1661
1662 xd->efd_agent.discard_cnt += n_buffers;
1663 increment_efd_drop_counter(vm,
1664 DPDK_ERROR_VLAN_EFD_DROP_PKTS,
1665 n_buffers);
1666
1667 continue;
1668 }
1669
1670 if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
1671 dm->efd.consec_full_frames_hi_thresh))
1672 {
1673 u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
1674 queue_id);
1675 if (device_queue_sz >= dm->efd.queue_hi_thresh)
1676 {
1677 /* dpdk device queue has reached the critical threshold */
1678 xd->efd_agent.congestion_cnt++;
1679
1680 /* apply EFD to packets from the burst */
1681 efd_discard_burst = 1;
1682 }
1683 }
1684 }
1685
1686 fl = vlib_buffer_get_free_list
1687 (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
1688
1689 mb_index = 0;
1690
1691 while (n_buffers > 0)
1692 {
1693 u32 bi0;
1694 u8 next0, error0;
1695 u32 l3_offset0;
1696 vlib_buffer_t * b0, * b_seg, * b_chain = 0;
1697 ethernet_header_t * h0;
1698 u8 nb_seg = 1;
1699 struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
1700 struct rte_mbuf *mb_seg = mb->next;
1701
1702 if (PREDICT_TRUE(n_buffers > 2))
1703 {
1704 struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
1705 vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1706 CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1707 CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1708 CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
1709 }
1710
1711 b0 = (vlib_buffer_t *)(mb+1);
1712
1713 /* check whether EFD is looking for packets to discard */
1714 if (PREDICT_FALSE(efd_discard_burst))
1715 {
1716 u32 cntr_type;
1717 if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
1718 {
1719 rte_pktmbuf_free(mb);
1720 xd->efd_agent.discard_cnt++;
1721 increment_efd_drop_counter(vm,
1722 cntr_type,
1723 1);
1724
1725 n_buffers--;
1726 mb_index++;
1727 continue;
1728 }
1729 }
1730
1731 /* Prefetch one next segment if it exists */
1732 if (PREDICT_FALSE(mb->nb_segs > 1))
1733 {
1734 struct rte_mbuf *pfmb = mb->next;
1735 vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1736 CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1737 CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1738 b_chain = b0;
1739 }
1740
1741 bi0 = vlib_get_buffer_index (vm, b0);
1742 vlib_buffer_init_for_free_list (b0, fl);
1743 b0->clone_count = 0;
1744
1745 dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
1746 &next0, &error0);
1747#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
1748 /*
1749 * Clear overloaded TX offload flags when a DPDK driver
1750 * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
1751 */
1752 if (PREDICT_TRUE(trace_cnt == 0))
1753 mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
1754 else
1755 trace_cnt--;
1756#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
1757
1758 if (error0)
1759 clib_warning ("bi %d error %d", bi0, error0);
1760
1761 b0->error = 0;
1762
1763 l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
1764 next0 == DPDK_RX_NEXT_IP6_INPUT ||
1765 next0 == DPDK_RX_NEXT_MPLS_INPUT) ?
1766 sizeof (ethernet_header_t) : 0);
1767
1768 b0->current_data = l3_offset0;
1769 b0->current_length = mb->data_len - l3_offset0;
1770
1771 b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
1772
1773 if (VMWARE_LENGTH_BUG_WORKAROUND)
1774 b0->current_length -= 4;
1775
1776 vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1777 vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
1778 vnet_buffer(b0)->io_handoff.next_index = next0;
1779 n_rx_bytes += mb->pkt_len;
1780
1781 /* Process subsequent segments of multi-segment packets */
1782 while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
1783 {
1784 ASSERT(mb_seg != 0);
1785
1786 b_seg = (vlib_buffer_t *)(mb_seg+1);
1787 vlib_buffer_init_for_free_list (b_seg, fl);
1788 b_seg->clone_count = 0;
1789
1790 ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
1791 ASSERT(b_seg->current_data == 0);
1792
1793 /*
1794 * The driver (e.g. virtio) may not put the packet data at the start
1795 * of the segment, so don't assume b_seg->current_data == 0 is correct.
1796 */
1797 b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
1798
1799 b_seg->current_length = mb_seg->data_len;
1800 b0->total_length_not_including_first_buffer +=
1801 mb_seg->data_len;
1802
1803 b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
1804 b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
1805
1806 b_chain = b_seg;
1807 mb_seg = mb_seg->next;
1808 nb_seg++;
1809 }
1810
1811 /*
1812 * Turn this on if you run into
1813 * "bad monkey" contexts, and you want to know exactly
1814 * which nodes they've visited... See main.c...
1815 */
1816 VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
1817
1818 if (PREDICT_FALSE (n_trace > mb_index))
1819 vec_add1 (xd->d_trace_buffers, bi0);
1820
1821 next_worker_index = first_worker_index;
1822
1823 /*
1824 * Force unknown traffic onto worker 0,
1825 * and into ethernet-input. $$$$ add more hashes.
1826 */
1827 h0 = (ethernet_header_t *) b0->data;
1828
1829 /* Compute ingress LB hash */
1830 hash_key = eth_get_key(h0);
1831 hash = (u32)clib_xxhash(hash_key);
1832
1833 if (PREDICT_TRUE (is_pow2(num_workers)))
1834 next_worker_index += hash & (num_workers - 1);
1835 else
1836 next_worker_index += hash % num_workers;
1837
1838 /* if EFD is enabled and not already discarding from dpdk,
1839 * check the worker ring/queue for congestion
1840 */
1841 if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
1842 {
1843 vlib_frame_queue_t *fq;
1844
1845 /* fq will be valid if the ring is congested */
1846 fq = is_vlib_handoff_queue_congested(
1847 next_worker_index, tm->efd.queue_hi_thresh,
1848 congested_handoff_queue_by_worker_index);
1849
1850 if (PREDICT_FALSE(fq != NULL))
1851 {
1852 u32 cntr_type;
1853 if (PREDICT_TRUE(cntr_type =
1854 is_efd_discardable(tm, b0, mb)))
1855 {
1856 /* discard the packet */
1857 fq->enqueue_efd_discards++;
1858 increment_efd_drop_counter(vm, cntr_type, 1);
1859 rte_pktmbuf_free(mb);
1860 n_buffers--;
1861 mb_index++;
1862 continue;
1863 }
1864 }
1865 }
1866
1867 if (next_worker_index != current_worker_index)
1868 {
1869 if (hf)
1870 hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1871
1872 hf = dpdk_get_handoff_queue_elt(
1873 next_worker_index,
1874 handoff_queue_elt_by_worker_index);
1875
1876 n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
1877 to_next_worker = &hf->buffer_index[hf->n_vectors];
1878 current_worker_index = next_worker_index;
1879 }
1880
1881 /* enqueue to correct worker thread */
1882 to_next_worker[0] = bi0;
1883 to_next_worker++;
1884 n_left_to_next_worker--;
1885
1886 if (n_left_to_next_worker == 0)
1887 {
1888 hf->n_vectors = VLIB_FRAME_SIZE;
1889 vlib_put_handoff_queue_elt(hf);
1890 current_worker_index = ~0;
1891 handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1892 hf = 0;
1893 }
1894
1895 n_buffers--;
1896 mb_index++;
1897 }
1898
1899 if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
1900 {
1901 /* credit the trace to the trace node */
1902 dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
1903 vec_len (xd->d_trace_buffers));
1904 vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
1905 }
1906
1907 vlib_increment_combined_counter
1908 (vnet_get_main()->interface_main.combined_sw_if_counters
1909 + VNET_INTERFACE_COUNTER_RX,
1910 cpu_index,
1911 xd->vlib_sw_if_index,
1912 mb_index, n_rx_bytes);
1913
1914 dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
1915 dw->aggregate_rx_packets += mb_index;
1916 n_rx_packets += mb_index;
1917 }
1918
1919 if (hf)
1920 hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1921
1922 /* Ship frames to the worker nodes */
1923 for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
1924 {
1925 if (handoff_queue_elt_by_worker_index[i])
1926 {
1927 hf = handoff_queue_elt_by_worker_index[i];
1928 /*
1929 * It works better to let the handoff node
1930 * rate-adapt, always ship the handoff queue element.
1931 */
1932 if (1 || hf->n_vectors == hf->last_n_vectors)
1933 {
1934 vlib_put_handoff_queue_elt(hf);
1935 handoff_queue_elt_by_worker_index[i] = 0;
1936 }
1937 else
1938 hf->last_n_vectors = hf->n_vectors;
1939 }
1940 congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
1941 }
1942 hf = 0;
1943 current_worker_index = ~0;
1944 return n_rx_packets;
1945}
1946
1947VLIB_REGISTER_NODE (dpdk_io_input_node) = {
1948 .function = dpdk_io_input,
1949 .type = VLIB_NODE_TYPE_INPUT,
1950 .name = "dpdk-io-input",
1951
1952 /* Will be enabled if/when hardware is detected. */
1953 .state = VLIB_NODE_STATE_DISABLED,
1954
1955 .format_buffer = format_ethernet_header_with_length,
1956 .format_trace = format_dpdk_rx_dma_trace,
1957
1958 .n_errors = DPDK_N_ERROR,
1959 .error_strings = dpdk_error_strings,
1960
1961 .n_next_nodes = DPDK_RX_N_NEXT,
1962 .next_nodes = {
1963 [DPDK_RX_NEXT_DROP] = "error-drop",
1964 [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
1965 [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1966 [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
1967 [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
1968 },
1969};
1970
1971/*
1972 * set_efd_bitmap()
1973 * Based on the operation type, set the bits below (LESS_THAN) or at/above (GREATER_OR_EQUAL) the given value
1974 */
1975void
1976set_efd_bitmap (u8 *bitmap, u32 value, u32 op)
1977{
1978 int ix;
1979
1980 *bitmap = 0;
1981 for (ix = 0; ix < 8; ix++) {
1982 if (((op == EFD_OPERATION_LESS_THAN) && (ix < value)) ||
1983 ((op == EFD_OPERATION_GREATER_OR_EQUAL) && (ix >= value))){
1984 (*bitmap) |= (1 << ix);
1985 }
1986 }
1987}
1988
1989void
1990efd_config (u32 enabled,
1991 u32 ip_prec, u32 ip_op,
1992 u32 mpls_exp, u32 mpls_op,
1993 u32 vlan_cos, u32 vlan_op)
1994{
1995 vlib_thread_main_t * tm = vlib_get_thread_main();
1996 dpdk_main_t * dm = &dpdk_main;
1997
1998 if (enabled) {
1999 tm->efd.enabled |= VLIB_EFD_DISCARD_ENABLED;
2000 dm->efd.enabled |= DPDK_EFD_DISCARD_ENABLED;
2001 } else {
2002 tm->efd.enabled &= ~VLIB_EFD_DISCARD_ENABLED;
2003 dm->efd.enabled &= ~DPDK_EFD_DISCARD_ENABLED;
2004 }
2005
2006 set_efd_bitmap(&tm->efd.ip_prec_bitmap, ip_prec, ip_op);
2007 set_efd_bitmap(&tm->efd.mpls_exp_bitmap, mpls_exp, mpls_op);
2008 set_efd_bitmap(&tm->efd.vlan_cos_bitmap, vlan_cos, vlan_op);
2009
2010}
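
/*
 * Usage sketch (values are illustrative): discard IPv4 packets with
 * precedence below 3 and MPLS packets with EXP below 2 when EFD detects
 * congestion, and leave the VLAN CoS bitmap empty so no tagged traffic is
 * dropped on CoS alone:
 *
 *   efd_config (1, 3, EFD_OPERATION_LESS_THAN,
 *               2, EFD_OPERATION_LESS_THAN,
 *               0, EFD_OPERATION_LESS_THAN);
 */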