blob: 3e287a9a166bb11897e9c1ecde3951ea87fba2ec [file] [log] [blame]
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15#ifndef __included_dpdk_h__
16#define __included_dpdk_h__
17
18/* $$$$ We should rename always_inline -> clib_always_inline */
19#undef always_inline
20
21#include <rte_config.h>
22
23#include <rte_common.h>
24#include <rte_dev.h>
25#include <rte_log.h>
26#include <rte_memory.h>
27#include <rte_memcpy.h>
28#include <rte_memzone.h>
29#include <rte_tailq.h>
30#include <rte_eal.h>
31#include <rte_per_lcore.h>
32#include <rte_launch.h>
33#include <rte_atomic.h>
34#include <rte_cycles.h>
35#include <rte_prefetch.h>
36#include <rte_lcore.h>
37#include <rte_per_lcore.h>
38#include <rte_branch_prediction.h>
39#include <rte_interrupts.h>
40#include <rte_pci.h>
41#include <rte_random.h>
42#include <rte_debug.h>
43#include <rte_ether.h>
44#include <rte_ethdev.h>
45#include <rte_ring.h>
46#include <rte_mempool.h>
47#include <rte_mbuf.h>
48#include <rte_kni.h>
49#include <rte_virtio_net.h>
50#include <rte_pci_dev_ids.h>
51#include <rte_version.h>
52
53#include <vnet/unix/pcap.h>
54#include <vnet/devices/virtio/vhost-user.h>
55
/* Redefine always_inline (it was #undef'd above before including DPDK):
 * in debug builds leave functions out-of-line so they are visible to the
 * debugger; otherwise force inlining. */
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
61
/* Size of one rte_mbuf: 2KB of packet data plus mbuf header and headroom. */
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
/* Default number of mbufs in the pool (32K). */
#define NB_MBUF (32<<10)

/* NOTE(review): these are tentative definitions in a header, so every
 * translation unit that includes this file emits them (relies on common
 * symbols).  Consider declaring them 'extern' here and defining each once
 * in a .c file. */
vnet_device_class_t dpdk_device_class;
vlib_node_registration_t dpdk_input_node;
vlib_node_registration_t dpdk_io_input_node;
vlib_node_registration_t handoff_dispatch_node;
69
/* Flavor of interface a dpdk_device_t represents. */
typedef enum {
  VNET_DPDK_DEV_ETH = 1,      /* Standard DPDK PMD driver */
  VNET_DPDK_DEV_KNI,          /* Kernel NIC Interface */
  VNET_DPDK_DEV_VHOST_USER,   /* vhost-user backed interface */
  VNET_DPDK_DEV_UNKNOWN,      /* must be last */
} dpdk_device_type_t;
76
/* X-macro table of known PMD driver names and their symbolic tags.
 * Expanded via _() to build dpdk_pmd_t and any per-PMD dispatch table. */
#define foreach_dpdk_pmd \
  _ ("rte_nicvf_pmd", THUNDERX) \
  _ ("rte_em_pmd", E1000EM) \
  _ ("rte_igb_pmd", IGB) \
  _ ("rte_igbvf_pmd", IGBVF) \
  _ ("rte_ixgbe_pmd", IXGBE) \
  _ ("rte_ixgbevf_pmd", IXGBEVF) \
  _ ("rte_i40e_pmd", I40E) \
  _ ("rte_i40evf_pmd", I40EVF) \
  _ ("rte_virtio_pmd", VIRTIO) \
  _ ("rte_vice_pmd", VICE) \
  _ ("rte_enic_pmd", ENIC) \
  _ ("rte_vmxnet3_pmd", VMXNET3) \
  _ ("AF_PACKET PMD", AF_PACKET) \
  _ ("rte_pmd_fm10k", FM10K) \
  _ ("rte_cxgbe_pmd", CXGBE)
Ed Warnickecb9cada2015-12-08 15:45:58 -070093
/* Symbolic identifier for the PMD driving a port, generated from
 * foreach_dpdk_pmd above. */
typedef enum {
  VNET_DPDK_PMD_NONE,
#define _(s,f) VNET_DPDK_PMD_##f,
  foreach_dpdk_pmd
#undef _
#ifdef NETMAP
  VNET_DPDK_PMD_NETMAP,
#endif
  VNET_DPDK_PMD_UNKNOWN, /* must be last */
} dpdk_pmd_t;
104
/* Coarse classification of a port, primarily by link speed. */
typedef enum {
  VNET_DPDK_PORT_TYPE_ETH_1G,
  VNET_DPDK_PORT_TYPE_ETH_10G,
  VNET_DPDK_PORT_TYPE_ETH_40G,
  VNET_DPDK_PORT_TYPE_ETH_SWITCH,
#ifdef NETMAP
  VNET_DPDK_PORT_TYPE_NETMAP,
#endif
  VNET_DPDK_PORT_TYPE_AF_PACKET,
  VNET_DPDK_PORT_TYPE_UNKNOWN,
} dpdk_port_type_t;
116
/* A vlib frame being accumulated for handoff, with a flush deadline. */
typedef struct {
  f64 deadline;           /* time at which the frame should be flushed */
  vlib_frame_t * frame;   /* frame under construction */
} dpdk_frame_t;
121
#define DPDK_EFD_MAX_DISCARD_RATE 10

/* Per-device Early-Fast-Discard (EFD) bookkeeping: rx burst sizes,
 * congestion detection counters and discard statistics. */
typedef struct {
  u16 last_burst_sz;          /* size of the most recent rx burst */
  u16 max_burst_sz;           /* largest rx burst seen */
  u32 full_frames_cnt;
  u32 consec_full_frames_cnt; /* consecutive full frames (congestion hint) */
  u32 congestion_cnt;
  u64 last_poll_time;
  u64 max_poll_delay;
  u32 discard_cnt;            /* packets discarded by EFD */
  u32 total_packet_cnt;
} dpdk_efd_agent_t;
135
/* Per-virtqueue vhost-user state: kick/call/err file descriptors plus
 * interrupt-coalescing bookkeeping. */
typedef struct {
  int callfd;               /* fd used to interrupt the guest */
  int kickfd;               /* fd the guest kicks to signal avail buffers */
  int errfd;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  int enabled;              /* vring enabled (DPDK >= 2.2 multiqueue) */
#endif
  u32 callfd_idx;
  u32 n_since_last_int;     /* packets since the last interrupt was sent */
  f64 int_deadline;         /* time by which an interrupt must be sent */
} dpdk_vu_vring;
147
/* Per-interface vhost-user state: unix socket plumbing, negotiated
 * features, vrings and guest memory region mappings. */
typedef struct {
  u32 is_up;
  u32 unix_fd;
  u32 unix_file_index;
  u32 client_fd;
  char sock_filename[256];  /* unix socket path */
  int sock_errno;
  u8 sock_is_server;        /* 1 if we listen, 0 if we connect */
  u8 active;

  u64 feature_mask;         /* negotiated vhost-user feature bits */
  u32 num_vrings;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  dpdk_vu_vring vrings[VHOST_MAX_QUEUE_PAIRS * 2];
#else
  dpdk_vu_vring vrings[2];  /* single rx/tx queue pair pre-DPDK 2.2 */
#endif
  u64 region_addr[VHOST_MEMORY_MAX_NREGIONS]; /* mapped guest memory regions */
  u32 region_fd[VHOST_MEMORY_MAX_NREGIONS];
} dpdk_vu_intf_t;
168
/* Callback invoked for flow control; receives the hw interface and the
 * number of packets involved.  See dpdk_set_flowcontrol_callback(). */
typedef void (*dpdk_flowcontrol_callback_t) (vlib_main_t *vm,
                                             u32 hw_if_index,
                                             u32 n_packets);
172
/*
 * The header for the tx_vector in dpdk_device_t.
 * Head and tail are indexes into the tx_vector and are of type
 * u64 so they never overflow.
 */
typedef struct {
  u64 tx_head;   /* producer index */
  u64 tx_tail;   /* consumer index */
} tx_ring_hdr_t;
182
/* Per-device state for a DPDK-managed interface.  Split across two cache
 * lines: the first holds hot datapath fields, the second configuration
 * and statistics. */
typedef struct {
  CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
  /* per-queue tx locks; presumably used when tx queues are shared
   * between threads — confirm against dpdk_device_lock_init() */
  volatile u32 **lockp;

  /* Instance ID */
  u32 device_index;

  u32 vlib_hw_if_index;
  u32 vlib_sw_if_index;

  /* next node index if we decide to steal the rx graph arc */
  u32 per_interface_next_index;

  /* dpdk rte_mbuf rx and tx vectors, VLIB_FRAME_SIZE */
  struct rte_mbuf *** tx_vectors; /* one per worker thread */
  struct rte_mbuf *** rx_vectors;

  /* vector of traced contexts, per device */
  u32 * d_trace_buffers;

  /* per-worker destination frame queue */
  dpdk_frame_t * frames;

  dpdk_device_type_t dev_type:8;  /* ETH / KNI / VHOST_USER */
  dpdk_pmd_t pmd:8;               /* which PMD drives this port */
  i8 cpu_socket;                  /* NUMA socket of the device */

  u8 admin_up;
  u8 promisc;

  CLIB_CACHE_LINE_ALIGN_MARK(cacheline1);

  /* PMD related */
  u16 tx_q_used;
  u16 rx_q_used;
  u16 nb_rx_desc;
  u16 nb_tx_desc;
  u16 * cpu_socket_id_by_queue;
  struct rte_eth_conf port_conf;
  struct rte_eth_txconf tx_conf;

  /* KNI related */
  struct rte_kni *kni;
  u8 kni_port_id;

  /* vhost-user related */
  u32 vu_if_id;
  struct virtio_net vu_vhost_dev;
  u32 vu_is_running;
  dpdk_vu_intf_t *vu_intf;

  /* af_packet */
  u8 af_packet_port_id;

  struct rte_eth_link link;         /* last link state read from the PMD */
  f64 time_last_link_update;

  struct rte_eth_stats stats;       /* current counters */
  struct rte_eth_stats last_stats;  /* counters at previous poll */
  struct rte_eth_xstats * xstats;   /* extended PMD statistics */
  f64 time_last_stats_update;
  dpdk_port_type_t port_type;

  dpdk_efd_agent_t efd_agent;       /* early-fast-discard state */
} dpdk_device_t;
248
#define MAX_NELTS 32
/* Snapshot of a handoff frame queue's indices and occupancy, captured
 * for tracing/debug display. */
typedef struct {
  CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
  u64 head;
  u64 head_hint;
  u64 tail;
  u32 n_in_use;              /* elements currently in the queue */
  u32 nelts;                 /* queue capacity */
  u32 written;
  u32 threshold;
  i32 n_vectors[MAX_NELTS];  /* per-element vector counts */
} frame_queue_trace_t;
261
/* Entries in the per-device tx ring. */
#define DPDK_TX_RING_SIZE (4 * 1024)

/* How often (seconds) the process node refreshes stats and link state. */
#define DPDK_STATS_POLL_INTERVAL 10.0
#define DPDK_LINK_POLL_INTERVAL 3.0
266
/* Per-worker-thread state, cache-line aligned to avoid false sharing. */
typedef struct {
  CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);

  /* total input packet counter */
  u64 aggregate_rx_packets;
} dpdk_worker_t;
273
/* (device index, rx queue id) pair — the unit of work assigned to a cpu. */
typedef struct {
  u32 device;      /* index into dpdk_main.devices */
  u16 queue_id;    /* rx queue on that device */
} dpdk_device_and_queue_t;
278
/* Early-Fast-Discard (EFD) */
/* Mode bits for dpdk_efd_t.enabled. */
#define DPDK_EFD_DISABLED 0
#define DPDK_EFD_DISCARD_ENABLED (1 << 0)
#define DPDK_EFD_MONITOR_ENABLED (1 << 1)
#define DPDK_EFD_DROPALL_ENABLED (1 << 2)

/* Default thresholds used to declare congestion. */
#define DPDK_EFD_DEFAULT_DEVICE_QUEUE_HI_THRESH_PCT 90
#define DPDK_EFD_DEFAULT_CONSEC_FULL_FRAMES_HI_THRESH 6

/* Global EFD configuration. */
typedef struct dpdk_efd_t {
  u16 enabled;                       /* DPDK_EFD_* mode bits */
  u16 queue_hi_thresh;               /* queue fill level triggering EFD */
  u16 consec_full_frames_hi_thresh;  /* consecutive full frames threshold */
  u16 pad;
} dpdk_efd_t;
294
/* Singleton state for the DPDK driver: devices, per-thread resources,
 * EAL/interface configuration, vhost-user bookkeeping and EFD settings. */
typedef struct {

  /* Devices */
  dpdk_device_t * devices;
  dpdk_device_and_queue_t ** devices_by_cpu;

  /* per-thread recycle lists */
  u32 ** recycle;

  /* buffer flags template, configurable to enable/disable tcp / udp cksum */
  u32 buffer_flags_template;

  /* flow control callback. If 0 then flow control is disabled */
  dpdk_flowcontrol_callback_t flowcontrol_callback;

  /* vlib buffer free list, must be same size as an rte_mbuf */
  u32 vlib_buffer_free_list_index;

  /*
   * format interface names ala xxxEthernet%d/%d/%d instead of
   * xxxEthernet%x/%x/%x. For VIRL.
   */
  u8 interface_name_format_decimal;


  /* dpdk worker "threads" */
  dpdk_worker_t * workers;

  /* Config stuff */
  u8 ** eal_init_args;         /* argv handed to rte_eal_init() */
  u8 * eal_init_args_str;      /* same args as a single string (for show) */
  u8 * eth_if_blacklist;
  u8 * eth_if_whitelist;
  u8 * uio_driver_name;
  u8 no_multi_seg;             /* disable multi-segment mbufs */

  /* Required config parameters */
  u8 coremask_set_manually;
  u8 nchannels_set_manually;
  u32 coremask;
  u32 nchannels;
  u32 num_mbufs;
  u32 use_rss;
  u32 max_tx_queues;
  u8 num_kni; /* while kni_init allows u32, port_id in callback fn is only u8 */

  /* Ethernet input node index */
  u32 ethernet_input_node_index;

  /* dpdk i/o thread initialization barrier */
  volatile u32 io_thread_release;

  /* pcap tracing [only works if (CLIB_DEBUG > 0)] */
  int tx_pcap_enable;
  pcap_main_t pcap_main;
  u8 * pcap_filename;
  u32 pcap_sw_if_index;
  u32 pcap_pkts_to_capture;

  /* virtio vhost-user switch */
  u8 use_virtio_vhost;

  /* vhost-user coalescence frames config */
  u32 vhost_coalesce_frames;
  f64 vhost_coalesce_time;

  /* hashes */
  uword * dpdk_device_by_kni_port_id;
  uword * vu_sw_if_index_by_listener_fd;
  uword * vu_sw_if_index_by_sock_fd;
  u32 * vu_inactive_interfaces_device_index;

  u32 next_vu_if_id;

  /* efd (early-fast-discard) settings */
  dpdk_efd_t efd;

  /*
   * flag indicating that a posted admin up/down
   * (via post_sw_interface_set_flags) is in progress
   */
  u8 admin_up_down_in_progress;

  u8 have_io_threads;

  /* which cpus are running dpdk-input */
  int input_cpu_first_index;
  int input_cpu_count;

  /* convenience */
  vlib_main_t * vlib_main;
  vnet_main_t * vnet_main;
} dpdk_main_t;

/* NOTE(review): tentative definition in a header — consider declaring
 * 'extern dpdk_main_t dpdk_main;' here and defining it once in a .c file. */
dpdk_main_t dpdk_main;
390
/* Next-node indices out of the dpdk-input node. */
typedef enum {
  DPDK_RX_NEXT_IP4_INPUT,
  DPDK_RX_NEXT_IP6_INPUT,
  DPDK_RX_NEXT_MPLS_INPUT,
  DPDK_RX_NEXT_ETHERNET_INPUT,
  DPDK_RX_NEXT_DROP,
  DPDK_RX_N_NEXT,
} dpdk_rx_next_t;
399
void vnet_buffer_needs_dpdk_mb (vlib_buffer_t * b);

/* Redirect one of the dpdk-input next arcs to a named graph node. */
void dpdk_set_next_node (dpdk_rx_next_t, char *);

/* Called from the dpdk i/o thread loop. */
typedef void (*dpdk_io_thread_callback_t) (vlib_main_t *vm);

void dpdk_io_thread (vlib_worker_thread_t * w,
                     u32 instances,
                     u32 instance_id,
                     char *worker_name,
                     dpdk_io_thread_callback_t callback);
void dpdk_thread_input (dpdk_main_t * dm, dpdk_device_t * xd);

/* Configure rx/tx queues and start a port; returns an error on failure. */
clib_error_t * dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd);

/* Register the flow control callback (0 disables flow control). */
void dpdk_set_flowcontrol_callback (vlib_main_t *vm,
                                    dpdk_flowcontrol_callback_t callback);

u32 dpdk_interface_tx_vector (vlib_main_t * vm, u32 dev_instance);

vlib_frame_queue_elt_t * vlib_get_handoff_queue_elt (u32 vlib_worker_index);

u32 dpdk_get_handoff_node_index (void);

void set_efd_bitmap (u8 *bitmap, u32 value, u32 op);
425
/* X-macro table of dpdk-input error counters (symbol, display string). */
#define foreach_dpdk_error \
  _(NONE, "no error") \
  _(RX_PACKET_ERROR, "Rx packet errors") \
  _(RX_BAD_FCS, "Rx bad fcs") \
  _(L4_CHECKSUM_ERROR, "Rx L4 checksum errors") \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
  _(RX_ALLOC_FAIL, "rx buf alloc from free list failed") \
  _(RX_ALLOC_NO_PHYSMEM, "rx buf alloc failed no physmem") \
  _(RX_ALLOC_DROP_PKTS, "rx packets dropped due to alloc error") \
  _(IPV4_EFD_DROP_PKTS, "IPV4 Early Fast Discard rx drops") \
  _(IPV6_EFD_DROP_PKTS, "IPV6 Early Fast Discard rx drops") \
  _(MPLS_EFD_DROP_PKTS, "MPLS Early Fast Discard rx drops") \
  _(VLAN_EFD_DROP_PKTS, "VLAN Early Fast Discard rx drops")

/* Error counter indices generated from foreach_dpdk_error. */
typedef enum {
#define _(f,s) DPDK_ERROR_##f,
  foreach_dpdk_error
#undef _
  DPDK_N_ERROR,
} dpdk_error_t;
446
447/*
448 * Increment EFD drop counter
449 */
450static_always_inline
451void increment_efd_drop_counter (vlib_main_t * vm, u32 counter_index, u32 count)
452{
453 vlib_node_t *my_n;
454
455 my_n = vlib_get_node (vm, dpdk_input_node.index);
456 vm->error_main.counters[my_n->error_heap_index+counter_index] += count;
457}
458
/* Refresh cached link state from the PMD. */
void dpdk_update_link_state (dpdk_device_t * xd, f64 now);
/* Allocate / free the per-device tx queue locks (see dpdk_device_t.lockp). */
void dpdk_device_lock_init(dpdk_device_t * xd);
void dpdk_device_lock_free(dpdk_device_t * xd);
/* Update EFD congestion counters after an rx burst of n_buffers. */
void dpdk_efd_update_counters(dpdk_device_t *xd, u32 n_buffers, u16 enabled);
/* Return non-zero if EFD policy says this packet may be discarded. */
u32 is_efd_discardable(vlib_thread_main_t *tm,
                       vlib_buffer_t * b0,
                       struct rte_mbuf *mb);

/* dpdk vhost-user interrupt management */
u8 dpdk_vhost_user_want_interrupt (dpdk_device_t *xd, int idx);
void dpdk_vhost_user_send_interrupt (vlib_main_t * vm, dpdk_device_t * xd,
                                     int idx);
471
472
473static inline u64 vnet_get_aggregate_rx_packets (void)
474{
475 dpdk_main_t * dm = &dpdk_main;
476 u64 sum = 0;
477 dpdk_worker_t * dw;
478
479 vec_foreach(dw, dm->workers)
480 sum += dw->aggregate_rx_packets;
481
482 return sum;
483}
484
/* Record trace entries for a burst of received buffers. */
void dpdk_rx_trace (dpdk_main_t * dm,
                    vlib_node_runtime_t * node,
                    dpdk_device_t * xd,
                    u16 queue_id,
                    u32 * buffers,
                    uword n_buffers);

/* Comparison operators for set_efd_bitmap() / efd_config(). */
#define EFD_OPERATION_LESS_THAN 0
#define EFD_OPERATION_GREATER_OR_EQUAL 1

/* Configure EFD discard matching on IP precedence / MPLS EXP / VLAN CoS. */
void efd_config(u32 enabled,
                u32 ip_prec, u32 ip_op,
                u32 mpls_exp, u32 mpls_op,
                u32 vlan_cos, u32 vlan_op);

/* Post an admin up/down to be applied outside the current context. */
void post_sw_interface_set_flags (vlib_main_t *vm, u32 sw_if_index, u32 flags);
501
typedef struct vhost_user_memory vhost_user_memory_t;

/* Periodic vhost-user housekeeping, driven by a process node. */
void dpdk_vhost_user_process_init (void **ctx);
void dpdk_vhost_user_process_cleanup (void *ctx);
uword dpdk_vhost_user_process_if (vlib_main_t *vm, dpdk_device_t *xd, void *ctx);

// vhost-user calls
/* Create a vhost-user interface bound to sock_filename; returns 0 on
 * success and stores the new sw_if_index. */
int dpdk_vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
                               const char * sock_filename,
                               u8 is_server,
                               u32 * sw_if_index,
                               u64 feature_mask,
                               u8 renumber, u32 custom_dev_instance,
                               u8 *hwaddr);
/* Reconfigure an existing vhost-user interface in place. */
int dpdk_vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
                               const char * sock_filename,
                               u8 is_server,
                               u32 sw_if_index,
                               u64 feature_mask,
                               u8 renumber, u32 custom_dev_instance);
int dpdk_vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
                               u32 sw_if_index);
/* Fill out_vuids with details of all vhost-user interfaces. */
int dpdk_vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
                              vhost_user_intf_details_t **out_vuids);

u32 dpdk_get_admin_up_down_in_progress (void);

/* RSS-aware variant of the dpdk input node function. */
uword
dpdk_input_rss (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f);
531
532#endif /* __included_dpdk_h__ */