/*
 *------------------------------------------------------------------
 * vhost.c - vhost-user
 *
 * Copyright (c) 2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost-user.h>

/**
 * @file
 * @brief vHost User Device Driver.
 *
 * This file contains the source code for the vHost User interface.
 */


#define VHOST_USER_DEBUG_SOCKET 0
#define VHOST_DEBUG_VQ 0

#if VHOST_USER_DEBUG_SOCKET == 1
#define DBG_SOCK(args...) clib_warning(args);
#else
#define DBG_SOCK(args...)
#endif

#if VHOST_DEBUG_VQ == 1
#define DBG_VQ(args...) clib_warning(args);
#else
#define DBG_VQ(args...)
#endif

/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value controls up to how many
 * packets will be discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers gets under this threshold,
 * RX node will start discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drop in the VM.
 * This value controls the number of copy operations that are stacked
 * before copy is done for all and descriptors are given back to
 * the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64

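/*
 * Map a unix_main file-pool index (as stored in callfd_idx / kickfd_idx)
 * back to its file descriptor, or -1 when the index is unset (~0).
 */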
#define UNIX_GET_FD(unixfd_idx) \
    (unixfd_idx != ~0) ? \
	pool_elt_at_index (unix_main.file_pool, \
			   unixfd_idx)->file_descriptor : -1;

#define foreach_virtio_trace_flags \
  _ (SIMPLE_CHAINED, 0, "Simple descriptor chaining") \
  _ (SINGLE_DESC, 1, "Single descriptor packet") \
  _ (INDIRECT, 2, "Indirect descriptor") \
  _ (MAP_ERROR, 4, "Memory mapping error")

typedef enum
{
#define _(n,i,s) VIRTIO_TRACE_F_##n,
  foreach_virtio_trace_flags
#undef _
} virtio_trace_flag_t;

vlib_node_registration_t vhost_user_input_node;

#define foreach_vhost_user_tx_func_error      \
  _(NONE, "no error")  \
  _(NOT_READY, "vhost vring not ready")  \
  _(DOWN, "vhost interface is down")  \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")  \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")  \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error")  \
  _(NO_BUFFER, "no available buffer")  \
  _(MMAP_FAIL, "mmap failure")  \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table")  \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;

static char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};

/* *INDENT-OFF* */
static vhost_user_main_t vhost_user_main = {
  .mtu_bytes = 1518,
};

VNET_HW_INTERFACE_CLASS (vhost_interface_class, static) = {
  .name = "vhost-user",
};
/* *INDENT-ON* */

static u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
			   hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
	    hi->dev_instance, new_dev_instance);

  return 0;
}

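/*
 * Translate a guest physical address to a pointer into the regions mmap'ed
 * from the VHOST_USER_SET_MEM_TABLE fds. The caller-provided hint caches
 * the index of the last matching region so consecutive lookups in the same
 * region skip the search. Returns 0 (and resets the hint) when the address
 * is not backed by any region.
 */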
static_always_inline void *
map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
{
  int i = *hint;
  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
		    ((vui->regions[i].guest_phys_addr +
		      vui->regions[i].memory_size) > addr)))
    {
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
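  /*
   * SSE4.2 fast path: compare the address against the lo/hi bounds of all
   * regions, two regions per 128-bit compare, merge the results and pick
   * the first matching region index with count-trailing-zeros. The extra
   * bit at position VHOST_MEMORY_MAX_NREGIONS keeps ctzll well defined
   * when nothing matches; in that case i >= vui->nregions and we fall
   * through to the failure path.
   */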
#if __SSE4_2__
  __m128i rl, rh, al, ah, r;
  al = _mm_set1_epi64x (addr + 1);
  ah = _mm_set1_epi64x (addr);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_and_si128 (rl, rh);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);

  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
  i = __builtin_ctzll (_mm_movemask_epi8 (r) |
		       (1 << VHOST_MEMORY_MAX_NREGIONS));

  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }

#else
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].guest_phys_addr <= addr) &&
	  ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  *hint = i;
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].guest_phys_addr);
	}
    }
#endif
  DBG_VQ ("failed to map guest mem addr %llx", addr);
  *hint = 0;
  return 0;
}

static inline void *
map_user_mem (vhost_user_intf_t * vui, uword addr)
{
  int i;
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].userspace_addr <= addr) &&
	  ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].userspace_addr);
	}
    }
  return 0;
}

static long
get_huge_page_size (int fd)
{
  struct statfs s;
  fstatfs (fd, &s);
  return s.f_bsize;
}

static void
unmap_all_mem_regions (vhost_user_intf_t * vui)
{
  int i, r;
  for (i = 0; i < vui->nregions; i++)
    {
      if (vui->region_mmap_addr[i] != (void *) -1)
	{

	  long page_sz = get_huge_page_size (vui->region_mmap_fd[i]);

	  ssize_t map_sz = (vui->regions[i].memory_size +
			    vui->regions[i].mmap_offset +
			    page_sz) & ~(page_sz - 1);

	  r =
	    munmap (vui->region_mmap_addr[i] - vui->regions[i].mmap_offset,
		    map_sz);

	  DBG_SOCK
	    ("unmap memory region %d addr 0x%lx len 0x%lx page_sz 0x%x", i,
	     vui->region_mmap_addr[i], map_sz, page_sz);

	  vui->region_mmap_addr[i] = (void *) -1;

	  if (r == -1)
	    {
	      clib_warning ("failed to unmap memory region (errno %d)",
			    errno);
	    }
	  close (vui->region_mmap_fd[i]);
	}
    }
  vui->nregions = 0;
}

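/*
 * Round-robin the started and enabled guest RX vrings over all vlib
 * threads to pick each thread's TX queue (per_cpu_tx_qid). If there are
 * fewer usable queues than threads, queues get shared and the TX path
 * must take the per-vring spinlock (use_tx_spinlock).
 */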
static void
vhost_user_tx_thread_placement (vhost_user_intf_t * vui)
{
  //Let's try to assign one queue to each thread
  u32 qid = 0;
  u32 cpu_index = 0;
  vui->use_tx_spinlock = 0;
  while (1)
    {
      for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)
	{
	  vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
	  if (!rxvq->started || !rxvq->enabled)
	    continue;

	  vui->per_cpu_tx_qid[cpu_index] = qid;
	  cpu_index++;
	  if (cpu_index == vlib_get_thread_main ()->n_vlib_mains)
	    return;
	}
      //We need to loop, meaning the spinlock has to be used
      vui->use_tx_spinlock = 1;
      if (cpu_index == 0)
	{
	  //Could not find a single valid one
	  for (cpu_index = 0;
	       cpu_index < vlib_get_thread_main ()->n_vlib_mains; cpu_index++)
	    {
	      vui->per_cpu_tx_qid[cpu_index] = 0;
	    }
	  return;
	}
    }
}

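/*
 * Distribute every started guest TX vring of every vhost-user interface
 * over the input worker threads, rebuilding each worker's rx_queues vector
 * and switching the vhost-user input node to polling on workers that got
 * at least one queue. Interfaces with an explicit worker list (see
 * vhost_user_thread_placement) are only spread over those workers.
 */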
static void
vhost_user_rx_thread_placement ()
{
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui;
  vhost_cpu_t *vhc;
  u32 *workers = 0;

  //Let's list all worker cpu indices
  u32 i;
  for (i = vum->input_cpu_first_index;
       i < vum->input_cpu_first_index + vum->input_cpu_count; i++)
    {
      vlib_node_set_state (vlib_mains ? vlib_mains[i] : &vlib_global_main,
			   vhost_user_input_node.index,
			   VLIB_NODE_STATE_DISABLED);
      vec_add1 (workers, i);
    }

  vec_foreach (vhc, vum->cpus)
  {
    vec_reset_length (vhc->rx_queues);
  }

  i = 0;
  vhost_iface_and_queue_t iaq;
  /* *INDENT-OFF* */
  pool_foreach (vui, vum->vhost_user_interfaces, {
      u32 *vui_workers = vec_len (vui->workers) ? vui->workers : workers;
      u32 qid;
      for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)
	{
	  vhost_user_vring_t *txvq =
	      &vui->vrings[VHOST_VRING_IDX_TX (qid)];
	  if (!txvq->started)
	    continue;

	  i %= vec_len (vui_workers);
	  u32 cpu_index = vui_workers[i];
	  i++;
	  vhc = &vum->cpus[cpu_index];

	  iaq.qid = qid;
	  iaq.vhost_iface_index = vui - vum->vhost_user_interfaces;
	  vec_add1 (vhc->rx_queues, iaq);
	  vlib_node_set_state (vlib_mains ? vlib_mains[cpu_index] :
	      &vlib_global_main, vhost_user_input_node.index,
	      VLIB_NODE_STATE_POLLING);
	}
  });
  /* *INDENT-ON* */
}

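/*
 * Add or remove (del != 0) a worker thread from the set of workers allowed
 * to poll a given vhost-user interface, then recompute the RX placement.
 * Returns a negative value when the worker index is outside the input
 * worker range, the interface does not exist, or the worker to delete was
 * never assigned.
 */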
static int
vhost_user_thread_placement (u32 sw_if_index, u32 worker_thread_index, u8 del)
{
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui;
  vnet_hw_interface_t *hw;

  if (worker_thread_index < vum->input_cpu_first_index ||
      worker_thread_index >=
      vum->input_cpu_first_index + vum->input_cpu_count)
    return -1;

  if (!(hw = vnet_get_sup_hw_interface (vnet_get_main (), sw_if_index)))
    return -2;

  vui = pool_elt_at_index (vum->vhost_user_interfaces, hw->dev_instance);
  u32 found = ~0, *w;
  vec_foreach (w, vui->workers)
  {
    if (*w == worker_thread_index)
      {
	found = w - vui->workers;
	break;
      }
  }

  if (del)
    {
      if (found == ~0)
	return -3;
      vec_del1 (vui->workers, found);
    }
  else if (found == ~0)
    {
      vec_add1 (vui->workers, worker_thread_index);
    }

  vhost_user_rx_thread_placement ();
  return 0;
}

/** @brief Returns whether at least one TX and one RX vring are enabled */
int
vhost_user_intf_ready (vhost_user_intf_t * vui)
{
  int i, found[2] = { };	//RX + TX

  for (i = 0; i < VHOST_VRING_MAX_N; i++)
    if (vui->vrings[i].started && vui->vrings[i].enabled)
      found[i & 1] = 1;

  return found[0] && found[1];
}

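/*
 * Bring the VNET interface link up or down depending on whether enough
 * vrings are ready (see vhost_user_intf_ready), then recompute both RX and
 * TX thread placement. Called after vring or socket state changes.
 */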
static void
vhost_user_update_iface_state (vhost_user_intf_t * vui)
{
  /* if we have pointers to descriptor table, go up */
  int is_up = vhost_user_intf_ready (vui);
  if (is_up != vui->is_up)
    {
      DBG_SOCK ("interface %d %s", vui->sw_if_index,
		is_up ? "ready" : "down");
      vnet_hw_interface_set_flags (vnet_get_main (), vui->hw_if_index,
				   is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP :
				   0);
      vui->is_up = is_up;
    }
  vhost_user_rx_thread_placement ();
  vhost_user_tx_thread_placement (vui);
}

static clib_error_t *
vhost_user_callfd_read_ready (unix_file_t * uf)
{
  __attribute__ ((unused)) int n;
  u8 buff[8];
  n = read (uf->file_descriptor, ((char *) &buff), 8);
  return 0;
}

static clib_error_t *
vhost_user_kickfd_read_ready (unix_file_t * uf)
{
  __attribute__ ((unused)) int n;
  u8 buff[8];
  vhost_user_intf_t *vui =
    pool_elt_at_index (vhost_user_main.vhost_user_interfaces,
		       uf->private_data >> 8);
  u32 qid = uf->private_data & 0xff;
  n = read (uf->file_descriptor, ((char *) &buff), 8);
  DBG_SOCK ("if %d KICK queue %d", uf->private_data >> 8, qid);

  vlib_worker_thread_barrier_sync (vlib_get_main ());
  vui->vrings[qid].started = 1;
  vhost_user_update_iface_state (vui);
  vlib_worker_thread_barrier_release (vlib_get_main ());
  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  *vui->vring_locks[qid] = 0;
}

static inline void
vhost_user_vring_init (vhost_user_intf_t * vui, u32 qid)
{
  vhost_user_vring_t *vring = &vui->vrings[qid];
  memset (vring, 0, sizeof (*vring));
  vring->kickfd_idx = ~0;
  vring->callfd_idx = ~0;
  vring->errfd = -1;

  /*
   * We have a bug with some qemu 2.5, and this may be a fix.
   * It feels like interpreting holy text, but this is from vhost-user.txt.
   * "
   * One queue pair is enabled initially. More queues are enabled
   * dynamically, by sending message VHOST_USER_SET_VRING_ENABLE.
   * "
   * Don't know who's right, but this is what DPDK does.
   */
  if (qid == 0 || qid == 1)
    vring->enabled = 1;
}

static inline void
vhost_user_vring_close (vhost_user_intf_t * vui, u32 qid)
{
  vhost_user_vring_t *vring = &vui->vrings[qid];
  if (vring->kickfd_idx != ~0)
    {
      unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
					   vring->kickfd_idx);
      unix_file_del (&unix_main, uf);
      vring->kickfd_idx = ~0;
    }
  if (vring->callfd_idx != ~0)
    {
      unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
					   vring->callfd_idx);
      unix_file_del (&unix_main, uf);
      vring->callfd_idx = ~0;
    }
  if (vring->errfd != -1)
    close (vring->errfd);
  vhost_user_vring_init (vui, qid);
}

static inline void
vhost_user_if_disconnect (vhost_user_intf_t * vui)
{
  vnet_main_t *vnm = vnet_get_main ();
  int q;

  vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);

  if (vui->unix_file_index != ~0)
    {
      unix_file_del (&unix_main, unix_main.file_pool + vui->unix_file_index);
      vui->unix_file_index = ~0;
    }

  vui->is_up = 0;

  for (q = 0; q < VHOST_VRING_MAX_N; q++)
    vhost_user_vring_close (vui, q);

  unmap_all_mem_regions (vui);
  DBG_SOCK ("interface ifindex %d disconnected", vui->sw_if_index);
}

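/*
 * Dirty-page logging: when FEAT_VHOST_F_LOG_ALL has been negotiated and a
 * log region was mapped via VHOST_USER_SET_LOG_BASE, every write to guest
 * memory must be flagged in a shared bitmap, one bit per VHOST_LOG_PAGE
 * (4 kB) page. When is_host_address is set, the address is first resolved
 * through map_user_mem before the page bits are computed.
 */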
#define VHOST_LOG_PAGE 0x1000
static_always_inline void
vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
			      u64 addr, u64 len, u8 is_host_address)
{
  if (PREDICT_TRUE (vui->log_base_addr == 0
		    || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
    {
      return;
    }
  if (is_host_address)
    {
      addr = (u64) map_user_mem (vui, (uword) addr);
    }
  if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
    {
      DBG_SOCK ("vhost_user_log_dirty_pages(): out of range\n");
      return;
    }

  CLIB_MEMORY_BARRIER ();
  u64 page = addr / VHOST_LOG_PAGE;
  while (page * VHOST_LOG_PAGE < addr + len)
    {
      ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
      page++;
    }
}

static_always_inline void
vhost_user_log_dirty_pages (vhost_user_intf_t * vui, u64 addr, u64 len)
{
  vhost_user_log_dirty_pages_2 (vui, addr, len, 0);
}

#define vhost_user_log_dirty_ring(vui, vq, member) \
  if (PREDICT_FALSE(vq->log_used)) { \
    vhost_user_log_dirty_pages(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
			     sizeof(vq->used->member)); \
  }

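/*
 * Read and handle one vhost-user protocol message from the socket: the
 * fixed-size header first, any file descriptors carried in the SCM_RIGHTS
 * ancillary data, then the payload. The whole handler runs with the worker
 * thread barrier held, since it may remap guest memory and reconfigure
 * vrings underneath the datapath.
 */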
static clib_error_t *
vhost_user_socket_read (unix_file_t * uf)
{
  int n, i;
  int fd, number_of_fds = 0;
  int fds[VHOST_MEMORY_MAX_NREGIONS];
  vhost_user_msg_t msg;
  struct msghdr mh;
  struct iovec iov[1];
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui;
  struct cmsghdr *cmsg;
  u8 q;
  unix_file_t template = { 0 };
  vnet_main_t *vnm = vnet_get_main ();

  vui = pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);

  char control[CMSG_SPACE (VHOST_MEMORY_MAX_NREGIONS * sizeof (int))];

  memset (&mh, 0, sizeof (mh));
  memset (control, 0, sizeof (control));

  for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++)
    fds[i] = -1;

  /* set the payload */
  iov[0].iov_base = (void *) &msg;
  iov[0].iov_len = VHOST_USER_MSG_HDR_SZ;

  mh.msg_iov = iov;
  mh.msg_iovlen = 1;
  mh.msg_control = control;
  mh.msg_controllen = sizeof (control);

  n = recvmsg (uf->file_descriptor, &mh, 0);

  /* Stop workers to avoid end of the world */
  vlib_worker_thread_barrier_sync (vlib_get_main ());

  if (n != VHOST_USER_MSG_HDR_SZ)
    {
      if (n == -1)
	{
	  DBG_SOCK ("recvmsg returned error %d %s", errno, strerror (errno));
	}
      else
	{
	  DBG_SOCK ("n (%d) != VHOST_USER_MSG_HDR_SZ (%d)",
		    n, VHOST_USER_MSG_HDR_SZ);
	}
      goto close_socket;
    }

  if (mh.msg_flags & MSG_CTRUNC)
    {
      DBG_SOCK ("MSG_CTRUNC is set");
      goto close_socket;
    }

  cmsg = CMSG_FIRSTHDR (&mh);

  if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
      (cmsg->cmsg_type == SCM_RIGHTS) &&
      (cmsg->cmsg_len - CMSG_LEN (0) <=
       VHOST_MEMORY_MAX_NREGIONS * sizeof (int)))
    {
      number_of_fds = (cmsg->cmsg_len - CMSG_LEN (0)) / sizeof (int);
      clib_memcpy (fds, CMSG_DATA (cmsg), number_of_fds * sizeof (int));
    }

  /* version 1, no reply bit set */
  if ((msg.flags & 7) != 1)
    {
      DBG_SOCK ("malformed message received. closing socket");
      goto close_socket;
    }

  {
    int rv;
    rv =
      read (uf->file_descriptor, ((char *) &msg) + VHOST_USER_MSG_HDR_SZ,
	    msg.size);
    if (rv < 0)
      {
	DBG_SOCK ("read failed %s", strerror (errno));
	goto close_socket;
      }
    else if (rv != msg.size)
      {
	DBG_SOCK ("message too short (read %dB should be %dB)", rv, msg.size);
	goto close_socket;
      }
  }

  switch (msg.request)
    {
    case VHOST_USER_GET_FEATURES:
      msg.flags |= 4;
      msg.u64 = (1ULL << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
	(1ULL << FEAT_VIRTIO_NET_F_CTRL_VQ) |
	(1ULL << FEAT_VIRTIO_F_ANY_LAYOUT) |
	(1ULL << FEAT_VIRTIO_F_INDIRECT_DESC) |
	(1ULL << FEAT_VHOST_F_LOG_ALL) |
	(1ULL << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
	(1ULL << FEAT_VIRTIO_NET_F_MQ) |
	(1ULL << FEAT_VHOST_USER_F_PROTOCOL_FEATURES) |
	(1ULL << FEAT_VIRTIO_F_VERSION_1);
      msg.u64 &= vui->feature_mask;
      msg.size = sizeof (msg.u64);
      DBG_SOCK ("if %d msg VHOST_USER_GET_FEATURES - reply 0x%016llx",
		vui->hw_if_index, msg.u64);
      break;

    case VHOST_USER_SET_FEATURES:
      DBG_SOCK ("if %d msg VHOST_USER_SET_FEATURES features 0x%016llx",
		vui->hw_if_index, msg.u64);

      vui->features = msg.u64;

      if (vui->features &
	  ((1 << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
	   (1ULL << FEAT_VIRTIO_F_VERSION_1)))
	vui->virtio_net_hdr_sz = 12;
      else
	vui->virtio_net_hdr_sz = 10;

      vui->is_any_layout =
	(vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;

      ASSERT (vui->virtio_net_hdr_sz < VLIB_BUFFER_PRE_DATA_SIZE);
      vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
      vui->is_up = 0;

      /*for (q = 0; q < VHOST_VRING_MAX_N; q++)
         vhost_user_vring_close(&vui->vrings[q]); */

      break;

    case VHOST_USER_SET_MEM_TABLE:
      DBG_SOCK ("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
		vui->hw_if_index, msg.memory.nregions);

      if ((msg.memory.nregions < 1) ||
	  (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS))
	{

	  DBG_SOCK ("number of mem regions must be between 1 and %i",
		    VHOST_MEMORY_MAX_NREGIONS);

	  goto close_socket;
	}

      if (msg.memory.nregions != number_of_fds)
	{
	  DBG_SOCK ("each memory region must have FD");
	  goto close_socket;
	}
      unmap_all_mem_regions (vui);
      for (i = 0; i < msg.memory.nregions; i++)
	{
	  clib_memcpy (&(vui->regions[i]), &msg.memory.regions[i],
		       sizeof (vhost_user_memory_region_t));

	  long page_sz = get_huge_page_size (fds[i]);

	  /* align size to the page size backing this fd (e.g. a 2M hugepage) */
	  ssize_t map_sz = (vui->regions[i].memory_size +
			    vui->regions[i].mmap_offset +
			    page_sz) & ~(page_sz - 1);

	  vui->region_mmap_addr[i] = mmap (0, map_sz, PROT_READ | PROT_WRITE,
					   MAP_SHARED, fds[i], 0);
	  vui->region_guest_addr_lo[i] = vui->regions[i].guest_phys_addr;
	  vui->region_guest_addr_hi[i] = vui->regions[i].guest_phys_addr +
	    vui->regions[i].memory_size;

	  DBG_SOCK
	    ("map memory region %d addr 0 len 0x%lx fd %d mapped 0x%lx "
	     "page_sz 0x%x", i, map_sz, fds[i], vui->region_mmap_addr[i],
	     page_sz);

	  if (vui->region_mmap_addr[i] == MAP_FAILED)
	    {
	      clib_warning ("failed to map memory. errno is %d", errno);
	      goto close_socket;
	    }
	  vui->region_mmap_addr[i] += vui->regions[i].mmap_offset;
	  vui->region_mmap_fd[i] = fds[i];
	}
      vui->nregions = msg.memory.nregions;
      break;

    case VHOST_USER_SET_VRING_NUM:
      DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
		vui->hw_if_index, msg.state.index, msg.state.num);

      if ((msg.state.num > 32768) ||	/* maximum ring size is 32768 */
	  (msg.state.num == 0) ||	/* it cannot be zero */
	  ((msg.state.num - 1) & msg.state.num))	/* must be power of 2 */
	goto close_socket;
      vui->vrings[msg.state.index].qsz = msg.state.num;
      break;

    case VHOST_USER_SET_VRING_ADDR:
      DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
		vui->hw_if_index, msg.state.index);

      if (msg.state.index >= VHOST_VRING_MAX_N)
	{
	  DBG_SOCK ("invalid vring index VHOST_USER_SET_VRING_ADDR:"
		    " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
	  goto close_socket;
	}

      if (msg.size < sizeof (msg.addr))
	{
	  DBG_SOCK ("vhost message is too short (%d < %d)",
		    msg.size, sizeof (msg.addr));
	  goto close_socket;
	}

      vui->vrings[msg.state.index].desc = (vring_desc_t *)
	map_user_mem (vui, msg.addr.desc_user_addr);
      vui->vrings[msg.state.index].used = (vring_used_t *)
	map_user_mem (vui, msg.addr.used_user_addr);
      vui->vrings[msg.state.index].avail = (vring_avail_t *)
	map_user_mem (vui, msg.addr.avail_user_addr);

      if ((vui->vrings[msg.state.index].desc == NULL) ||
	  (vui->vrings[msg.state.index].used == NULL) ||
	  (vui->vrings[msg.state.index].avail == NULL))
	{
	  DBG_SOCK ("failed to map user memory for hw_if_index %d",
		    vui->hw_if_index);
	  goto close_socket;
	}

      vui->vrings[msg.state.index].log_guest_addr = msg.addr.log_guest_addr;
      vui->vrings[msg.state.index].log_used =
	(msg.addr.flags & (1 << VHOST_VRING_F_LOG)) ? 1 : 0;

      /* Spec says: If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
         the ring is initialized in an enabled state. */
      if (!(vui->features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES)))
	{
	  vui->vrings[msg.state.index].enabled = 1;
	}

      vui->vrings[msg.state.index].last_used_idx =
	vui->vrings[msg.state.index].last_avail_idx =
	vui->vrings[msg.state.index].used->idx;

      /* tell driver that we don't want interrupts */
      vui->vrings[msg.state.index].used->flags = VRING_USED_F_NO_NOTIFY;
      break;

    case VHOST_USER_SET_OWNER:
      DBG_SOCK ("if %d msg VHOST_USER_SET_OWNER", vui->hw_if_index);
      break;

    case VHOST_USER_RESET_OWNER:
      DBG_SOCK ("if %d msg VHOST_USER_RESET_OWNER", vui->hw_if_index);
      break;

    case VHOST_USER_SET_VRING_CALL:
      DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_CALL u64 %d",
		vui->hw_if_index, msg.u64);

      q = (u8) (msg.u64 & 0xFF);

      /* if there is an old fd, delete and close it */
      if (vui->vrings[q].callfd_idx != ~0)
	{
	  unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
					       vui->vrings[q].callfd_idx);
	  unix_file_del (&unix_main, uf);
	  vui->vrings[q].callfd_idx = ~0;
	}

      if (!(msg.u64 & 0x100))
	{
	  if (number_of_fds != 1)
	    {
	      DBG_SOCK ("More than one fd received !");
	      goto close_socket;
	    }

	  template.read_function = vhost_user_callfd_read_ready;
	  template.file_descriptor = fds[0];
	  template.private_data =
	    ((vui - vhost_user_main.vhost_user_interfaces) << 8) + q;
	  vui->vrings[q].callfd_idx = unix_file_add (&unix_main, &template);
	}
      else
	vui->vrings[q].callfd_idx = ~0;
      break;

    case VHOST_USER_SET_VRING_KICK:
      DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_KICK u64 %d",
		vui->hw_if_index, msg.u64);

      q = (u8) (msg.u64 & 0xFF);

      if (vui->vrings[q].kickfd_idx != ~0)
	{
	  unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
					       vui->vrings[q].kickfd_idx);
	  unix_file_del (&unix_main, uf);
	  vui->vrings[q].kickfd_idx = ~0;
	}

      if (!(msg.u64 & 0x100))
	{
	  if (number_of_fds != 1)
	    {
	      DBG_SOCK ("More than one fd received !");
	      goto close_socket;
	    }

	  template.read_function = vhost_user_kickfd_read_ready;
	  template.file_descriptor = fds[0];
	  template.private_data =
	    (((uword) (vui - vhost_user_main.vhost_user_interfaces)) << 8) +
	    q;
	  vui->vrings[q].kickfd_idx = unix_file_add (&unix_main, &template);
	}
      else
	{
	  //When no kickfd is set, the queue is initialized as started
	  vui->vrings[q].kickfd_idx = ~0;
	  vui->vrings[q].started = 1;
	}

      break;

    case VHOST_USER_SET_VRING_ERR:
      DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ERR u64 %d",
		vui->hw_if_index, msg.u64);

      q = (u8) (msg.u64 & 0xFF);

      if (vui->vrings[q].errfd != -1)
	close (vui->vrings[q].errfd);

      if (!(msg.u64 & 0x100))
	{
	  if (number_of_fds != 1)
	    goto close_socket;

	  vui->vrings[q].errfd = fds[0];
	}
      else
	vui->vrings[q].errfd = -1;

      break;

    case VHOST_USER_SET_VRING_BASE:
      DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
		vui->hw_if_index, msg.state.index, msg.state.num);

      vui->vrings[msg.state.index].last_avail_idx = msg.state.num;
      break;

    case VHOST_USER_GET_VRING_BASE:
      DBG_SOCK ("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
		vui->hw_if_index, msg.state.index, msg.state.num);

      if (msg.state.index >= VHOST_VRING_MAX_N)
	{
	  DBG_SOCK ("invalid vring index VHOST_USER_GET_VRING_BASE:"
		    " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
	  goto close_socket;
	}

      /* Spec says: Client must [...] stop ring upon receiving VHOST_USER_GET_VRING_BASE. */
      vhost_user_vring_close (vui, msg.state.index);

      msg.state.num = vui->vrings[msg.state.index].last_avail_idx;
      msg.flags |= 4;
      msg.size = sizeof (msg.state);
      break;

    case VHOST_USER_NONE:
      DBG_SOCK ("if %d msg VHOST_USER_NONE", vui->hw_if_index);

      break;

    case VHOST_USER_SET_LOG_BASE:
      {
	DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_BASE", vui->hw_if_index);

	if (msg.size != sizeof (msg.log))
	  {
	    DBG_SOCK
	      ("invalid msg size for VHOST_USER_SET_LOG_BASE: %d instead of %d",
	       msg.size, sizeof (msg.log));
	    goto close_socket;
	  }

	if (!
	    (vui->protocol_features & (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD)))
	  {
	    DBG_SOCK
	      ("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
	    goto close_socket;
	  }

	fd = fds[0];
	/* align size to the page size backing this fd (e.g. a 2M hugepage) */
	long page_sz = get_huge_page_size (fd);
	ssize_t map_sz =
	  (msg.log.size + msg.log.offset + page_sz) & ~(page_sz - 1);

	vui->log_base_addr = mmap (0, map_sz, PROT_READ | PROT_WRITE,
				   MAP_SHARED, fd, 0);

	DBG_SOCK
	  ("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped 0x%lx",
	   map_sz, msg.log.offset, fd, vui->log_base_addr);

	if (vui->log_base_addr == MAP_FAILED)
	  {
	    clib_warning ("failed to map memory. errno is %d", errno);
	    goto close_socket;
	  }

	vui->log_base_addr += msg.log.offset;
	vui->log_size = msg.log.size;

	msg.flags |= 4;
	msg.size = sizeof (msg.u64);

	break;
      }

    case VHOST_USER_SET_LOG_FD:
      DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_FD", vui->hw_if_index);

      break;

    case VHOST_USER_GET_PROTOCOL_FEATURES:
      DBG_SOCK ("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES",
		vui->hw_if_index);

      msg.flags |= 4;
      msg.u64 = (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |
	(1 << VHOST_USER_PROTOCOL_F_MQ);
      msg.size = sizeof (msg.u64);
      break;

    case VHOST_USER_SET_PROTOCOL_FEATURES:
      DBG_SOCK ("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%lx",
		vui->hw_if_index, msg.u64);

      vui->protocol_features = msg.u64;

      break;

    case VHOST_USER_GET_QUEUE_NUM:
      DBG_SOCK ("if %d msg VHOST_USER_GET_QUEUE_NUM", vui->hw_if_index);
      msg.flags |= 4;
      msg.u64 = VHOST_VRING_MAX_N;
      msg.size = sizeof (msg.u64);
      break;

    case VHOST_USER_SET_VRING_ENABLE:
      DBG_SOCK ("if %d VHOST_USER_SET_VRING_ENABLE: %s queue %d",
		vui->hw_if_index, msg.state.num ? "enable" : "disable",
		msg.state.index);
      if (msg.state.index >= VHOST_VRING_MAX_N)
	{
	  DBG_SOCK ("invalid vring index VHOST_USER_SET_VRING_ENABLE:"
		    " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
	  goto close_socket;
	}

      vui->vrings[msg.state.index].enabled = msg.state.num;
      break;

    default:
      DBG_SOCK ("unknown vhost-user message %d received. closing socket",
		msg.request);
      goto close_socket;
    }

  /* if we need to reply */
  if (msg.flags & 4)
    {
      n =
	send (uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
      if (n != (msg.size + VHOST_USER_MSG_HDR_SZ))
	{
	  DBG_SOCK ("could not send message response");
	  goto close_socket;
	}
    }

  vhost_user_update_iface_state (vui);
  vlib_worker_thread_barrier_release (vlib_get_main ());
  return 0;

close_socket:
  vhost_user_if_disconnect (vui);
  vhost_user_update_iface_state (vui);
  vlib_worker_thread_barrier_release (vlib_get_main ());
  return 0;
}

static clib_error_t *
vhost_user_socket_error (unix_file_t * uf)
{
  vlib_main_t *vm = vlib_get_main ();
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);

  DBG_SOCK ("socket error on if %d", vui->sw_if_index);
  vlib_worker_thread_barrier_sync (vm);
  vhost_user_if_disconnect (vui);
  vhost_user_rx_thread_placement ();
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

static clib_error_t *
vhost_user_socksvr_accept_ready (unix_file_t * uf)
{
  int client_fd, client_len;
  struct sockaddr_un client;
  unix_file_t template = { 0 };
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui;

  vui = pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);

  client_len = sizeof (client);
  client_fd = accept (uf->file_descriptor,
		      (struct sockaddr *) &client,
		      (socklen_t *) & client_len);

  if (client_fd < 0)
    return clib_error_return_unix (0, "accept");

  DBG_SOCK ("New client socket for vhost interface %d", vui->sw_if_index);
  template.read_function = vhost_user_socket_read;
  template.error_function = vhost_user_socket_error;
  template.file_descriptor = client_fd;
  template.private_data = vui - vhost_user_main.vhost_user_interfaces;
  vui->unix_file_index = unix_file_add (&unix_main, &template);
  return 0;
}

static clib_error_t *
vhost_user_init (vlib_main_t * vm)
{
  clib_error_t *error;
  vhost_user_main_t *vum = &vhost_user_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_thread_registration_t *tr;
  uword *p;

  error = vlib_call_init_function (vm, ip4_init);
  if (error)
    return error;

  vum->coalesce_frames = 32;
  vum->coalesce_time = 1e-3;

  vec_validate (vum->cpus, tm->n_vlib_mains - 1);

  vhost_cpu_t *cpu;
  vec_foreach (cpu, vum->cpus)
  {
    /* This is actually not necessary as validate already zeroes it
     * Just keeping the loop here for later because I am lazy. */
    cpu->rx_buffers_len = 0;
  }

  /* find out which cpus will be used for input */
  vum->input_cpu_first_index = 0;
  vum->input_cpu_count = 1;
  p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  tr = p ? (vlib_thread_registration_t *) p[0] : 0;

  if (tr && tr->count > 0)
    {
      vum->input_cpu_first_index = tr->first_index;
      vum->input_cpu_count = tr->count;
    }

  vum->random = random_default_seed ();

  return 0;
}

VLIB_INIT_FUNCTION (vhost_user_init);

static clib_error_t *
vhost_user_exit (vlib_main_t * vm)
{
  /* TODO cleanup */
  return 0;
}

VLIB_MAIN_LOOP_EXIT_FUNCTION (vhost_user_exit);

static u8 *
format_vhost_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
					      t->device_index);

  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, vui->sw_if_index);

  uword indent = format_get_indent (s);

  s = format (s, "%U %U queue %d\n", format_white_space, indent,
	      format_vnet_sw_interface_name, vnm, sw, t->qid);

  s = format (s, "%U virtio flags:\n", format_white_space, indent);
#define _(n,i,st) \
  if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
    s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
  foreach_virtio_trace_flags
#undef _
  s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
	      format_white_space, indent, t->first_desc_len);

  s = format (s, "%U flags 0x%02x gso_type %u\n",
	      format_white_space, indent,
	      t->hdr.hdr.flags, t->hdr.hdr.gso_type);

  if (vui->virtio_net_hdr_sz == 12)
    s = format (s, "%U num_buff %u",
		format_white_space, indent, t->hdr.num_buffers);

  return s;
}

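/*
 * Fill a vhost_trace_t for the descriptor chain currently at the head of
 * the guest TX ring: record which descriptor layout is in use (single,
 * chained or indirect), the length of the first descriptor and a copy of
 * the virtio_net header, so format_vhost_trace can pretty-print it later.
 */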
Damjan Marion00a9dca2016-08-17 17:05:46 +02001300void
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001301vhost_user_rx_trace (vhost_trace_t * t,
1302 vhost_user_intf_t * vui, u16 qid,
1303 vlib_buffer_t * b, vhost_user_vring_t * txvq)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001304{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001305 vhost_user_main_t *vum = &vhost_user_main;
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001306 u32 qsz_mask = txvq->qsz - 1;
1307 u32 last_avail_idx = txvq->last_avail_idx;
1308 u32 desc_current = txvq->avail->ring[last_avail_idx & qsz_mask];
1309 vring_desc_t *hdr_desc = 0;
1310 virtio_net_hdr_mrg_rxbuf_t *hdr;
1311 u32 hint = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001312
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001313 memset (t, 0, sizeof (*t));
1314 t->device_index = vui - vum->vhost_user_interfaces;
1315 t->qid = qid;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001316
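  /* Locate the descriptor holding the virtio-net header so its contents
   * can be copied into the trace record below */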
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001317 hdr_desc = &txvq->desc[desc_current];
1318 if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
Damjan Marion00a9dca2016-08-17 17:05:46 +02001319 {
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001320 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001321      /* The header is in the first descriptor of the indirect table */
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001322 hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
1323 }
1324 if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
1325 {
1326 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
1327 }
1328 if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
1329 !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
1330 {
1331 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
1332 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001333
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001334 t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001335
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001336 if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
1337 {
1338 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
1339 }
1340 else
1341 {
1342 u32 len = vui->virtio_net_hdr_sz;
1343 memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
Damjan Marion00a9dca2016-08-17 17:05:46 +02001344 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001345}
1346
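/*
 * Signal an interrupt to the guest by writing to the vring's call
 * eventfd, then reset the interrupt coalescing state.
 */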
Damjan Marion00a9dca2016-08-17 17:05:46 +02001347static inline void
1348vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001349{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001350 vhost_user_main_t *vum = &vhost_user_main;
1351 u64 x = 1;
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00001352 int fd = UNIX_GET_FD (vq->callfd_idx);
Damjan Marion00a9dca2016-08-17 17:05:46 +02001353 int rv __attribute__ ((unused));
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00001354 /* TODO: pay attention to rv */
1355 rv = write (fd, &x, sizeof (x));
Damjan Marion00a9dca2016-08-17 17:05:46 +02001356 vq->n_since_last_int = 0;
1357 vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001358}
1359
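/*
 * Execute the copy operations queued while parsing the ring (VPP RX path).
 * Guest source addresses are mapped and prefetched two entries ahead, so
 * each memcpy works on memory that is already mapped.
 * Returns 1 if any guest address fails to map, 0 otherwise.
 */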
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001360static_always_inline u32
1361vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
1362 u16 copy_len, u32 * map_hint)
1363{
1364 void *src0, *src1, *src2, *src3;
1365 if (PREDICT_TRUE (copy_len >= 4))
1366 {
1367 if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
1368 return 1;
1369 if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
1370 return 1;
1371
1372 while (PREDICT_TRUE (copy_len >= 4))
1373 {
1374 src0 = src2;
1375 src1 = src3;
1376
1377 if (PREDICT_FALSE
1378 (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
1379 return 1;
1380 if (PREDICT_FALSE
1381 (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
1382 return 1;
1383
1384 CLIB_PREFETCH (src2, 64, LOAD);
1385 CLIB_PREFETCH (src3, 64, LOAD);
1386
1387 clib_memcpy ((void *) cpy[0].dst, src0, cpy[0].len);
1388 clib_memcpy ((void *) cpy[1].dst, src1, cpy[1].len);
1389 copy_len -= 2;
1390 cpy += 2;
1391 }
1392 }
1393 while (copy_len)
1394 {
1395 if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
1396 return 1;
1397 clib_memcpy ((void *) cpy->dst, src0, cpy->len);
1398 copy_len -= 1;
1399 cpy += 1;
1400 }
1401 return 0;
1402}
1403
1404/**
1405 * Try to discard packets from the tx ring (VPP RX path).
1406 * Returns the number of discarded packets.
1407 */
1408u32
1409vhost_user_rx_discard_packet (vlib_main_t * vm,
1410 vhost_user_intf_t * vui,
1411 vhost_user_vring_t * txvq, u32 discard_max)
1412{
1413 /*
1414 * On the RX side, each packet corresponds to one descriptor
1415 * (it is the same whether it is a shallow descriptor, chained, or indirect).
1416 * Therefore, discarding a packet is like discarding a descriptor.
1417 */
1418 u32 discarded_packets = 0;
1419 u32 avail_idx = txvq->avail->idx;
1420 u16 qsz_mask = txvq->qsz - 1;
1421 while (discarded_packets != discard_max)
1422 {
1423 if (avail_idx == txvq->last_avail_idx)
1424 goto out;
1425
1426 u16 desc_chain_head =
1427 txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
1428 txvq->last_avail_idx++;
1429 txvq->used->ring[txvq->last_used_idx & qsz_mask].id = desc_chain_head;
1430 txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
1431 vhost_user_log_dirty_ring (vui, txvq,
1432 ring[txvq->last_used_idx & qsz_mask]);
1433 txvq->last_used_idx++;
1434 discarded_packets++;
1435 }
1436
1437out:
1438 CLIB_MEMORY_BARRIER ();
1439 txvq->used->idx = txvq->last_used_idx;
1440 vhost_user_log_dirty_ring (vui, txvq, idx);
1441 return discarded_packets;
1442}
1443
1444/*
1445 * In case of overflow, we need to rewind the array of allocated buffers.
1446 */
1447static void
1448vhost_user_input_rewind_buffers (vlib_main_t * vm,
1449 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
1450{
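  /* Walk back from the most recently taken buffer to b_head, resetting
   * each buffer and crediting it back to the per-cpu rx_buffers stash */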
1451 u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
1452 vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
1453 b_current->current_length = 0;
1454 b_current->flags = 0;
1455 while (b_current != b_head)
1456 {
1457 cpu->rx_buffers_len++;
1458 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
1459 b_current = vlib_get_buffer (vm, bi_current);
1460 b_current->current_length = 0;
1461 b_current->flags = 0;
1462 }
1463}
Yoann Desmouceaux4667c222016-02-24 22:51:00 +01001464
Damjan Marion00a9dca2016-08-17 17:05:46 +02001465static u32
1466vhost_user_if_input (vlib_main_t * vm,
1467 vhost_user_main_t * vum,
Pierre Pfistere21c5282016-09-21 08:04:59 +01001468 vhost_user_intf_t * vui,
1469 u16 qid, vlib_node_runtime_t * node)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001470{
Pierre Pfistere21c5282016-09-21 08:04:59 +01001471 vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001472 u16 n_rx_packets = 0;
1473 u32 n_rx_bytes = 0;
1474 u16 n_left;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001475 u32 n_left_to_next, *to_next;
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001476 u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
1477 u32 n_trace = vlib_get_trace_count (vm, node);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001478 u16 qsz_mask;
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001479 u32 map_hint = 0;
1480 u16 cpu_index = os_get_cpu_number ();
1481 u16 copy_len = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001482
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001483 {
1484 /* do we have pending interrupts ? */
1485 vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
1486 f64 now = vlib_time_now (vm);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001487
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001488 if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
1489 vhost_user_send_call (vm, txvq);
1490
1491 if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
1492 vhost_user_send_call (vm, rxvq);
1493 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001494
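  /* Only bit 0 of avail->flags is defined (the "no interrupt" flag);
   * if any other bit is set, treat the ring as invalid and do nothing */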
Damjan Marion00a9dca2016-08-17 17:05:46 +02001495 if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001496 return 0;
1497
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001498 n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);
1499
Ed Warnickecb9cada2015-12-08 15:45:58 -07001500 /* nothing to do */
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001501 if (PREDICT_FALSE (n_left == 0))
Ed Warnickecb9cada2015-12-08 15:45:58 -07001502 return 0;
1503
Pierre Pfistere21c5282016-09-21 08:04:59 +01001504 if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001505 {
Pierre Pfistere21c5282016-09-21 08:04:59 +01001506 /*
1507 * Discard input packet if interface is admin down or vring is not
1508 * enabled.
1509 * "For example, for a networking device, in the disabled state
1510 * client must not supply any new RX packets, but must process
1511 * and discard any TX packets."
1512 */
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001513 vhost_user_rx_discard_packet (vm, vui, txvq,
1514 VHOST_USER_DOWN_DISCARD_COUNT);
Damjan Marion00a9dca2016-08-17 17:05:46 +02001515 return 0;
1516 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001517
Pierre Pfistere21c5282016-09-21 08:04:59 +01001518 if (PREDICT_FALSE (n_left == txvq->qsz))
1519 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001520 /*
1521 * Informational error logging when VPP is not
1522 * receiving packets fast enough.
1523 */
Pierre Pfistere21c5282016-09-21 08:04:59 +01001524 vlib_error_count (vm, node->node_index,
1525 VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
1526 }
1527
Ed Warnickecb9cada2015-12-08 15:45:58 -07001528 qsz_mask = txvq->qsz - 1;
1529
Pierre Pfister328e99b2016-02-12 13:18:42 +00001530 if (n_left > VLIB_FRAME_SIZE)
1531 n_left = VLIB_FRAME_SIZE;
1532
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001533 /*
1534 * For small packets (<2kB), we will not need more than one vlib buffer
1535   * per packet. If packets are bigger, we will simply yield at some point
1536   * in the loop and come back later. This is not an issue because, for big
1537   * packets, the processing cost really comes from the memory copy.
Pierre Pfister328e99b2016-02-12 13:18:42 +00001538 */
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001539 if (PREDICT_FALSE (vum->cpus[cpu_index].rx_buffers_len < n_left + 1))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001540 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001541 u32 curr_len = vum->cpus[cpu_index].rx_buffers_len;
1542 vum->cpus[cpu_index].rx_buffers_len +=
Damjan Marion00a9dca2016-08-17 17:05:46 +02001543 vlib_buffer_alloc_from_free_list (vm,
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001544 vum->cpus[cpu_index].rx_buffers +
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001545 curr_len,
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001546 VHOST_USER_RX_BUFFERS_N - curr_len,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001547 VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001548
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001549 if (PREDICT_FALSE
1550 (vum->cpus[cpu_index].rx_buffers_len <
1551 VHOST_USER_RX_BUFFER_STARVATION))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001552 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001553 /* In case of buffer starvation, discard some packets from the queue
1554 * and log the event.
1555 * We keep doing best effort for the remaining packets. */
1556 u32 flush = (n_left + 1 > vum->cpus[cpu_index].rx_buffers_len) ?
1557 n_left + 1 - vum->cpus[cpu_index].rx_buffers_len : 1;
1558 flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);
1559
1560 n_left -= flush;
1561 vlib_increment_simple_counter (vnet_main.
1562 interface_main.sw_if_counters +
1563 VNET_INTERFACE_COUNTER_DROP,
1564 os_get_cpu_number (),
1565 vui->sw_if_index, flush);
1566
1567 vlib_error_count (vm, vhost_user_input_node.index,
1568 VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
Damjan Marion00a9dca2016-08-17 17:05:46 +02001569 }
1570 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001571
Damjan Marion00a9dca2016-08-17 17:05:46 +02001572 while (n_left > 0)
1573 {
1574 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001575
Damjan Marion00a9dca2016-08-17 17:05:46 +02001576 while (n_left > 0 && n_left_to_next > 0)
1577 {
1578 vlib_buffer_t *b_head, *b_current;
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001579 u32 bi_current;
1580 u16 desc_current;
1581 u32 desc_data_offset;
1582 vring_desc_t *desc_table = txvq->desc;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001583
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001584 if (PREDICT_FALSE (vum->cpus[cpu_index].rx_buffers_len <= 1))
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001585 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001586 /* Not enough rx_buffers
1587	     * Note: We yield when only one buffer is left, so we don't need an
1588	     * additional check before the next buffer prefetch.
1589 */
1590 n_left = 0;
1591 break;
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001592 }
1593
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001594 desc_current = txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
1595 vum->cpus[cpu_index].rx_buffers_len--;
1596 bi_current = (vum->cpus[cpu_index].rx_buffers)
1597 [vum->cpus[cpu_index].rx_buffers_len];
1598 b_head = b_current = vlib_get_buffer (vm, bi_current);
1599 to_next[0] = bi_current; //We do that now so we can forget about bi_current
1600 to_next++;
1601 n_left_to_next--;
1602
1603 vlib_prefetch_buffer_with_index (vm,
1604 (vum->cpus[cpu_index].rx_buffers)
1605 [vum->cpus[cpu_index].
1606 rx_buffers_len - 1], LOAD);
1607
1608 /* Just preset the used descriptor id and length for later */
1609 txvq->used->ring[txvq->last_used_idx & qsz_mask].id = desc_current;
1610 txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
1611 vhost_user_log_dirty_ring (vui, txvq,
1612 ring[txvq->last_used_idx & qsz_mask]);
1613
1614 /* The buffer should already be initialized */
1615 b_head->total_length_not_including_first_buffer = 0;
1616 b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1617
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001618 if (PREDICT_FALSE (n_trace))
1619 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001620 //TODO: next_index is not exactly known at that point
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001621 vlib_trace_buffer (vm, node, next_index, b_head,
1622 /* follow_chain */ 0);
1623 vhost_trace_t *t0 =
1624 vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
1625 vhost_user_rx_trace (t0, vui, qid, b_head, txvq);
1626 n_trace--;
1627 vlib_set_trace_count (vm, node, n_trace);
1628 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001629
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001630	  /* This depends on the setup but is very consistent,
1631	   * so I think the CPU branch predictor will do a pretty good job
1632	   * of optimizing the decision. */
1633 if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
1634 {
1635 desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
1636 &map_hint);
1637 desc_current = 0;
1638 if (PREDICT_FALSE (desc_table == 0))
1639 {
1640		      //FIXME: Handle the error by shutting down the queue
1641 goto out;
1642 }
1643 }
1644
Damjan Marion00a9dca2016-08-17 17:05:46 +02001645 if (PREDICT_TRUE (vui->is_any_layout) ||
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001646 (!(desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT)))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001647 {
1648 /* ANYLAYOUT or single buffer */
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001649 desc_data_offset = vui->virtio_net_hdr_sz;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001650 }
1651 else
1652 {
1653 /* CSR case without ANYLAYOUT, skip 1st buffer */
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001654 desc_data_offset = desc_table[desc_current].len;
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001655 }
1656
Damjan Marion00a9dca2016-08-17 17:05:46 +02001657 while (1)
1658 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001659 /* Get more input if necessary. Or end of packet. */
1660 if (desc_data_offset == desc_table[desc_current].len)
Damjan Marion00a9dca2016-08-17 17:05:46 +02001661 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001662 if (PREDICT_FALSE (desc_table[desc_current].flags &
1663 VIRTQ_DESC_F_NEXT))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001664 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001665 desc_current = desc_table[desc_current].next;
1666 desc_data_offset = 0;
1667 }
1668 else
1669 {
1670 goto out;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001671 }
1672 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001673
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001674 /* Get more output if necessary. Or end of packet. */
1675 if (PREDICT_FALSE
1676 (b_current->current_length == VLIB_BUFFER_DATA_SIZE))
1677 {
1678 if (PREDICT_FALSE
1679 (vum->cpus[cpu_index].rx_buffers_len == 0))
1680 {
Steven62411e72017-02-03 09:30:37 -08001681 /* Cancel speculation */
1682 to_next--;
1683 n_left_to_next++;
1684
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001685 /*
1686			   * Check whether there are any buffers left.
1687 * If not, just rewind the used buffers and stop.
1688 * Note: Scheduled copies are not cancelled. This is
1689 * not an issue as they would still be valid. Useless,
1690 * but valid.
1691 */
1692 vhost_user_input_rewind_buffers (vm,
1693 &vum->cpus[cpu_index],
1694 b_head);
1695 n_left = 0;
1696 goto stop;
1697 }
1698
1699 /* Get next output */
1700 vum->cpus[cpu_index].rx_buffers_len--;
1701 u32 bi_next =
1702 (vum->cpus[cpu_index].rx_buffers)[vum->cpus
1703 [cpu_index].rx_buffers_len];
1704 b_current->next_buffer = bi_next;
1705 b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
1706 bi_current = bi_next;
1707 b_current = vlib_get_buffer (vm, bi_current);
1708 }
1709
1710 /* Prepare a copy order executed later for the data */
1711 vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len];
1712 copy_len++;
1713 u32 desc_data_l =
1714 desc_table[desc_current].len - desc_data_offset;
1715 cpy->len = VLIB_BUFFER_DATA_SIZE - b_current->current_length;
1716 cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
1717 cpy->dst = (uword) vlib_buffer_get_current (b_current);
1718 cpy->src = desc_table[desc_current].addr + desc_data_offset;
1719
1720 desc_data_offset += cpy->len;
1721
1722 b_current->current_length += cpy->len;
1723 b_head->total_length_not_including_first_buffer += cpy->len;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001724 }
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001725
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001726 out:
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001727 CLIB_PREFETCH (&n_left, sizeof (n_left), LOAD);
1728
1729 n_rx_bytes += b_head->total_length_not_including_first_buffer;
1730 n_rx_packets++;
1731
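	  /* The counter accumulated every copied byte, head buffer included,
	   * so remove the head buffer's own length from it */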
1732 b_head->total_length_not_including_first_buffer -=
1733 b_head->current_length;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001734
Damjan Marion00a9dca2016-08-17 17:05:46 +02001735 /* consume the descriptor and return it as used */
1736 txvq->last_avail_idx++;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001737 txvq->last_used_idx++;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001738
Damjan Marion00a9dca2016-08-17 17:05:46 +02001739 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001740
Damjan Marion00a9dca2016-08-17 17:05:46 +02001741 vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
1742 vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001743 b_head->error = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001744
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001745 {
1746 u32 next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
Pierre Pfister328e99b2016-02-12 13:18:42 +00001747
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001748 /* redirect if feature path enabled */
1749 vnet_feature_start_device_input_x1 (vui->sw_if_index, &next0,
1750 b_head, 0);
Damjan Marion22311502016-10-28 20:30:15 +02001751
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001752 u32 bi = to_next[-1]; //Cannot use to_next[-1] in the macro
1753 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1754 to_next, n_left_to_next,
1755 bi, next0);
1756 }
Damjan Marion22311502016-10-28 20:30:15 +02001757
Damjan Marion00a9dca2016-08-17 17:05:46 +02001758 n_left--;
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001759
1760 /*
1761 * Although separating memory copies from virtio ring parsing
1762	   * is beneficial, we can afford to perform the copies from time
1763	   * to time in order to free some space in the ring.
1764 */
1765 if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
Pierre Pfistere21c5282016-09-21 08:04:59 +01001766 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001767 if (PREDICT_FALSE
1768 (vhost_user_input_copy (vui, vum->cpus[cpu_index].copy,
1769 copy_len, &map_hint)))
1770 {
1771 clib_warning
1772 ("Memory mapping error on interface hw_if_index=%d "
1773 "(Shutting down - Switch interface down and up to restart)",
1774 vui->hw_if_index);
1775 vui->admin_up = 0;
1776 copy_len = 0;
1777 break;
1778 }
1779 copy_len = 0;
1780
1781 /* give buffers back to driver */
1782 CLIB_MEMORY_BARRIER ();
1783 txvq->used->idx = txvq->last_used_idx;
1784 vhost_user_log_dirty_ring (vui, txvq, idx);
Pierre Pfistere21c5282016-09-21 08:04:59 +01001785 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02001786 }
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001787 stop:
Damjan Marion00a9dca2016-08-17 17:05:46 +02001788 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001789 }
1790
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001791 /* Do the memory copies */
1792 if (PREDICT_FALSE
1793 (vhost_user_input_copy (vui, vum->cpus[cpu_index].copy,
1794 copy_len, &map_hint)))
1795 {
1796 clib_warning ("Memory mapping error on interface hw_if_index=%d "
1797 "(Shutting down - Switch interface down and up to restart)",
1798 vui->hw_if_index);
1799 vui->admin_up = 0;
1800 }
Pierre Pfister328e99b2016-02-12 13:18:42 +00001801
1802 /* give buffers back to driver */
Damjan Marion00a9dca2016-08-17 17:05:46 +02001803 CLIB_MEMORY_BARRIER ();
Pierre Pfister328e99b2016-02-12 13:18:42 +00001804 txvq->used->idx = txvq->last_used_idx;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001805 vhost_user_log_dirty_ring (vui, txvq, idx);
Pierre Pfister328e99b2016-02-12 13:18:42 +00001806
Ed Warnickecb9cada2015-12-08 15:45:58 -07001807 /* interrupt (call) handling */
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00001808 if ((txvq->callfd_idx != ~0) && !(txvq->avail->flags & 1))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001809 {
1810 txvq->n_since_last_int += n_rx_packets;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001811
Damjan Marion00a9dca2016-08-17 17:05:46 +02001812 if (txvq->n_since_last_int > vum->coalesce_frames)
1813 vhost_user_send_call (vm, txvq);
1814 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07001815
1816 /* increase rx counters */
1817 vlib_increment_combined_counter
Damjan Marion00a9dca2016-08-17 17:05:46 +02001818 (vnet_main.interface_main.combined_sw_if_counters
1819 + VNET_INTERFACE_COUNTER_RX,
1820 os_get_cpu_number (), vui->sw_if_index, n_rx_packets, n_rx_bytes);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001821
1822 return n_rx_packets;
1823}
1824
1825static uword
1826vhost_user_input (vlib_main_t * vm,
Damjan Marion00a9dca2016-08-17 17:05:46 +02001827 vlib_node_runtime_t * node, vlib_frame_t * f)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001828{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001829 vhost_user_main_t *vum = &vhost_user_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001830 uword n_rx_packets = 0;
Pierre Pfistere21c5282016-09-21 08:04:59 +01001831 u32 cpu_index = os_get_cpu_number ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07001832
Pierre Pfistere21c5282016-09-21 08:04:59 +01001833
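  /* Poll every (interface, queue) pair assigned to this worker thread */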
1834 vhost_iface_and_queue_t *vhiq;
1835 vec_foreach (vhiq, vum->cpus[cpu_index].rx_queues)
1836 {
1837 vhost_user_intf_t *vui =
1838 &vum->vhost_user_interfaces[vhiq->vhost_iface_index];
1839 n_rx_packets += vhost_user_if_input (vm, vum, vui, vhiq->qid, node);
1840 }
1841
Ed Warnickecb9cada2015-12-08 15:45:58 -07001842 return n_rx_packets;
1843}
1844
Damjan Marion00a9dca2016-08-17 17:05:46 +02001845/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07001846VLIB_REGISTER_NODE (vhost_user_input_node) = {
1847 .function = vhost_user_input,
1848 .type = VLIB_NODE_TYPE_INPUT,
1849 .name = "vhost-user-input",
Damjan Marion51327ac2016-11-09 11:59:42 +01001850 .sibling_of = "device-input",
Ed Warnickecb9cada2015-12-08 15:45:58 -07001851
1852 /* Will be enabled if/when hardware is detected. */
1853 .state = VLIB_NODE_STATE_DISABLED,
1854
1855 .format_buffer = format_ethernet_header_with_length,
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001856 .format_trace = format_vhost_trace,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001857
1858 .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
1859 .error_strings = vhost_user_input_func_error_strings,
Ed Warnickecb9cada2015-12-08 15:45:58 -07001860};
1861
Damjan Marion1c80e832016-05-11 23:07:18 +02001862VLIB_NODE_FUNCTION_MULTIARCH (vhost_user_input_node, vhost_user_input)
Damjan Marion00a9dca2016-08-17 17:05:46 +02001863/* *INDENT-ON* */
Damjan Marion1c80e832016-05-11 23:07:18 +02001864
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001865
1866void
1867vhost_user_tx_trace (vhost_trace_t * t,
1868 vhost_user_intf_t * vui, u16 qid,
1869 vlib_buffer_t * b, vhost_user_vring_t * rxvq)
1870{
1871 vhost_user_main_t *vum = &vhost_user_main;
1872 u32 qsz_mask = rxvq->qsz - 1;
1873 u32 last_avail_idx = rxvq->last_avail_idx;
1874 u32 desc_current = rxvq->avail->ring[last_avail_idx & qsz_mask];
1875 vring_desc_t *hdr_desc = 0;
1876 u32 hint = 0;
1877
1878 memset (t, 0, sizeof (*t));
1879 t->device_index = vui - vum->vhost_user_interfaces;
1880 t->qid = qid;
1881
1882 hdr_desc = &rxvq->desc[desc_current];
1883 if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
1884 {
1885 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001886      /* The header is in the first descriptor of the indirect table */
Pierre Pfister116ea4b2016-11-08 15:49:28 +00001887 hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
1888 }
1889 if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
1890 {
1891 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
1892 }
1893 if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
1894 !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
1895 {
1896 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
1897 }
1898
1899 t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
1900}
1901
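/*
 * Execute the copy operations queued by the TX path.
 * Guest destination addresses are mapped two entries ahead so each memcpy
 * writes to memory that is already mapped, and every written region is
 * logged as dirty for live-migration support.
 * Returns 1 if any guest address fails to map, 0 otherwise.
 */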
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001902static_always_inline u32
1903vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
1904 u16 copy_len, u32 * map_hint)
1905{
1906 void *dst0, *dst1, *dst2, *dst3;
1907 if (PREDICT_TRUE (copy_len >= 4))
1908 {
1909 if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
1910 return 1;
1911 if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
1912 return 1;
1913 while (PREDICT_TRUE (copy_len >= 4))
1914 {
1915 dst0 = dst2;
1916 dst1 = dst3;
1917
1918 if (PREDICT_FALSE
1919 (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
1920 return 1;
1921 if (PREDICT_FALSE
1922 (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
1923 return 1;
1924
1925 CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
1926 CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);
1927
1928 clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
1929 clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);
1930
1931 vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
1932 vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
1933 copy_len -= 2;
1934 cpy += 2;
1935 }
1936 }
1937 while (copy_len)
1938 {
1939 if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
1940 return 1;
1941 clib_memcpy (dst0, (void *) cpy->src, cpy->len);
1942 vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
1943 copy_len -= 1;
1944 cpy += 1;
1945 }
1946 return 0;
1947}
1948
1949
Ed Warnickecb9cada2015-12-08 15:45:58 -07001950static uword
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00001951vhost_user_tx (vlib_main_t * vm,
1952 vlib_node_runtime_t * node, vlib_frame_t * frame)
Ed Warnickecb9cada2015-12-08 15:45:58 -07001953{
Damjan Marion00a9dca2016-08-17 17:05:46 +02001954 u32 *buffers = vlib_frame_args (frame);
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001955 u32 n_left = frame->n_vectors;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001956 vhost_user_main_t *vum = &vhost_user_main;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001957 vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
1958 vhost_user_intf_t *vui =
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00001959 pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
Pierre Pfistere21c5282016-09-21 08:04:59 +01001960 u32 qid = ~0;
1961 vhost_user_vring_t *rxvq;
Ed Warnickecb9cada2015-12-08 15:45:58 -07001962 u16 qsz_mask;
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001963 u8 error;
Pierre Pfistere21c5282016-09-21 08:04:59 +01001964 u32 cpu_index = os_get_cpu_number ();
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001965 u32 map_hint = 0;
1966 u8 retry = 8;
1967 u16 copy_len;
1968 u16 tx_headers_len;
Pierre Pfisterba1d0462016-07-27 16:38:20 +01001969
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001970 if (PREDICT_FALSE (!vui->admin_up))
1971 {
1972 error = VHOST_USER_TX_FUNC_ERROR_DOWN;
1973 goto done3;
1974 }
1975
1976 if (PREDICT_FALSE (!vui->is_up))
Damjan Marion00a9dca2016-08-17 17:05:46 +02001977 {
1978 error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
Pierre Pfistere21c5282016-09-21 08:04:59 +01001979 goto done3;
Damjan Marion00a9dca2016-08-17 17:05:46 +02001980 }
Damjan Marion920ecc22016-01-12 18:34:24 +01001981
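  /* Pick the guest RX vring assigned to this worker thread */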
Pierre Pfistere21c5282016-09-21 08:04:59 +01001982 qid =
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001983 VHOST_VRING_IDX_RX (*vec_elt_at_index
1984 (vui->per_cpu_tx_qid, os_get_cpu_number ()));
Pierre Pfistere21c5282016-09-21 08:04:59 +01001985 rxvq = &vui->vrings[qid];
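  /* Several worker threads may share this vring; serialize access if so */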
1986 if (PREDICT_FALSE (vui->use_tx_spinlock))
1987 vhost_user_vring_lock (vui, qid);
Ed Warnickecb9cada2015-12-08 15:45:58 -07001988
Damjan Marion00a9dca2016-08-17 17:05:46 +02001989 qsz_mask = rxvq->qsz - 1; /* qsz is always power of 2 */
Ed Warnickecb9cada2015-12-08 15:45:58 -07001990
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001991retry:
1992 error = VHOST_USER_TX_FUNC_ERROR_NONE;
1993 tx_headers_len = 0;
1994 copy_len = 0;
1995 while (n_left > 0)
Damjan Marion00a9dca2016-08-17 17:05:46 +02001996 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00001997 vlib_buffer_t *b0, *current_b0;
1998 u16 desc_head, desc_index, desc_len;
1999 vring_desc_t *desc_table;
2000 uword buffer_map_addr;
2001 u32 buffer_len;
2002 u16 bytes_left;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002003
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002004 if (PREDICT_TRUE (n_left > 1))
2005 vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
2006
2007 b0 = vlib_get_buffer (vm, buffers[0]);
2008
2009 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
Pierre Pfister116ea4b2016-11-08 15:49:28 +00002010 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002011 vum->cpus[cpu_index].current_trace =
2012 vlib_add_trace (vm, node, b0,
2013 sizeof (*vum->cpus[cpu_index].current_trace));
2014 vhost_user_tx_trace (vum->cpus[cpu_index].current_trace,
2015 vui, qid / 2, b0, rxvq);
2016 }
Pierre Pfister116ea4b2016-11-08 15:49:28 +00002017
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002018 if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
2019 {
2020 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
2021 goto done;
2022 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002023
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002024 desc_table = rxvq->desc;
2025 desc_head = desc_index =
2026 rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
2027
2028 /* Go deeper in case of indirect descriptor
2029 * I don't know of any driver providing indirect for RX. */
2030 if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
2031 {
2032 if (PREDICT_FALSE
2033 (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
Pierre Pfisterba1d0462016-07-27 16:38:20 +01002034 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002035 error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
Pierre Pfisterba1d0462016-07-27 16:38:20 +01002036 goto done;
2037 }
2038 if (PREDICT_FALSE
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002039 (!(desc_table =
2040 map_guest_mem (vui, rxvq->desc[desc_index].addr,
2041 &map_hint))))
Pierre Pfisterba1d0462016-07-27 16:38:20 +01002042 {
2043 error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
2044 goto done;
2045 }
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002046 desc_index = 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002047 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002048
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002049 desc_len = vui->virtio_net_hdr_sz;
2050 buffer_map_addr = desc_table[desc_index].addr;
2051 buffer_len = desc_table[desc_index].len;
2052
2053 {
2054 // Get a header from the header array
2055 virtio_net_hdr_mrg_rxbuf_t *hdr =
2056 &vum->cpus[cpu_index].tx_headers[tx_headers_len];
2057 tx_headers_len++;
2058 hdr->hdr.flags = 0;
2059 hdr->hdr.gso_type = 0;
2060 hdr->num_buffers = 1; //This is local, no need to check
2061
2062 // Prepare a copy order executed later for the header
2063 vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len];
2064 copy_len++;
2065 cpy->len = vui->virtio_net_hdr_sz;
2066 cpy->dst = buffer_map_addr;
2067 cpy->src = (uword) hdr;
2068 }
2069
2070 buffer_map_addr += vui->virtio_net_hdr_sz;
2071 buffer_len -= vui->virtio_net_hdr_sz;
2072 bytes_left = b0->current_length;
2073 current_b0 = b0;
2074 while (1)
2075 {
2076 if (buffer_len == 0)
2077 { //Get new output
2078 if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
2079 {
2080 //Next one is chained
2081 desc_index = desc_table[desc_index].next;
2082 buffer_map_addr = desc_table[desc_index].addr;
2083 buffer_len = desc_table[desc_index].len;
2084 }
2085 else if (vui->virtio_net_hdr_sz == 12) //MRG is available
2086 {
2087 virtio_net_hdr_mrg_rxbuf_t *hdr =
2088 &vum->cpus[cpu_index].tx_headers[tx_headers_len - 1];
2089
2090 //Move from available to used buffer
2091 rxvq->used->ring[rxvq->last_used_idx & qsz_mask].id =
2092 desc_head;
2093 rxvq->used->ring[rxvq->last_used_idx & qsz_mask].len =
2094 desc_len;
2095 vhost_user_log_dirty_ring (vui, rxvq,
2096 ring[rxvq->last_used_idx &
2097 qsz_mask]);
2098
2099 rxvq->last_avail_idx++;
2100 rxvq->last_used_idx++;
2101 hdr->num_buffers++;
2102 desc_len = 0;
2103
2104 if (PREDICT_FALSE
2105 (rxvq->last_avail_idx == rxvq->avail->idx))
2106 {
2107 //Dequeue queued descriptors for this packet
2108 rxvq->last_used_idx -= hdr->num_buffers - 1;
2109 rxvq->last_avail_idx -= hdr->num_buffers - 1;
2110 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
2111 goto done;
2112 }
2113
2114 desc_table = rxvq->desc;
2115 desc_head = desc_index =
2116 rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
2117 if (PREDICT_FALSE
2118 (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
2119 {
2120			    //It is seriously unlikely that a driver will put an indirect
2121			    //descriptor after a non-indirect one.
2122 if (PREDICT_FALSE
2123 (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
2124 {
2125 error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
2126 goto done;
2127 }
2128 if (PREDICT_FALSE
2129 (!(desc_table =
2130 map_guest_mem (vui,
2131 rxvq->desc[desc_index].addr,
2132 &map_hint))))
2133 {
2134 error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
2135 goto done;
2136 }
2137 desc_index = 0;
2138 }
2139 buffer_map_addr = desc_table[desc_index].addr;
2140 buffer_len = desc_table[desc_index].len;
2141 }
2142 else
2143 {
2144 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
2145 goto done;
2146 }
2147 }
2148
2149 {
2150 vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len];
2151 copy_len++;
2152 cpy->len = bytes_left;
2153 cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
2154 cpy->dst = buffer_map_addr;
2155 cpy->src = (uword) vlib_buffer_get_current (current_b0) +
2156 current_b0->current_length - bytes_left;
2157
2158 bytes_left -= cpy->len;
2159 buffer_len -= cpy->len;
2160 buffer_map_addr += cpy->len;
2161 desc_len += cpy->len;
2162
Pierre Pfister14ac8012016-12-08 07:58:47 +00002163 CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002164 }
2165
2166 // Check if vlib buffer has more data. If not, get more or break.
2167 if (PREDICT_TRUE (!bytes_left))
2168 {
2169 if (PREDICT_FALSE
2170 (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
2171 {
2172 current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
2173 bytes_left = current_b0->current_length;
2174 }
2175 else
2176 {
2177 //End of packet
2178 break;
2179 }
2180 }
2181 }
2182
2183 //Move from available to used ring
2184 rxvq->used->ring[rxvq->last_used_idx & qsz_mask].id = desc_head;
2185 rxvq->used->ring[rxvq->last_used_idx & qsz_mask].len = desc_len;
2186 vhost_user_log_dirty_ring (vui, rxvq,
2187 ring[rxvq->last_used_idx & qsz_mask]);
2188 rxvq->last_avail_idx++;
2189 rxvq->last_used_idx++;
2190
2191 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2192 {
2193 vum->cpus[cpu_index].current_trace->hdr =
2194 vum->cpus[cpu_index].tx_headers[tx_headers_len - 1];
2195 }
2196
2197 n_left--; //At the end for error counting when 'goto done' is invoked
2198 buffers++;
2199 }
2200
2201done:
2202 //Do the memory copies
2203 if (PREDICT_FALSE
2204 (vhost_user_tx_copy (vui, vum->cpus[cpu_index].copy,
2205 copy_len, &map_hint)))
2206 {
2207 clib_warning ("Memory mapping error on interface hw_if_index=%d "
2208 "(Shutting down - Switch interface down and up to restart)",
2209 vui->hw_if_index);
2210 vui->admin_up = 0;
2211 }
2212
2213 CLIB_MEMORY_BARRIER ();
2214 rxvq->used->idx = rxvq->last_used_idx;
2215 vhost_user_log_dirty_ring (vui, rxvq, idx);
2216
2217 /*
2218 * When n_left is set, error is always set to something too.
2219   * If the error is due to a lack of remaining buffers, we go back up and
2220   * retry.
2221   * The idea is that it is better to spend some extra time on packets
2222   * that have already been processed than to drop them and fetch
2223   * fresh packets that will most likely be dropped too.
2224   * This technique also gives the VM driver more time to pick up packets.
2225   * When traffic flows from physical to virtual interfaces, this
2226   * technique ends up leveraging the physical NIC buffer in order to
2227 * absorb the VM's CPU jitter.
2228 */
2229 if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
2230 {
2231 retry--;
2232 goto retry;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002233 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002234
Ed Warnickecb9cada2015-12-08 15:45:58 -07002235 /* interrupt (call) handling */
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002236 if ((rxvq->callfd_idx != ~0) && !(rxvq->avail->flags & 1))
Damjan Marion00a9dca2016-08-17 17:05:46 +02002237 {
Pierre Pfisterd3eb90e2016-11-29 15:36:14 +00002238 rxvq->n_since_last_int += frame->n_vectors - n_left;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002239
Damjan Marion00a9dca2016-08-17 17:05:46 +02002240 if (rxvq->n_since_last_int > vum->coalesce_frames)
2241 vhost_user_send_call (vm, rxvq);
2242 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002243
Pierre Pfistere21c5282016-09-21 08:04:59 +01002244 vhost_user_vring_unlock (vui, qid);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002245
Pierre Pfistere21c5282016-09-21 08:04:59 +01002246done3:
Damjan Marion00a9dca2016-08-17 17:05:46 +02002247 if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
2248 {
2249 vlib_error_count (vm, node->node_index, error, n_left);
2250 vlib_increment_simple_counter
2251 (vnet_main.interface_main.sw_if_counters
2252 + VNET_INTERFACE_COUNTER_DROP,
2253 os_get_cpu_number (), vui->sw_if_index, n_left);
2254 }
Pierre Pfister328e99b2016-02-12 13:18:42 +00002255
Ed Warnickecb9cada2015-12-08 15:45:58 -07002256 vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
2257 return frame->n_vectors;
2258}
2259
2260static clib_error_t *
Damjan Marion00a9dca2016-08-17 17:05:46 +02002261vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
2262 u32 flags)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002263{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002264 vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002265 uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002266 vhost_user_main_t *vum = &vhost_user_main;
2267 vhost_user_intf_t *vui =
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002268 pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002269
2270 vui->admin_up = is_up;
2271
2272 if (is_up)
2273 vnet_hw_interface_set_flags (vnm, vui->hw_if_index,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002274 VNET_HW_INTERFACE_FLAG_LINK_UP);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002275
2276 return /* no error */ 0;
2277}
2278
Damjan Marion00a9dca2016-08-17 17:05:46 +02002279/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002280VNET_DEVICE_CLASS (vhost_user_dev_class,static) = {
2281 .name = "vhost-user",
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002282 .tx_function = vhost_user_tx,
Ed Warnickecb9cada2015-12-08 15:45:58 -07002283 .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
2284 .tx_function_error_strings = vhost_user_tx_func_error_strings,
2285 .format_device_name = format_vhost_user_interface_name,
2286 .name_renumber = vhost_user_name_renumber,
2287 .admin_up_down_function = vhost_user_interface_admin_up_down,
Pierre Pfister116ea4b2016-11-08 15:49:28 +00002288 .format_tx_trace = format_vhost_trace,
Ed Warnickecb9cada2015-12-08 15:45:58 -07002289};
2290
Damjan Marion1c80e832016-05-11 23:07:18 +02002291VLIB_DEVICE_TX_FUNCTION_MULTIARCH (vhost_user_dev_class,
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002292 vhost_user_tx)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002293/* *INDENT-ON* */
Damjan Marion1c80e832016-05-11 23:07:18 +02002294
Ed Warnickecb9cada2015-12-08 15:45:58 -07002295static uword
2296vhost_user_process (vlib_main_t * vm,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002297 vlib_node_runtime_t * rt, vlib_frame_t * f)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002298{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002299 vhost_user_main_t *vum = &vhost_user_main;
2300 vhost_user_intf_t *vui;
2301 struct sockaddr_un sun;
2302 int sockfd;
2303 unix_file_t template = { 0 };
2304 f64 timeout = 3153600000.0 /* 100 years */ ;
2305 uword *event_data = 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002306
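  /* This process wakes up when an interface is created or modified and
   * then every 3 seconds: it connects client-mode interfaces and checks
   * the health of already-connected sockets */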
Damjan Marion00a9dca2016-08-17 17:05:46 +02002307 sockfd = socket (AF_UNIX, SOCK_STREAM, 0);
2308 sun.sun_family = AF_UNIX;
2309 template.read_function = vhost_user_socket_read;
2310 template.error_function = vhost_user_socket_error;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002311
Damjan Marion00a9dca2016-08-17 17:05:46 +02002312 if (sockfd < 0)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002313 return 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002314
2315 while (1)
2316 {
2317 vlib_process_wait_for_event_or_clock (vm, timeout);
2318 vlib_process_get_events (vm, &event_data);
2319 vec_reset_length (event_data);
2320
2321 timeout = 3.0;
2322
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002323 /* *INDENT-OFF* */
2324 pool_foreach (vui, vum->vhost_user_interfaces, {
Damjan Marion00a9dca2016-08-17 17:05:46 +02002325
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002326 if (vui->unix_server_index == ~0) { //Nothing to do for server sockets
2327 if (vui->unix_file_index == ~0)
2328 {
2329 /* try to connect */
2330 strncpy (sun.sun_path, (char *) vui->sock_filename,
2331 sizeof (sun.sun_path) - 1);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002332
Andrew Yourtchenko0c3d4672017-01-03 16:52:22 +00002333 /* Avoid hanging VPP if the other end does not accept */
Dave Barach8f544962017-01-18 10:23:22 -05002334 if (fcntl(sockfd, F_SETFL, O_NONBLOCK) < 0)
2335 clib_unix_warning ("fcntl");
2336
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002337 if (connect (sockfd, (struct sockaddr *) &sun,
2338 sizeof (struct sockaddr_un)) == 0)
2339 {
Andrew Yourtchenko0c3d4672017-01-03 16:52:22 +00002340 /* Set the socket to blocking as it was before */
Dave Barach8f544962017-01-18 10:23:22 -05002341 if (fcntl(sockfd, F_SETFL, 0) < 0)
2342 clib_unix_warning ("fcntl2");
2343
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002344 vui->sock_errno = 0;
2345 template.file_descriptor = sockfd;
2346 template.private_data =
2347 vui - vhost_user_main.vhost_user_interfaces;
2348 vui->unix_file_index = unix_file_add (&unix_main, &template);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002349
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002350 //Re-open for next connect
2351 if ((sockfd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0) {
2352 clib_warning("Critical: Could not open unix socket");
2353 return 0;
2354 }
2355 }
2356 else
2357 {
2358 vui->sock_errno = errno;
2359 }
2360 }
2361 else
2362 {
2363 /* check if socket is alive */
2364 int error = 0;
2365 socklen_t len = sizeof (error);
2366 int fd = UNIX_GET_FD(vui->unix_file_index);
2367 int retval =
2368 getsockopt (fd, SOL_SOCKET, SO_ERROR, &error, &len);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002369
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002370 if (retval)
2371 {
2372 DBG_SOCK ("getsockopt returned %d", retval);
2373 vhost_user_if_disconnect (vui);
2374 }
2375 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02002376 }
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002377 });
2378 /* *INDENT-ON* */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002379 }
2380 return 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002381}
2382
Damjan Marion00a9dca2016-08-17 17:05:46 +02002383/* *INDENT-OFF* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002384VLIB_REGISTER_NODE (vhost_user_process_node,static) = {
2385 .function = vhost_user_process,
2386 .type = VLIB_NODE_TYPE_PROCESS,
2387 .name = "vhost-user-process",
2388};
Damjan Marion00a9dca2016-08-17 17:05:46 +02002389/* *INDENT-ON* */
Ed Warnickecb9cada2015-12-08 15:45:58 -07002390
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002391/**
2392 * Disables and resets the interface structure.
2393 * It can then either be initialized again or removed from the used interfaces.
2394 */
2395static void
2396vhost_user_term_if (vhost_user_intf_t * vui)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002397{
Ole Troan553a4112017-01-10 10:07:04 +01002398 int q;
2399
Pierre Pfister7a91b462016-11-21 12:50:38 +00002400 // Delete configured thread pinning
2401 vec_reset_length (vui->workers);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002402 // disconnect interface sockets
Damjan Marion00a9dca2016-08-17 17:05:46 +02002403 vhost_user_if_disconnect (vui);
Pierre Pfisterfbb2ef62016-11-16 02:43:29 +00002404 vhost_user_update_iface_state (vui);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002405
Ole Troan553a4112017-01-10 10:07:04 +01002406 for (q = 0; q < VHOST_VRING_MAX_N; q++)
2407 {
2408 clib_mem_free ((void *) vui->vring_locks[q]);
2409 }
2410
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002411 if (vui->unix_server_index != ~0)
2412 {
2413 //Close server socket
2414 unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
2415 vui->unix_server_index);
2416 unix_file_del (&unix_main, uf);
2417 vui->unix_server_index = ~0;
2418 }
2419}
Ed Warnickecb9cada2015-12-08 15:45:58 -07002420
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002421int
2422vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm, u32 sw_if_index)
2423{
2424 vhost_user_main_t *vum = &vhost_user_main;
2425 vhost_user_intf_t *vui;
2426 int rv = 0;
2427 vnet_hw_interface_t *hwif;
2428
2429 if (!(hwif = vnet_get_sup_hw_interface (vnm, sw_if_index)) ||
2430 hwif->dev_class_index != vhost_user_dev_class.index)
2431 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
2432
2433 DBG_SOCK ("Deleting vhost-user interface %s (instance %d)",
2434 hwif->name, hwif->dev_instance);
2435
2436 vui = pool_elt_at_index (vum->vhost_user_interfaces, hwif->dev_instance);
2437
2438 // Disable and reset interface
2439 vhost_user_term_if (vui);
2440
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002441 // Reset renumbered iface
2442 if (hwif->dev_instance <
2443 vec_len (vum->show_dev_instance_by_real_dev_instance))
2444 vum->show_dev_instance_by_real_dev_instance[hwif->dev_instance] = ~0;
2445
2446 // Delete ethernet interface
Ed Warnickecb9cada2015-12-08 15:45:58 -07002447 ethernet_delete_interface (vnm, vui->hw_if_index);
Wojciech Decd8e47872017-01-17 21:45:11 +01002448
2449 // Back to pool
2450 pool_put (vum->vhost_user_interfaces, vui);
2451
Ed Warnickecb9cada2015-12-08 15:45:58 -07002452 return rv;
2453}
2454
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002455/**
2456 * Open server unix socket on specified sock_filename.
2457 */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002458static int
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002459vhost_user_init_server_sock (const char *sock_filename, int *sock_fd)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002460{
Pierre Pfister5afccb22016-07-25 14:32:02 +01002461 int rv = 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002462 struct sockaddr_un un = { };
Ed Warnickecb9cada2015-12-08 15:45:58 -07002463 int fd;
2464 /* create listening socket */
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002465 if ((fd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0)
2466 return VNET_API_ERROR_SYSCALL_ERROR_1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002467
2468 un.sun_family = AF_UNIX;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002469 strncpy ((char *) un.sun_path, (char *) sock_filename,
2470 sizeof (un.sun_path) - 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002471
2472 /* remove if exists */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002473 unlink ((char *) sock_filename);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002474
Damjan Marion00a9dca2016-08-17 17:05:46 +02002475 if (bind (fd, (struct sockaddr *) &un, sizeof (un)) == -1)
2476 {
2477 rv = VNET_API_ERROR_SYSCALL_ERROR_2;
2478 goto error;
2479 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002480
Damjan Marion00a9dca2016-08-17 17:05:46 +02002481 if (listen (fd, 1) == -1)
2482 {
2483 rv = VNET_API_ERROR_SYSCALL_ERROR_3;
2484 goto error;
2485 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002486
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002487 *sock_fd = fd;
2488 return 0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002489
2490error:
Damjan Marion00a9dca2016-08-17 17:05:46 +02002491 close (fd);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002492 return rv;
2493}
2494
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002495/**
2496 * Create ethernet interface for vhost user interface.
2497 */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002498static void
2499vhost_user_create_ethernet (vnet_main_t * vnm, vlib_main_t * vm,
2500 vhost_user_intf_t * vui, u8 * hwaddress)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002501{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002502 vhost_user_main_t *vum = &vhost_user_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002503 u8 hwaddr[6];
Damjan Marion00a9dca2016-08-17 17:05:46 +02002504 clib_error_t *error;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002505
2506 /* create hw and sw interface */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002507 if (hwaddress)
2508 {
2509 clib_memcpy (hwaddr, hwaddress, 6);
2510 }
2511 else
2512 {
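      /* No MAC provided: generate a locally administered address
       * 02:fe:xx:xx:xx:xx from 4 random bytes */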
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002513 random_u32 (&vum->random);
2514 clib_memcpy (hwaddr + 2, &vum->random, sizeof (vum->random));
Damjan Marion00a9dca2016-08-17 17:05:46 +02002515 hwaddr[0] = 2;
2516 hwaddr[1] = 0xfe;
2517 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002518
2519 error = ethernet_register_interface
2520 (vnm,
2521 vhost_user_dev_class.index,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002522 vui - vum->vhost_user_interfaces /* device instance */ ,
2523 hwaddr /* ethernet address */ ,
2524 &vui->hw_if_index, 0 /* flag change */ );
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002525
Ed Warnickecb9cada2015-12-08 15:45:58 -07002526 if (error)
2527 clib_error_report (error);
Pierre Pfister328e99b2016-02-12 13:18:42 +00002528
2529 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, vui->hw_if_index);
2530 hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] = 9000;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002531}
2532
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002533/*
2534 * Initialize vui with specified attributes
2535 */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002536static void
2537vhost_user_vui_init (vnet_main_t * vnm,
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002538 vhost_user_intf_t * vui,
2539 int server_sock_fd,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002540 const char *sock_filename,
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002541 u64 feature_mask, u32 * sw_if_index)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002542{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002543 vnet_sw_interface_t *sw;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002544 sw = vnet_get_hw_sw_interface (vnm, vui->hw_if_index);
Yoann Desmouceaux4667c222016-02-24 22:51:00 +01002545 int q;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002546
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002547 if (server_sock_fd != -1)
2548 {
2549 unix_file_t template = { 0 };
2550 template.read_function = vhost_user_socksvr_accept_ready;
2551 template.file_descriptor = server_sock_fd;
2552 template.private_data = vui - vhost_user_main.vhost_user_interfaces; //hw index
2553 vui->unix_server_index = unix_file_add (&unix_main, &template);
2554 }
2555 else
2556 {
2557 vui->unix_server_index = ~0;
2558 }
2559
Ed Warnickecb9cada2015-12-08 15:45:58 -07002560 vui->sw_if_index = sw->sw_if_index;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002561 strncpy (vui->sock_filename, sock_filename,
2562 ARRAY_LEN (vui->sock_filename) - 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002563 vui->sock_errno = 0;
2564 vui->is_up = 0;
2565 vui->feature_mask = feature_mask;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002566 vui->unix_file_index = ~0;
Yoann Desmouceaux4667c222016-02-24 22:51:00 +01002567 vui->log_base_addr = 0;
2568
Pierre Pfistere21c5282016-09-21 08:04:59 +01002569 for (q = 0; q < VHOST_VRING_MAX_N; q++)
2570 vhost_user_vring_init (vui, q);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002571
Damjan Marion00a9dca2016-08-17 17:05:46 +02002572 vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002573
2574 if (sw_if_index)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002575 *sw_if_index = vui->sw_if_index;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002576
Pierre Pfistere21c5282016-09-21 08:04:59 +01002577 for (q = 0; q < VHOST_VRING_MAX_N; q++)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002578 {
Pierre Pfistere21c5282016-09-21 08:04:59 +01002579 vui->vring_locks[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
2580 CLIB_CACHE_LINE_BYTES);
2581 memset ((void *) vui->vring_locks[q], 0, CLIB_CACHE_LINE_BYTES);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002582 }
Pierre Pfistere21c5282016-09-21 08:04:59 +01002583
2584 vec_validate (vui->per_cpu_tx_qid,
2585 vlib_get_thread_main ()->n_vlib_mains - 1);
2586 vhost_user_tx_thread_placement (vui);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002587}
2588
Damjan Marion00a9dca2016-08-17 17:05:46 +02002589int
2590vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
2591 const char *sock_filename,
2592 u8 is_server,
2593 u32 * sw_if_index,
2594 u64 feature_mask,
2595 u8 renumber, u32 custom_dev_instance, u8 * hwaddr)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002596{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002597 vhost_user_intf_t *vui = NULL;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002598 u32 sw_if_idx = ~0;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002599 int rv = 0;
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002600 int server_sock_fd = -1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002601
Wojciech Dec3cd9eed2017-01-03 10:38:37 +01002602 if (sock_filename == NULL || !(strlen (sock_filename) > 0))
2603 {
2604 return VNET_API_ERROR_INVALID_ARGUMENT;
2605 }
2606
Damjan Marion00a9dca2016-08-17 17:05:46 +02002607 if (is_server)
2608 {
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002609 if ((rv =
2610 vhost_user_init_server_sock (sock_filename, &server_sock_fd)) != 0)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002611 {
2612 return rv;
2613 }
2614 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002615
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002616 pool_get (vhost_user_main.vhost_user_interfaces, vui);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002617
Pierre Pfisteref65cb02016-02-19 13:52:44 +00002618 vhost_user_create_ethernet (vnm, vm, vui, hwaddr);
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002619 vhost_user_vui_init (vnm, vui, server_sock_fd, sock_filename,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002620 feature_mask, &sw_if_idx);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002621
Damjan Marion00a9dca2016-08-17 17:05:46 +02002622 if (renumber)
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002623 vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002624
2625 if (sw_if_index)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002626 *sw_if_index = sw_if_idx;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002627
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002628 // Wake up the vhost-user process node so it starts servicing the new interface
2629 vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002630 return rv;
2631}
2632
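/*
 * Usage sketch (illustration only, not referenced by the driver): how an
 * out-of-tree caller, e.g. an API handler, might create a client-mode
 * vhost-user interface with the default feature mask. Only the
 * vhost_user_create_if () signature above is taken from this file; the
 * socket path and error handling are assumptions.
 */
#if 0
static void
vhost_user_create_example (vlib_main_t * vm)
{
  u32 sw_if_index = ~0;
  int rv;

  /* Client mode (is_server = 0), all features advertised, no renumbering. */
  rv = vhost_user_create_if (vnet_get_main (), vm, "/tmp/vhost1.sock",
			     0 /* is_server */ , &sw_if_index,
			     ~0ULL /* feature_mask */ , 0 /* renumber */ ,
			     ~0 /* custom_dev_instance */ ,
			     NULL /* hwaddr */ );
  if (rv)
    clib_warning ("vhost_user_create_if returned %d", rv);
  else
    clib_warning ("created vhost-user interface, sw_if_index %u",
		  sw_if_index);
}
#endif
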
Damjan Marion00a9dca2016-08-17 17:05:46 +02002633int
2634vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
2635 const char *sock_filename,
2636 u8 is_server,
2637 u32 sw_if_index,
2638 u64 feature_mask, u8 renumber, u32 custom_dev_instance)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002639{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002640 vhost_user_main_t *vum = &vhost_user_main;
2641 vhost_user_intf_t *vui = NULL;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002642 u32 sw_if_idx = ~0;
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002643 int server_sock_fd = -1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002644 int rv = 0;
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002645 vnet_hw_interface_t *hwif;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002646
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002647 if (!(hwif = vnet_get_sup_hw_interface (vnm, sw_if_index)) ||
2648 hwif->dev_class_index != vhost_user_dev_class.index)
2649 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002650
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002651 vui = vec_elt_at_index (vum->vhost_user_interfaces, hwif->dev_instance);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002652
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002653 // First try to open server socket
Damjan Marion00a9dca2016-08-17 17:05:46 +02002654 if (is_server)
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002655 if ((rv = vhost_user_init_server_sock (sock_filename,
2656 &server_sock_fd)) != 0)
2657 return rv;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002658
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002659 vhost_user_term_if (vui);
2660 vhost_user_vui_init (vnm, vui, server_sock_fd,
2661 sock_filename, feature_mask, &sw_if_idx);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002662
Damjan Marion00a9dca2016-08-17 17:05:46 +02002663 if (renumber)
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002664 vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002665
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002666 // Wake up the vhost-user process node so it services the reconfigured interface
2667 vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002668 return rv;
2669}
2670
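/*
 * Companion sketch (illustration only): vhost_user_modify_if () reuses an
 * existing interface (identified by sw_if_index) instead of creating one,
 * e.g. to switch it to server mode on a new socket. The concrete values
 * below are assumptions; only the signature above comes from this file.
 */
#if 0
static void
vhost_user_modify_example (vlib_main_t * vm, u32 sw_if_index)
{
  int rv = vhost_user_modify_if (vnet_get_main (), vm, "/tmp/vhost2.sock",
				 1 /* is_server */ , sw_if_index,
				 ~0ULL /* feature_mask */ ,
				 0 /* renumber */ ,
				 ~0 /* custom_dev_instance */ );
  if (rv)
    clib_warning ("vhost_user_modify_if returned %d", rv);
}
#endif
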
2671clib_error_t *
2672vhost_user_connect_command_fn (vlib_main_t * vm,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002673 unformat_input_t * input,
2674 vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002675{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002676 unformat_input_t _line_input, *line_input = &_line_input;
2677 u8 *sock_filename = NULL;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002678 u32 sw_if_index;
2679 u8 is_server = 0;
Pierre Pfistere21c5282016-09-21 08:04:59 +01002680 u64 feature_mask = (u64) ~ (0ULL);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002681 u8 renumber = 0;
2682 u32 custom_dev_instance = ~0;
Pierre Pfisteref65cb02016-02-19 13:52:44 +00002683 u8 hwaddr[6];
2684 u8 *hw = NULL;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002685
2686 /* Get a line of input. */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002687 if (!unformat_user (input, unformat_line_input, line_input))
Ed Warnickecb9cada2015-12-08 15:45:58 -07002688 return 0;
2689
Damjan Marion00a9dca2016-08-17 17:05:46 +02002690 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
2691 {
2692 if (unformat (line_input, "socket %s", &sock_filename))
2693 ;
2694 else if (unformat (line_input, "server"))
2695 is_server = 1;
2696 else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
2697 ;
2698 else
2699 if (unformat
2700 (line_input, "hwaddr %U", unformat_ethernet_address, hwaddr))
2701 hw = hwaddr;
2702 else if (unformat (line_input, "renumber %d", &custom_dev_instance))
2703 {
2704 renumber = 1;
2705 }
2706 else
2707 return clib_error_return (0, "unknown input `%U'",
2708 format_unformat_error, input);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002709 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002710 unformat_free (line_input);
2711
Damjan Marion00a9dca2016-08-17 17:05:46 +02002712 vnet_main_t *vnm = vnet_get_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07002713
Pierre Pfister5afccb22016-07-25 14:32:02 +01002714 int rv;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002715 if ((rv = vhost_user_create_if (vnm, vm, (char *) sock_filename,
2716 is_server, &sw_if_index, feature_mask,
2717 renumber, custom_dev_instance, hw)))
2718 {
2719 vec_free (sock_filename);
Pierre Pfister5afccb22016-07-25 14:32:02 +01002720 return clib_error_return (0, "vhost_user_create_if returned %d", rv);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002721 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002722
Damjan Marion00a9dca2016-08-17 17:05:46 +02002723 vec_free (sock_filename);
2724 vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
2725 sw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002726 return 0;
2727}
2728
2729clib_error_t *
2730vhost_user_delete_command_fn (vlib_main_t * vm,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002731 unformat_input_t * input,
2732 vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002733{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002734 unformat_input_t _line_input, *line_input = &_line_input;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002735 u32 sw_if_index = ~0;
Pierre Pfisterece983d2016-11-21 12:52:22 +00002736 vnet_main_t *vnm = vnet_get_main ();
Ed Warnickecb9cada2015-12-08 15:45:58 -07002737
2738 /* Get a line of input. */
Damjan Marion00a9dca2016-08-17 17:05:46 +02002739 if (!unformat_user (input, unformat_line_input, line_input))
Ed Warnickecb9cada2015-12-08 15:45:58 -07002740 return 0;
2741
Damjan Marion00a9dca2016-08-17 17:05:46 +02002742 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
2743 {
2744 if (unformat (line_input, "sw_if_index %d", &sw_if_index))
2745 ;
Pierre Pfisterece983d2016-11-21 12:52:22 +00002746 else if (unformat
2747 (line_input, "%U", unformat_vnet_sw_interface, vnm,
2748 &sw_if_index))
2749 {
2750 vnet_hw_interface_t *hwif =
2751 vnet_get_sup_hw_interface (vnm, sw_if_index);
2752 if (hwif == NULL ||
2753 vhost_user_dev_class.index != hwif->dev_class_index)
2754 return clib_error_return (0, "Not a vhost interface");
2755 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02002756 else
2757 return clib_error_return (0, "unknown input `%U'",
2758 format_unformat_error, input);
2759 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002760 unformat_free (line_input);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002761 vhost_user_delete_if (vnm, vm, sw_if_index);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002762 return 0;
2763}
2764
Damjan Marion00a9dca2016-08-17 17:05:46 +02002765int
2766vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
2767 vhost_user_intf_details_t ** out_vuids)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002768{
2769 int rv = 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002770 vhost_user_main_t *vum = &vhost_user_main;
2771 vhost_user_intf_t *vui;
2772 vhost_user_intf_details_t *r_vuids = NULL;
2773 vhost_user_intf_details_t *vuid = NULL;
2774 u32 *hw_if_indices = 0;
2775 vnet_hw_interface_t *hi;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002776 u8 *s = NULL;
2777 int i;
2778
2779 if (!out_vuids)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002780 return -1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07002781
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002782 pool_foreach (vui, vum->vhost_user_interfaces,
2783 vec_add1 (hw_if_indices, vui->hw_if_index);
2784 );
Ed Warnickecb9cada2015-12-08 15:45:58 -07002785
Damjan Marion00a9dca2016-08-17 17:05:46 +02002786 for (i = 0; i < vec_len (hw_if_indices); i++)
2787 {
2788 hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002789 vui = pool_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002790
Damjan Marion00a9dca2016-08-17 17:05:46 +02002791 vec_add2 (r_vuids, vuid, 1);
2792 vuid->sw_if_index = vui->sw_if_index;
2793 vuid->virtio_net_hdr_sz = vui->virtio_net_hdr_sz;
2794 vuid->features = vui->features;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002795 vuid->num_regions = vui->nregions;
Marek Gradzki0578cd12017-02-13 14:19:51 +01002796 vuid->is_server = vui->unix_server_index != ~0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002797 vuid->sock_errno = vui->sock_errno;
2798 strncpy ((char *) vuid->sock_filename, (char *) vui->sock_filename,
2799 ARRAY_LEN (vuid->sock_filename) - 1);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002800
Damjan Marion00a9dca2016-08-17 17:05:46 +02002801 s = format (s, "%v%c", hi->name, 0);
Ed Warnickecb9cada2015-12-08 15:45:58 -07002802
Damjan Marion00a9dca2016-08-17 17:05:46 +02002803 strncpy ((char *) vuid->if_name, (char *) s,
2804 ARRAY_LEN (vuid->if_name) - 1);
2805 _vec_len (s) = 0;
2806 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002807
2808 vec_free (s);
2809 vec_free (hw_if_indices);
2810
2811 *out_vuids = r_vuids;
2812
2813 return rv;
2814}
2815
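/*
 * Consumer sketch (illustration only): vhost_user_dump_ifs () hands back a
 * vector of vhost_user_intf_details_t that the caller owns and must free.
 * The loop below shows how a dump handler might walk it; it is an
 * assumption, not code used by this driver.
 */
#if 0
static void
vhost_user_dump_example (vnet_main_t * vnm, vlib_main_t * vm)
{
  vhost_user_intf_details_t *vuids = NULL, *vuid;

  if (vhost_user_dump_ifs (vnm, vm, &vuids) == 0)
    {
      vec_foreach (vuid, vuids)
        clib_warning ("%s: sw_if_index %u, %s, %d memory regions",
                      vuid->if_name, vuid->sw_if_index,
                      vuid->is_server ? "server" : "client",
                      vuid->num_regions);
      vec_free (vuids);
    }
}
#endif
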
2816clib_error_t *
2817show_vhost_user_command_fn (vlib_main_t * vm,
Damjan Marion00a9dca2016-08-17 17:05:46 +02002818 unformat_input_t * input,
2819 vlib_cli_command_t * cmd)
Ed Warnickecb9cada2015-12-08 15:45:58 -07002820{
Damjan Marion00a9dca2016-08-17 17:05:46 +02002821 clib_error_t *error = 0;
2822 vnet_main_t *vnm = vnet_get_main ();
2823 vhost_user_main_t *vum = &vhost_user_main;
2824 vhost_user_intf_t *vui;
2825 u32 hw_if_index, *hw_if_indices = 0;
2826 vnet_hw_interface_t *hi;
Pierre Pfistere21c5282016-09-21 08:04:59 +01002827 vhost_cpu_t *vhc;
2828 vhost_iface_and_queue_t *vhiq;
2829 u32 ci;
2830
Ed Warnickecb9cada2015-12-08 15:45:58 -07002831 int i, j, q;
2832 int show_descr = 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02002833 struct feat_struct
2834 {
2835 u8 bit;
2836 char *str;
2837 };
Ed Warnickecb9cada2015-12-08 15:45:58 -07002838 struct feat_struct *feat_entry;
2839
2840 static struct feat_struct feat_array[] = {
2841#define _(s,b) { .str = #s, .bit = b, },
Damjan Marion00a9dca2016-08-17 17:05:46 +02002842 foreach_virtio_net_feature
Ed Warnickecb9cada2015-12-08 15:45:58 -07002843#undef _
Damjan Marion00a9dca2016-08-17 17:05:46 +02002844 {.str = NULL}
Ed Warnickecb9cada2015-12-08 15:45:58 -07002845 };
2846
Pierre Pfistere21c5282016-09-21 08:04:59 +01002847#define foreach_protocol_feature \
2848 _(VHOST_USER_PROTOCOL_F_MQ) \
2849 _(VHOST_USER_PROTOCOL_F_LOG_SHMFD)
2850
2851 static struct feat_struct proto_feat_array[] = {
2852#define _(s) { .str = #s, .bit = s},
2853 foreach_protocol_feature
2854#undef _
2855 {.str = NULL}
2856 };
2857
Damjan Marion00a9dca2016-08-17 17:05:46 +02002858 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2859 {
2860 if (unformat
2861 (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
2862 {
2863 vec_add1 (hw_if_indices, hw_if_index);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002864 }
2865 else if (unformat (input, "descriptors") || unformat (input, "desc"))
2866 show_descr = 1;
2867 else
2868 {
2869 error = clib_error_return (0, "unknown input `%U'",
2870 format_unformat_error, input);
2871 goto done;
2872 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07002873 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02002874 if (vec_len (hw_if_indices) == 0)
2875 {
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002876 pool_foreach (vui, vum->vhost_user_interfaces,
2877 vec_add1 (hw_if_indices, vui->hw_if_index);
2878 );
Ed Warnickecb9cada2015-12-08 15:45:58 -07002879 }
Damjan Marion00a9dca2016-08-17 17:05:46 +02002880 vlib_cli_output (vm, "Virtio vhost-user interfaces");
Pierre Pfistere21c5282016-09-21 08:04:59 +01002881 vlib_cli_output (vm, "Global:\n coalesce frames %d time %e",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002882 vum->coalesce_frames, vum->coalesce_time);
2883
2884 for (i = 0; i < vec_len (hw_if_indices); i++)
2885 {
2886 hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002887 vui = pool_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002888 vlib_cli_output (vm, "Interface: %s (ifindex %d)",
2889 hi->name, hw_if_indices[i]);
2890
Pierre Pfistere21c5282016-09-21 08:04:59 +01002891 vlib_cli_output (vm, "virtio_net_hdr_sz %d\n"
2892 " features mask (0x%llx): \n"
2893 " features (0x%llx): \n",
2894 vui->virtio_net_hdr_sz, vui->feature_mask,
2895 vui->features);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002896
2897 feat_entry = (struct feat_struct *) &feat_array;
2898 while (feat_entry->str)
2899 {
Pierre Pfistere21c5282016-09-21 08:04:59 +01002900 if (vui->features & (1ULL << feat_entry->bit))
2901 vlib_cli_output (vm, " %s (%d)", feat_entry->str,
2902 feat_entry->bit);
2903 feat_entry++;
2904 }
2905
2906 vlib_cli_output (vm, " protocol features (0x%llx)",
2907 vui->protocol_features);
2908 feat_entry = (struct feat_struct *) &proto_feat_array;
2909 while (feat_entry->str)
2910 {
2911 if (vui->protocol_features & (1ULL << feat_entry->bit))
Damjan Marion00a9dca2016-08-17 17:05:46 +02002912 vlib_cli_output (vm, " %s (%d)", feat_entry->str,
2913 feat_entry->bit);
2914 feat_entry++;
2915 }
2916
2917 vlib_cli_output (vm, "\n");
2918
Damjan Marion00a9dca2016-08-17 17:05:46 +02002919 vlib_cli_output (vm, " socket filename %s type %s errno \"%s\"\n\n",
2920 vui->sock_filename,
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002921 (vui->unix_server_index != ~0) ? "server" : "client",
Damjan Marion00a9dca2016-08-17 17:05:46 +02002922 strerror (vui->sock_errno));
2923
Pierre Pfistere21c5282016-09-21 08:04:59 +01002924 vlib_cli_output (vm, " rx placement: ");
2925 vec_foreach (vhc, vum->cpus)
2926 {
2927 vec_foreach (vhiq, vhc->rx_queues)
2928 {
2929 if (vhiq->vhost_iface_index == vui - vum->vhost_user_interfaces)
2930 vlib_cli_output (vm, " thread %d on vring %d\n",
2931 vhc - vum->cpus, VHOST_VRING_IDX_TX (vhiq->qid));
2932 }
2933 }
2934
2935 vlib_cli_output (vm, " tx placement: %s\n",
2936 vui->use_tx_spinlock ? "spin-lock" : "lock-free");
2937
2938 vec_foreach_index (ci, vui->per_cpu_tx_qid)
2939 {
2940 vlib_cli_output (vm, " thread %d on vring %d\n", ci,
2941 VHOST_VRING_IDX_RX (vui->per_cpu_tx_qid[ci]));
2942 }
2943
2944 vlib_cli_output (vm, "\n");
2945
Damjan Marion00a9dca2016-08-17 17:05:46 +02002946 vlib_cli_output (vm, " Memory regions (total %d)\n", vui->nregions);
2947
2948 if (vui->nregions)
2949 {
2950 vlib_cli_output (vm,
2951 " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
2952 vlib_cli_output (vm,
2953 " ====== ===== ================== ================== ================== ================== ==================\n");
2954 }
2955 for (j = 0; j < vui->nregions; j++)
2956 {
2957 vlib_cli_output (vm,
2958 " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
2959 j, vui->region_mmap_fd[j],
2960 vui->regions[j].guest_phys_addr,
2961 vui->regions[j].memory_size,
2962 vui->regions[j].userspace_addr,
2963 vui->regions[j].mmap_offset,
2964 pointer_to_uword (vui->region_mmap_addr[j]));
2965 }
Pierre Pfistere21c5282016-09-21 08:04:59 +01002966 for (q = 0; q < VHOST_VRING_MAX_N; q++)
Damjan Marion00a9dca2016-08-17 17:05:46 +02002967 {
Pierre Pfistere21c5282016-09-21 08:04:59 +01002968 if (!vui->vrings[q].started)
2969 continue;
2970
2971 vlib_cli_output (vm, "\n Virtqueue %d (%s%s)\n", q,
2972 (q & 1) ? "RX" : "TX",
2973 vui->vrings[q].enabled ? "" : " disabled");
Damjan Marion00a9dca2016-08-17 17:05:46 +02002974
2975 vlib_cli_output (vm,
2976 " qsz %d last_avail_idx %d last_used_idx %d\n",
2977 vui->vrings[q].qsz, vui->vrings[q].last_avail_idx,
2978 vui->vrings[q].last_used_idx);
2979
2980 if (vui->vrings[q].avail && vui->vrings[q].used)
2981 vlib_cli_output (vm,
2982 " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
2983 vui->vrings[q].avail->flags,
2984 vui->vrings[q].avail->idx,
2985 vui->vrings[q].used->flags,
2986 vui->vrings[q].used->idx);
2987
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002988 int kickfd = UNIX_GET_FD (vui->vrings[q].kickfd_idx);
2989 int callfd = UNIX_GET_FD (vui->vrings[q].callfd_idx);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002990 vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n",
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00002991 kickfd, callfd, vui->vrings[q].errfd);
Damjan Marion00a9dca2016-08-17 17:05:46 +02002992
2993 if (show_descr)
2994 {
2995 vlib_cli_output (vm, "\n descriptor table:\n");
2996 vlib_cli_output (vm,
2997 " id addr len flags next user_addr\n");
2998 vlib_cli_output (vm,
2999 " ===== ================== ===== ====== ===== ==================\n");
3000 for (j = 0; j < vui->vrings[q].qsz; j++)
3001 {
Pierre Pfister11f92052016-09-21 08:08:55 +01003002 u32 mem_hint = 0;
Damjan Marion00a9dca2016-08-17 17:05:46 +02003003 vlib_cli_output (vm,
3004 " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
3005 j, vui->vrings[q].desc[j].addr,
3006 vui->vrings[q].desc[j].len,
3007 vui->vrings[q].desc[j].flags,
3008 vui->vrings[q].desc[j].next,
3009 pointer_to_uword (map_guest_mem
3010 (vui,
Pierre Pfisterba1d0462016-07-27 16:38:20 +01003011 vui->vrings[q].desc[j].
Pierre Pfister11f92052016-09-21 08:08:55 +01003012 addr, &mem_hint)));
Damjan Marion00a9dca2016-08-17 17:05:46 +02003013 }
3014 }
3015 }
3016 vlib_cli_output (vm, "\n");
3017 }
Ed Warnickecb9cada2015-12-08 15:45:58 -07003018done:
3019 vec_free (hw_if_indices);
3020 return error;
3021}
3022
Damjan Marion8d281b32016-08-24 14:32:39 +02003023/*
3024 * CLI functions
3025 */
3026
Billy McFalla92501a2016-11-23 12:45:29 -05003027/*?
3028 * Create a vHost User interface. Once created, a new virtual interface
3029 * will exist with the name '<em>VirtualEthernet0/0/x</em>', where '<em>x</em>'
3030 * is the next free index.
3031 *
3032 * There are several parameters associated with a vHost interface:
3033 *
 3034 * - <b>socket <socket-filename></b> - Name of the Linux socket used by QEMU/VM and
 3035 * VPP to manage the vHost interface. If the socket does not already exist, VPP will
3036 * create the socket.
3037 *
3038 * - <b>server</b> - Optional flag to indicate that VPP should be the server for the
 3039 * Linux socket. If not provided, VPP will be the client.
3040 *
3041 * - <b>feature-mask <hex></b> - Optional virtio/vhost feature set negotiated at
3042 * startup. By default, all supported features will be advertised. Otherwise,
3043 * provide the set of features desired.
3044 * - 0x000008000 (15) - VIRTIO_NET_F_MRG_RXBUF
3045 * - 0x000020000 (17) - VIRTIO_NET_F_CTRL_VQ
3046 * - 0x000200000 (21) - VIRTIO_NET_F_GUEST_ANNOUNCE
3047 * - 0x000400000 (22) - VIRTIO_NET_F_MQ
3048 * - 0x004000000 (26) - VHOST_F_LOG_ALL
3049 * - 0x008000000 (27) - VIRTIO_F_ANY_LAYOUT
3050 * - 0x010000000 (28) - VIRTIO_F_INDIRECT_DESC
3051 * - 0x040000000 (30) - VHOST_USER_F_PROTOCOL_FEATURES
3052 * - 0x100000000 (32) - VIRTIO_F_VERSION_1
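 *
 * For example, to advertise only VIRTIO_NET_F_MQ (bit 22) and
 * VHOST_USER_F_PROTOCOL_FEATURES (bit 30), set the mask to
 * (1 << 22) | (1 << 30) = 0x40400000, the value used in the multi-queue
 * example below.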
3053 *
 3054 * - <b>hwaddr <mac-addr></b> - Optional Ethernet address, can be in either
 3055 * X:X:X:X:X:X Unix or X.X.X Cisco format.
3056 *
3057 * - <b>renumber <dev_instance></b> - Optional parameter which allows the instance
 3058 * in the name to be specified. If the instance already exists, the name will be used
3059 * anyway and multiple instances will have the same name. Use with caution.
3060 *
3061 * @cliexpar
3062 * Example of how to create a vhost interface with VPP as the client and all features enabled:
3063 * @cliexstart{create vhost-user socket /tmp/vhost1.sock}
3064 * VirtualEthernet0/0/0
3065 * @cliexend
3066 * Example of how to create a vhost interface with VPP as the server and with just
3067 * multiple queues enabled:
3068 * @cliexstart{create vhost-user socket /tmp/vhost2.sock server feature-mask 0x40400000}
3069 * VirtualEthernet0/0/1
3070 * @cliexend
3071 * Once the vHost interface is created, enable the interface using:
3072 * @cliexcmd{set interface state VirtualEthernet0/0/0 up}
3073?*/
Damjan Marion8d281b32016-08-24 14:32:39 +02003074/* *INDENT-OFF* */
3075VLIB_CLI_COMMAND (vhost_user_connect_command, static) = {
3076 .path = "create vhost-user",
Billy McFalla92501a2016-11-23 12:45:29 -05003077 .short_help = "create vhost-user socket <socket-filename> [server] [feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>]",
Damjan Marion8d281b32016-08-24 14:32:39 +02003078 .function = vhost_user_connect_command_fn,
3079};
Billy McFalla92501a2016-11-23 12:45:29 -05003080/* *INDENT-ON* */
Damjan Marion8d281b32016-08-24 14:32:39 +02003081
Billy McFalla92501a2016-11-23 12:45:29 -05003082/*?
3083 * Delete a vHost User interface using the interface name or the
3084 * software interface index. Use the '<em>show interfaces</em>'
3085 * command to determine the software interface index. On deletion,
 3086 * the Linux socket will not be deleted.
3087 *
3088 * @cliexpar
3089 * Example of how to delete a vhost interface by name:
3090 * @cliexcmd{delete vhost-user VirtualEthernet0/0/1}
3091 * Example of how to delete a vhost interface by software interface index:
3092 * @cliexcmd{delete vhost-user sw_if_index 1}
3093?*/
3094/* *INDENT-OFF* */
Damjan Marion8d281b32016-08-24 14:32:39 +02003095VLIB_CLI_COMMAND (vhost_user_delete_command, static) = {
3096 .path = "delete vhost-user",
Billy McFalla92501a2016-11-23 12:45:29 -05003097 .short_help = "delete vhost-user {<interface> | sw_if_index <sw_idx>}",
Damjan Marion8d281b32016-08-24 14:32:39 +02003098 .function = vhost_user_delete_command_fn,
3099};
3100
Billy McFalla92501a2016-11-23 12:45:29 -05003101/*?
3102 * Display the attributes of a single vHost User interface (provide interface
 3103 * name), multiple vHost User interfaces (provide a list of interface names separated
 3104 * by spaces) or all vHost User interfaces (omit an interface name to display all
3105 * vHost interfaces).
3106 *
3107 * @cliexpar
3108 * @parblock
3109 * Example of how to display a vhost interface:
3110 * @cliexstart{show vhost-user VirtualEthernet0/0/0}
3111 * Virtio vhost-user interfaces
3112 * Global:
3113 * coalesce frames 32 time 1e-3
3114 * Interface: VirtualEthernet0/0/0 (ifindex 1)
3115 * virtio_net_hdr_sz 12
3116 * features mask (0xffffffffffffffff):
3117 * features (0x50408000):
3118 * VIRTIO_NET_F_MRG_RXBUF (15)
3119 * VIRTIO_NET_F_MQ (22)
3120 * VIRTIO_F_INDIRECT_DESC (28)
3121 * VHOST_USER_F_PROTOCOL_FEATURES (30)
3122 * protocol features (0x3)
3123 * VHOST_USER_PROTOCOL_F_MQ (0)
3124 * VHOST_USER_PROTOCOL_F_LOG_SHMFD (1)
3125 *
3126 * socket filename /tmp/vhost1.sock type client errno "Success"
3127 *
3128 * rx placement:
3129 * thread 1 on vring 1
3130 * thread 1 on vring 5
3131 * thread 2 on vring 3
3132 * thread 2 on vring 7
3133 * tx placement: spin-lock
3134 * thread 0 on vring 0
3135 * thread 1 on vring 2
3136 * thread 2 on vring 0
3137 *
3138 * Memory regions (total 2)
3139 * region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr
3140 * ====== ===== ================== ================== ================== ================== ==================
3141 * 0 60 0x0000000000000000 0x00000000000a0000 0x00002aaaaac00000 0x0000000000000000 0x00002aab2b400000
3142 * 1 61 0x00000000000c0000 0x000000003ff40000 0x00002aaaaacc0000 0x00000000000c0000 0x00002aababcc0000
3143 *
3144 * Virtqueue 0 (TX)
3145 * qsz 256 last_avail_idx 0 last_used_idx 0
3146 * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
3147 * kickfd 62 callfd 64 errfd -1
3148 *
3149 * Virtqueue 1 (RX)
3150 * qsz 256 last_avail_idx 0 last_used_idx 0
3151 * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
3152 * kickfd 65 callfd 66 errfd -1
3153 *
3154 * Virtqueue 2 (TX)
3155 * qsz 256 last_avail_idx 0 last_used_idx 0
3156 * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
3157 * kickfd 63 callfd 70 errfd -1
3158 *
3159 * Virtqueue 3 (RX)
3160 * qsz 256 last_avail_idx 0 last_used_idx 0
3161 * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
3162 * kickfd 72 callfd 74 errfd -1
3163 *
3164 * Virtqueue 4 (TX disabled)
3165 * qsz 256 last_avail_idx 0 last_used_idx 0
3166 * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
3167 * kickfd 76 callfd 78 errfd -1
3168 *
3169 * Virtqueue 5 (RX disabled)
3170 * qsz 256 last_avail_idx 0 last_used_idx 0
3171 * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
3172 * kickfd 80 callfd 82 errfd -1
3173 *
3174 * Virtqueue 6 (TX disabled)
3175 * qsz 256 last_avail_idx 0 last_used_idx 0
3176 * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
3177 * kickfd 84 callfd 86 errfd -1
3178 *
3179 * Virtqueue 7 (RX disabled)
3180 * qsz 256 last_avail_idx 0 last_used_idx 0
3181 * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
3182 * kickfd 88 callfd 90 errfd -1
3183 *
3184 * @cliexend
3185 *
3186 * The optional '<em>descriptors</em>' parameter will display the same output as
3187 * the previous example but will include the descriptor table for each queue.
3188 * The output is truncated below:
3189 * @cliexstart{show vhost-user VirtualEthernet0/0/0 descriptors}
3190 * Virtio vhost-user interfaces
3191 * Global:
3192 * coalesce frames 32 time 1e-3
3193 * Interface: VirtualEthernet0/0/0 (ifindex 1)
3194 * virtio_net_hdr_sz 12
3195 * features mask (0xffffffffffffffff):
3196 * features (0x50408000):
3197 * VIRTIO_NET_F_MRG_RXBUF (15)
3198 * VIRTIO_NET_F_MQ (22)
3199 * :
3200 * Virtqueue 0 (TX)
3201 * qsz 256 last_avail_idx 0 last_used_idx 0
3202 * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
3203 * kickfd 62 callfd 64 errfd -1
3204 *
3205 * descriptor table:
3206 * id addr len flags next user_addr
3207 * ===== ================== ===== ====== ===== ==================
3208 * 0 0x0000000010b6e974 2060 0x0002 1 0x00002aabbc76e974
3209 * 1 0x0000000010b6e034 2060 0x0002 2 0x00002aabbc76e034
3210 * 2 0x0000000010b6d6f4 2060 0x0002 3 0x00002aabbc76d6f4
3211 * 3 0x0000000010b6cdb4 2060 0x0002 4 0x00002aabbc76cdb4
3212 * 4 0x0000000010b6c474 2060 0x0002 5 0x00002aabbc76c474
3213 * 5 0x0000000010b6bb34 2060 0x0002 6 0x00002aabbc76bb34
3214 * 6 0x0000000010b6b1f4 2060 0x0002 7 0x00002aabbc76b1f4
3215 * 7 0x0000000010b6a8b4 2060 0x0002 8 0x00002aabbc76a8b4
3216 * 8 0x0000000010b69f74 2060 0x0002 9 0x00002aabbc769f74
3217 * 9 0x0000000010b69634 2060 0x0002 10 0x00002aabbc769634
3218 * 10 0x0000000010b68cf4 2060 0x0002 11 0x00002aabbc768cf4
3219 * :
3220 * 249 0x0000000000000000 0 0x0000 250 0x00002aab2b400000
3221 * 250 0x0000000000000000 0 0x0000 251 0x00002aab2b400000
3222 * 251 0x0000000000000000 0 0x0000 252 0x00002aab2b400000
3223 * 252 0x0000000000000000 0 0x0000 253 0x00002aab2b400000
3224 * 253 0x0000000000000000 0 0x0000 254 0x00002aab2b400000
3225 * 254 0x0000000000000000 0 0x0000 255 0x00002aab2b400000
3226 * 255 0x0000000000000000 0 0x0000 32768 0x00002aab2b400000
3227 *
3228 * Virtqueue 1 (RX)
3229 * qsz 256 last_avail_idx 0 last_used_idx 0
3230 * :
3231 * @cliexend
3232 * @endparblock
3233?*/
3234/* *INDENT-OFF* */
Damjan Marion8d281b32016-08-24 14:32:39 +02003235VLIB_CLI_COMMAND (show_vhost_user_command, static) = {
3236 .path = "show vhost-user",
Billy McFalla92501a2016-11-23 12:45:29 -05003237 .short_help = "show vhost-user [<interface> [<interface> [..]]] [descriptors]",
Damjan Marion8d281b32016-08-24 14:32:39 +02003238 .function = show_vhost_user_command_fn,
3239};
3240/* *INDENT-ON* */
Damjan Marion8d281b32016-08-24 14:32:39 +02003241
Ed Warnickecb9cada2015-12-08 15:45:58 -07003242static clib_error_t *
3243vhost_user_config (vlib_main_t * vm, unformat_input_t * input)
3244{
Damjan Marion00a9dca2016-08-17 17:05:46 +02003245 vhost_user_main_t *vum = &vhost_user_main;
Ed Warnickecb9cada2015-12-08 15:45:58 -07003246
3247 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
3248 {
3249 if (unformat (input, "coalesce-frames %d", &vum->coalesce_frames))
Damjan Marion00a9dca2016-08-17 17:05:46 +02003250 ;
Ed Warnickecb9cada2015-12-08 15:45:58 -07003251 else if (unformat (input, "coalesce-time %f", &vum->coalesce_time))
Damjan Marion00a9dca2016-08-17 17:05:46 +02003252 ;
Ed Warnickecb9cada2015-12-08 15:45:58 -07003253 else if (unformat (input, "dont-dump-memory"))
Damjan Marion00a9dca2016-08-17 17:05:46 +02003254 vum->dont_dump_vhost_user_memory = 1;
Ed Warnickecb9cada2015-12-08 15:45:58 -07003255 else
Damjan Marion00a9dca2016-08-17 17:05:46 +02003256 return clib_error_return (0, "unknown input `%U'",
3257 format_unformat_error, input);
Ed Warnickecb9cada2015-12-08 15:45:58 -07003258 }
3259
3260 return 0;
3261}
3262
3263/* vhost-user { ... } configuration. */
3264VLIB_CONFIG_FUNCTION (vhost_user_config, "vhost-user");
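
/*
 * Example startup.conf stanza accepted by the parser above (a sketch; the
 * values shown are illustrative, not recommendations):
 *
 *   vhost-user {
 *     coalesce-frames 32
 *     coalesce-time 0.001
 *     dont-dump-memory
 *   }
 */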
3265
3266void
3267vhost_user_unmap_all (void)
3268{
Damjan Marion00a9dca2016-08-17 17:05:46 +02003269 vhost_user_main_t *vum = &vhost_user_main;
3270 vhost_user_intf_t *vui;
Ed Warnickecb9cada2015-12-08 15:45:58 -07003271
3272 if (vum->dont_dump_vhost_user_memory)
3273 {
Pierre Pfisterdbb3c252016-11-22 10:33:34 +00003274 pool_foreach (vui, vum->vhost_user_interfaces,
3275 unmap_all_mem_regions (vui);
3276 );
Ed Warnickecb9cada2015-12-08 15:45:58 -07003277 }
3278}
Damjan Marion00a9dca2016-08-17 17:05:46 +02003279
Pierre Pfistere21c5282016-09-21 08:04:59 +01003280static clib_error_t *
3281vhost_thread_command_fn (vlib_main_t * vm,
3282 unformat_input_t * input, vlib_cli_command_t * cmd)
3283{
3284 unformat_input_t _line_input, *line_input = &_line_input;
3285 u32 worker_thread_index;
3286 u32 sw_if_index;
3287 u8 del = 0;
3288 int rv;
3289
3290 /* Get a line of input. */
3291 if (!unformat_user (input, unformat_line_input, line_input))
3292 return 0;
3293
3294 if (!unformat
3295 (line_input, "%U %d", unformat_vnet_sw_interface, vnet_get_main (),
3296 &sw_if_index, &worker_thread_index))
3297 {
3298 unformat_free (line_input);
3299 return clib_error_return (0, "unknown input `%U'",
3300 format_unformat_error, input);
3301 }
3302
3303 if (unformat (line_input, "del"))
3304 del = 1;
3305
3306 if ((rv =
3307 vhost_user_thread_placement (sw_if_index, worker_thread_index, del)))
3308 return clib_error_return (0, "vhost_user_thread_placement returned %d",
3309 rv);
3310 return 0;
3311}
3312
3313
Billy McFalla92501a2016-11-23 12:45:29 -05003314/*?
3315 * This command is used to move the RX processing for the given
 3316 * interface to the provided thread. If the '<em>del</em>' option is used,
 3317 * the forced thread assignment is removed and the thread assignment is
3318 * reassigned automatically. Use '<em>show vhost-user <interface></em>'
3319 * to see the thread assignment.
3320 *
3321 * @cliexpar
3322 * Example of how to move the RX processing for a given interface to a given thread:
3323 * @cliexcmd{vhost thread VirtualEthernet0/0/0 1}
3324 * Example of how to remove the forced thread assignment for a given interface:
3325 * @cliexcmd{vhost thread VirtualEthernet0/0/0 1 del}
3326?*/
Pierre Pfistere21c5282016-09-21 08:04:59 +01003327/* *INDENT-OFF* */
3328VLIB_CLI_COMMAND (vhost_user_thread_command, static) = {
3329 .path = "vhost thread",
3330 .short_help = "vhost thread <iface> <worker-index> [del]",
3331 .function = vhost_thread_command_fn,
3332};
3333/* *INDENT-ON* */
3334
Damjan Marion00a9dca2016-08-17 17:05:46 +02003335/*
3336 * fd.io coding-style-patch-verification: ON
3337 *
3338 * Local Variables:
3339 * eval: (c-set-style "gnu")
3340 * End:
3341 */