/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stddef.h>
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order to not corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 200 as headroom for
 * the extra entries the inner loop may need to finish a 64K jumbo frame:
 * the default buffer size is 2048 while the default descriptor length is
 * likely 1536, so although such a frame takes fewer than 40 vlib buffers,
 * it may take roughly twice as many descriptors.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)

extern vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error                                    \
  _(NONE, "no error")                                                       \
  _(NOT_READY, "vhost vring not ready")                                     \
  _(DOWN, "vhost interface is down")                                        \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")           \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")           \
  _(MMAP_FAIL, "mmap failure")                                              \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
  VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

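/**
 * @brief Format the display name of a vhost-user interface, using the
 * renumbered show instance when one has been recorded.
 */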
static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

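/**
 * @brief Record a new show-time device instance for an interface so that
 * format_vhost_user_interface_name() displays the renumbered value.
 */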
static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
                                               hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
                           hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
                hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return clib_atomic_test_and_set (vui->vring_locks[qid]);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  clib_atomic_release (vui->vring_locks[qid]);
}

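/**
 * @brief Fill in a per-packet trace record: device index, queue id, the
 * flags of the first descriptor (indirect, chained or single) and the
 * length of the header descriptor.
 */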
static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
                     vhost_user_intf_t * vui, u16 qid,
                     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

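/**
 * @brief Execute the copy orders accumulated in the cpy array, resolving
 * each guest-physical destination with map_guest_mem(). The main loop
 * performs two copies per iteration while pre-mapping the destinations and
 * prefetching the sources of the next two; the trailing loop drains the
 * remainder.
 * @return 0 on success, 1 if a guest address fails to map.
 */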
static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
                    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
        return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
        return 1;
      while (PREDICT_TRUE (copy_len >= 4))
        {
          dst0 = dst2;
          dst1 = dst3;

          if (PREDICT_FALSE
              (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
            return 1;
          if (PREDICT_FALSE
              (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
            return 1;

          CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
          CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

          clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
          clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);

          vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
          vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
          copy_len -= 2;
          cpy += 2;
        }
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
        return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

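/**
 * @brief Translate vlib buffer offload flags into the virtio net header:
 * set the checksum start/offset for TCP/UDP checksum offload and the GSO
 * type/size when the guest negotiated the matching TSO/UFO features.
 */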
static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
                              virtio_net_hdr_t * hdr)
{
  /* checksum offload */
  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
    }
  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          if ((b->flags & VNET_BUFFER_F_IS_IP4) &&
              (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
            {
              hdr->gso_size = vnet_buffer2 (b)->gso_size;
              hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
            }
          else if ((b->flags & VNET_BUFFER_F_IS_IP6) &&
                   (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
            {
              hdr->gso_size = vnet_buffer2 (b)->gso_size;
              hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
            }
        }
      else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
               (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
        {
          hdr->gso_size = vnet_buffer2 (b)->gso_size;
          hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
        }
    }
}

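/*
 * Device class TX function: for each vlib buffer, reserve descriptors from
 * the guest's RX vring, build a virtio net header, and queue copy orders
 * that are executed in batches by vhost_user_tx_copy(). Descriptors are
 * moved from the avail ring to the used ring as packets complete, and the
 * guest is notified when the interrupt coalescing threshold is reached.
 */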
VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
                                                    vlib_node_runtime_t *
                                                    node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_ready))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
                                               thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (rxvq->avail == 0))
    {
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      goto done3;
    }

  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
        vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace = vlib_add_trace (vm, node, b0,
                                               sizeof (*cpu->current_trace));
          vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
        }

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
        {
          error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
          goto done;
        }

      desc_table = rxvq->desc;
      desc_head = desc_index =
        rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of an indirect descriptor.
       * We are not aware of any driver providing indirect descriptors
       * for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
        {
          if (PREDICT_FALSE
              (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
            {
              error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
              goto done;
            }
          if (PREDICT_FALSE
              (!(desc_table =
                 map_guest_mem (vui, rxvq->desc[desc_index].addr,
                                &map_hint))))
            {
              error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
              goto done;
            }
          desc_index = 0;
        }

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
        // Get a header from the header array
        virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
        tx_headers_len++;
        hdr->hdr.flags = 0;
        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
        hdr->num_buffers = 1;	//This is local, no need to check

        /* Guest supports csum offload? */
        if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM))
          vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

        // Prepare a copy order executed later for the header
        ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
        vhost_copy_t *cpy = &cpu->copy[copy_len];
        copy_len++;
        cpy->len = vui->virtio_net_hdr_sz;
        cpy->dst = buffer_map_addr;
        cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
        {
          if (buffer_len == 0)
            {			//Get a new output descriptor
              if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
                {
                  //Next one is chained
                  desc_index = desc_table[desc_index].next;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
                {
                  virtio_net_hdr_mrg_rxbuf_t *hdr =
                    &cpu->tx_headers[tx_headers_len - 1];

                  //Move from available to used buffer
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
                    desc_head;
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
                    desc_len;
                  vhost_user_log_dirty_ring (vui, rxvq,
                                             ring[rxvq->last_used_idx &
                                                  rxvq->qsz_mask]);

                  rxvq->last_avail_idx++;
                  rxvq->last_used_idx++;
                  hdr->num_buffers++;
                  desc_len = 0;

                  if (PREDICT_FALSE
                      (rxvq->last_avail_idx == rxvq->avail->idx))
                    {
                      //Dequeue queued descriptors for this packet
                      rxvq->last_used_idx -= hdr->num_buffers - 1;
                      rxvq->last_avail_idx -= hdr->num_buffers - 1;
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }

                  desc_table = rxvq->desc;
                  desc_head = desc_index =
                    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
                  if (PREDICT_FALSE
                      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
                    {
                      //It is seriously unlikely that a driver will put an
                      //indirect descriptor after a non-indirect descriptor.
                      if (PREDICT_FALSE
                          (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
                          goto done;
                        }
                      if (PREDICT_FALSE
                          (!(desc_table =
                             map_guest_mem (vui,
                                            rxvq->desc[desc_index].addr,
                                            &map_hint))))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
                          goto done;
                        }
                      desc_index = 0;
                    }
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else
                {
                  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
                  goto done;
                }
            }

          {
            ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
            vhost_copy_t *cpy = &cpu->copy[copy_len];
            copy_len++;
            cpy->len = bytes_left;
            cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
            cpy->dst = buffer_map_addr;
            cpy->src = (uword) vlib_buffer_get_current (current_b0) +
              current_b0->current_length - bytes_left;

            bytes_left -= cpy->len;
            buffer_len -= cpy->len;
            buffer_map_addr += cpy->len;
            desc_len += cpy->len;

            CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
          }

          // Check if vlib buffer has more data. If not, get more or break.
          if (PREDICT_TRUE (!bytes_left))
            {
              if (PREDICT_FALSE
                  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
                {
                  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
                  bytes_left = current_b0->current_length;
                }
              else
                {
                  //End of packet
                  break;
                }
            }
        }

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
                                 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
        }

      n_left--;	//Decremented at the end so a 'goto done' above still counts this packet as an error

      /*
       * Do the copy periodically to prevent the cpu->copy array from
       * overflowing and corrupting memory.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
        {
          if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                                 &map_hint)))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
            }
          copy_len = 0;

          /* give buffers back to driver */
          CLIB_MEMORY_BARRIER ();
          rxvq->used->idx = rxvq->last_used_idx;
          vhost_user_log_dirty_ring (vui, rxvq, idx);
        }
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                         &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is non-zero, error is always set as well.
   * If the error is due to a lack of remaining descriptors, go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch more fresh packets
   * that are likely to be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * When traffic flows from physical to virtual interfaces, it ends up
   * leveraging the physical NIC buffers to absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, rxvq);
    }

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
        (vnet_main.interface_main.sw_if_counters
         + VNET_INTERFACE_COUNTER_DROP,
         thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

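/*
 * Called when the rx-placement mode of a queue changes. Interrupt and
 * adaptive modes require the guest's kick fd; the coalescing timer process
 * is started when the first interrupt-mode queue appears and stopped when
 * the last one returns to polling. The used-ring flags tell the guest
 * whether it still needs to notify us.
 */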
static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
                                     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
        {
          // We cannot support interrupt mode if the driver opts out
          return clib_error_return (0, "Driver does not support interrupt");
        }
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
        {
          vum->ifq_count++;
          // Start the timer if this is the first encounter on interrupt
          // interface/queue
          if ((vum->ifq_count == 1) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_START_TIMER, 0);
        }
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
           (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
          vum->ifq_count)
        {
          vum->ifq_count--;
          // Stop the timer if there is no more interrupt interface/queue
          if ((vum->ifq_count == 0) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_STOP_TIMER, 0);
        }
    }

  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
           (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
                  hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}

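/*
 * Admin up/down handler: records the new admin state and updates the
 * hardware link flags when the link state reported by vui_is_link_up()
 * changes as a result.
 */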
static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
                                    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
                                 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */