/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stddef.h>
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

#include <vnet/gso/hdr_offset_parser.h>
/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order to not corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold. A headroom of 40 entries would
 * cover the inner loop for a maximum 64k frame, but our default buffer size
 * is 2048 while the default desc len is likely 1536: although a jumbo frame
 * takes fewer than 40 vlib buffers, it may take twice as many descriptors
 * for the same frame. Use 200 entries of headroom to be safe.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)
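
/*
 * Illustrative arithmetic for the headroom above, assuming the defaults the
 * comment mentions (2048-byte vlib buffers, 1536-byte descriptors): a
 * maximal 64k frame spans 65536 / 2048 = 32 vlib buffers but may need
 * 65536 / 1536 ~= 43 descriptors, so one pass through the inner loop can
 * queue noticeably more copy entries than the number of buffers consumed;
 * 200 entries cover that worst case with room to spare.
 */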

extern vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error \
  _(NONE, "no error") \
  _(NOT_READY, "vhost vring not ready") \
  _(DOWN, "vhost interface is down") \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)") \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
  VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
                                              hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
                           hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
                hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  clib_spinlock_lock_if_init (&vui->vrings[qid].vring_lock);
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  clib_spinlock_unlock_if_init (&vui->vrings[qid].vring_lock);
}
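
/*
 * Note: both helpers above are no-ops unless the vring's spinlock was
 * initialized. Per the TX path below, the lock is only taken when
 * vui->use_tx_spinlock is set, i.e. when a queue may be shared by
 * several worker threads.
 */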

static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
                     vhost_user_intf_t * vui, u16 qid,
                     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

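/*
 * Execute the copy orders queued in cpy[]. Guest physical destination
 * addresses are translated with map_guest_mem() (map_hint caches the last
 * matching region). The main loop handles two copies per iteration and maps
 * the destinations of the next pair up front, so address translation and
 * source prefetches overlap the current pair of memcpys. Dirty pages are
 * logged for live migration. Returns 0 on success, 1 if a guest address
 * fails to map.
 */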
static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
                    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
        return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
        return 1;
      while (PREDICT_TRUE (copy_len >= 4))
        {
          dst0 = dst2;
          dst1 = dst3;

          if (PREDICT_FALSE
              (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
            return 1;
          if (PREDICT_FALSE
              (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
            return 1;

          CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
          CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

          clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
          clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);

          vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
          vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
          copy_len -= 2;
          cpy += 2;
        }
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
        return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

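/*
 * Translate VPP buffer offload metadata into the virtio_net_hdr the guest
 * expects: the IPv4 header checksum is recomputed in place when requested
 * (virtio's NEEDS_CSUM mechanism only describes a single L4 checksum),
 * csum_start/csum_offset are pointed at the TCP or UDP checksum field, and
 * gso_size/gso_type are filled in when the buffer is marked for GSO and the
 * guest negotiated the matching TSO/UFO feature.
 */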
static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
                              virtio_net_hdr_t * hdr)
{
  generic_header_offset_t gho = { 0 };
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  int is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;
  u32 oflags = vnet_buffer2 (b)->oflags;

  ASSERT (!(is_ip4 && is_ip6));
  vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4, is_ip6);
  if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
    {
      ip4_header_t *ip4;

      ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }

  /* checksum offload */
  if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
    }
  else if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          if (is_ip4 &&
              (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO4)))
            {
              hdr->gso_size = vnet_buffer2 (b)->gso_size;
              hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
            }
          else if (is_ip6 &&
                   (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6)))
            {
              hdr->gso_size = vnet_buffer2 (b)->gso_size;
              hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
            }
        }
      else if ((vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_UFO)) &&
               (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
        {
          hdr->gso_size = vnet_buffer2 (b)->gso_size;
          hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
        }
    }
}

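/*
 * Worked example for the function above, assuming a TSO4 packet with a
 * 14-byte ethernet header and a 20-byte IPv4 header: gho.l4_hdr_offset is
 * 34, so the guest receives hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM,
 * hdr->csum_start = 34, hdr->csum_offset = 16 (the TCP checksum field),
 * hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4 and hdr->gso_size copied from
 * vnet_buffer2 (b)->gso_size, provided the guest negotiated
 * VIRTIO_NET_F_GUEST_TSO4.
 */

/*
 * Publish a batch of processed packed-ring descriptors back to the guest.
 * Each descriptor's AVAIL/USED flag bits are set or cleared according to
 * the current used wrap counter; the head descriptor's flags are stored
 * last, after every other descriptor in the batch, so the guest never
 * observes a partially returned chain. The guest is interrupted (callfd)
 * only if it has not disabled events and at least coalesce_frames packets
 * have accumulated since the last interrupt.
 */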
static_always_inline void
vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_intf_t * vui,
                                vhost_user_vring_t * rxvq,
                                u16 * n_descs_processed, u8 chained,
                                vlib_frame_t * frame, u32 n_left)
{
  u16 desc_idx, flags;
  vring_packed_desc_t *desc_table = rxvq->packed_desc;
  u16 last_used_idx = rxvq->last_used_idx;

  if (PREDICT_FALSE (*n_descs_processed == 0))
    return;

  if (rxvq->used_wrap_counter)
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags |
      (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
  else
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags &
      ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);

  vhost_user_advance_last_used_idx (rxvq);

  for (desc_idx = 1; desc_idx < *n_descs_processed; desc_idx++)
    {
      if (rxvq->used_wrap_counter)
        desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags |=
          (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      else
        desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &=
          ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      vhost_user_advance_last_used_idx (rxvq);
    }

  desc_table[last_used_idx & rxvq->qsz_mask].flags = flags;

  *n_descs_processed = 0;

  if (chained)
    {
      vring_packed_desc_t *desc_table = rxvq->packed_desc;

      while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
             VRING_DESC_F_NEXT)
        vhost_user_advance_last_used_idx (rxvq);

      /* Advance past the current chained table entries */
      vhost_user_advance_last_used_idx (rxvq);
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      (rxvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      vhost_user_main_t *vum = &vhost_user_main;

      rxvq->n_since_last_int += frame->n_vectors - n_left;
      if (rxvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, vui, rxvq);
    }
}

static_always_inline void
vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
                            u16 qid, vlib_buffer_t * b,
                            vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = last_avail_idx & rxvq->qsz_mask;
  vring_packed_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->packed_desc[desc_current];
  if (rxvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->packed_desc[desc_current].addr,
                                &hint);
    }
  if (rxvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(rxvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

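/*
 * TX path for packed-ring queues, the counterpart of the split-ring
 * function below: take descriptors from the guest RX vring, queue copy
 * orders (virtio header first, then buffer data), and hand the descriptors
 * back in batches. Indirect tables, chained descriptors and mergeable rx
 * buffers are handled as mutually exclusive cases when a packet needs
 * more room.
 */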
static_always_inline uword
vhost_user_device_class_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  vring_packed_desc_t *desc_table;
  u32 or_flags;
  u16 desc_head, desc_index, desc_len;
  u16 n_descs_processed;
  u8 indirect, chained;

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
                                               thread_index));
  rxvq = &vui->vrings[qid];

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  n_descs_processed = 0;

  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;
      u32 total_desc_len = 0;
      u16 n_entries = 0;

      indirect = 0;
      chained = 0;
      if (PREDICT_TRUE (n_left > 1))
        vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace = vlib_add_trace (vm, node, b0,
                                               sizeof (*cpu->current_trace));
          vhost_user_tx_trace_packed (cpu->current_trace, vui, qid / 2, b0,
                                      rxvq);
        }

      desc_table = rxvq->packed_desc;
      desc_head = desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
      if (PREDICT_FALSE (!vhost_user_packed_desc_available (rxvq, desc_head)))
        {
          error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
          goto done;
        }
      /*
       * Go deeper in case of indirect descriptor.
       * To test it, turn off mrg_rxbuf.
       */
      if (desc_table[desc_head].flags & VRING_DESC_F_INDIRECT)
        {
          indirect = 1;
          if (PREDICT_FALSE (desc_table[desc_head].len <
                             sizeof (vring_packed_desc_t)))
            {
              error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
              goto done;
            }
          n_entries = desc_table[desc_head].len >> 4;
          desc_table = map_guest_mem (vui, desc_table[desc_index].addr,
                                      &map_hint);
          if (PREDICT_FALSE (desc_table == 0))
            {
              error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
              goto done;
            }
          desc_index = 0;
        }
      else if (rxvq->packed_desc[desc_head].flags & VRING_DESC_F_NEXT)
        chained = 1;

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      /* Get a header from the header array */
      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
      tx_headers_len++;
      hdr->hdr.flags = 0;
      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
      hdr->num_buffers = 1;

      or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD);

      /* Guest supports csum offload and buffer requires checksum offload? */
      if (or_flags &&
          (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)))
        vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

      /* Prepare a copy order executed later for the header */
      ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[copy_len];
      copy_len++;
      cpy->len = vui->virtio_net_hdr_sz;
      cpy->dst = buffer_map_addr;
      cpy->src = (uword) hdr;

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
        {
          if (buffer_len == 0)
            {
              /* Get new output */
              if (chained)
                {
                  /*
                   * Next one is chained
                   * Test it with both indirect and mrg_rxbuf off
                   */
                  if (PREDICT_FALSE (!(desc_table[desc_index].flags &
                                       VRING_DESC_F_NEXT)))
                    {
                      /*
                       * Last descriptor in chain.
                       * Dequeue queued descriptors for this packet
                       */
                      vhost_user_dequeue_chained_descs (rxvq,
                                                        &n_descs_processed);
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }
                  vhost_user_advance_last_avail_idx (rxvq);
                  desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
                  n_descs_processed++;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                  total_desc_len += desc_len;
                  desc_len = 0;
                }
              else if (indirect)
                {
                  /*
                   * Indirect table
                   * Test it with mrg_rxbuf off
                   */
                  if (PREDICT_TRUE (n_entries > 0))
                    n_entries--;
                  else
                    {
                      /* Dequeue queued descriptors for this packet */
                      vhost_user_dequeue_chained_descs (rxvq,
                                                        &n_descs_processed);
                      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
                      goto done;
                    }
                  total_desc_len += desc_len;
                  desc_index = (desc_index + 1) & rxvq->qsz_mask;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                  desc_len = 0;
                }
              else if (vui->virtio_net_hdr_sz == 12)
                {
                  /*
                   * MRG is available
                   * This is the default setting for the guest VM
                   */
                  virtio_net_hdr_mrg_rxbuf_t *hdr =
                    &cpu->tx_headers[tx_headers_len - 1];

                  desc_table[desc_index].len = desc_len;
                  vhost_user_advance_last_avail_idx (rxvq);
                  desc_head = desc_index =
                    rxvq->last_avail_idx & rxvq->qsz_mask;
                  hdr->num_buffers++;
                  n_descs_processed++;
                  desc_len = 0;

                  if (PREDICT_FALSE (!vhost_user_packed_desc_available
                                     (rxvq, desc_index)))
                    {
                      /* Dequeue queued descriptors for this packet */
                      vhost_user_dequeue_descs (rxvq, hdr,
                                                &n_descs_processed);
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }

                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else
                {
                  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
                  goto done;
                }
            }

          ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
          vhost_copy_t *cpy = &cpu->copy[copy_len];
          copy_len++;
          cpy->len = bytes_left;
          cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
          cpy->dst = buffer_map_addr;
          cpy->src = (uword) vlib_buffer_get_current (current_b0) +
            current_b0->current_length - bytes_left;

          bytes_left -= cpy->len;
          buffer_len -= cpy->len;
          buffer_map_addr += cpy->len;
          desc_len += cpy->len;

          CLIB_PREFETCH (&rxvq->packed_desc, CLIB_CACHE_LINE_BYTES, LOAD);

          /* Check if vlib buffer has more data. If not, get more or break */
          if (PREDICT_TRUE (!bytes_left))
            {
              if (PREDICT_FALSE
                  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
                {
                  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
                  bytes_left = current_b0->current_length;
                }
              else
                {
                  /* End of packet */
                  break;
                }
            }
        }

      /* Move from available to used ring */
      total_desc_len += desc_len;
      rxvq->packed_desc[desc_head].len = total_desc_len;

      vhost_user_advance_last_avail_table_idx (vui, rxvq, chained);
      n_descs_processed++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];

      n_left--;

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and corrupt memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD) || chained)
        {
          if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                                 &map_hint)))
            vlib_error_count (vm, node->node_index,
                              VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
          copy_len = 0;

          /* give buffers back to driver */
          vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
                                          chained, frame, n_left);
        }

      buffers++;
    }

done:
  if (PREDICT_TRUE (copy_len))
    {
      if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                             &map_hint)))
        vlib_error_count (vm, node->node_index,
                          VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

      vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
                                      chained, frame, n_left);
    }

  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch more fresh packets
   * with a good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  vhost_user_vring_unlock (vui, qid);

  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
        (vnet_main.interface_main.sw_if_counters +
         VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

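/*
 * Per-interface TX function for split-ring queues. Note the naming: the
 * device transmits into what the guest considers an RX virtqueue, so the
 * per-thread queue is selected with VHOST_VRING_IDX_RX and held in a
 * variable named rxvq. Packed-ring queues branch out early to
 * vhost_user_device_class_packed () above.
 */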
VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
                                                   vlib_node_runtime_t *
                                                   node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  u32 or_flags;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_ready))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
                                               thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (rxvq->avail == 0))
    {
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      goto done3;
    }

  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

  if (vhost_user_is_packed_ring_supported (vui))
    return (vhost_user_device_class_packed (vm, node, frame));

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
        vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace = vlib_add_trace (vm, node, b0,
                                               sizeof (*cpu->current_trace));
          vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
        }

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
        {
          error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
          goto done;
        }

      desc_table = rxvq->desc;
      desc_head = desc_index =
        rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor
       * I don't know of any driver providing indirect for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
        {
          if (PREDICT_FALSE
              (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
            {
              error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
              goto done;
            }
          if (PREDICT_FALSE
              (!(desc_table =
                 map_guest_mem (vui, rxvq->desc[desc_index].addr,
                                &map_hint))))
            {
              error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
              goto done;
            }
          desc_index = 0;
        }

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
        // Get a header from the header array
        virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
        tx_headers_len++;
        hdr->hdr.flags = 0;
        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
        hdr->num_buffers = 1;	//This is local, no need to check

        or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD);

        /* Guest supports csum offload and buffer requires checksum offload? */
        if (or_flags
            && (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)))
          vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

        // Prepare a copy order executed later for the header
        ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
        vhost_copy_t *cpy = &cpu->copy[copy_len];
        copy_len++;
        cpy->len = vui->virtio_net_hdr_sz;
        cpy->dst = buffer_map_addr;
        cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
        {
          if (buffer_len == 0)
            {			//Get new output
              if (desc_table[desc_index].flags & VRING_DESC_F_NEXT)
                {
                  //Next one is chained
                  desc_index = desc_table[desc_index].next;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
                {
                  virtio_net_hdr_mrg_rxbuf_t *hdr =
                    &cpu->tx_headers[tx_headers_len - 1];

                  //Move from available to used buffer
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
                    desc_head;
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
                    desc_len;
                  vhost_user_log_dirty_ring (vui, rxvq,
                                             ring[rxvq->last_used_idx &
                                                  rxvq->qsz_mask]);

                  rxvq->last_avail_idx++;
                  rxvq->last_used_idx++;
                  hdr->num_buffers++;
                  desc_len = 0;

                  if (PREDICT_FALSE
                      (rxvq->last_avail_idx == rxvq->avail->idx))
                    {
                      //Dequeue queued descriptors for this packet
                      rxvq->last_used_idx -= hdr->num_buffers - 1;
                      rxvq->last_avail_idx -= hdr->num_buffers - 1;
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }

                  desc_table = rxvq->desc;
                  desc_head = desc_index =
                    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
                  if (PREDICT_FALSE
                      (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
                    {
                      //It is seriously unlikely that a driver will put an
                      //indirect descriptor after a non-indirect one.
                      if (PREDICT_FALSE
                          (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
                          goto done;
                        }
                      if (PREDICT_FALSE
                          (!(desc_table =
                             map_guest_mem (vui,
                                            rxvq->desc[desc_index].addr,
                                            &map_hint))))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
                          goto done;
                        }
                      desc_index = 0;
                    }
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else
                {
                  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
                  goto done;
                }
            }

          {
            ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
            vhost_copy_t *cpy = &cpu->copy[copy_len];
            copy_len++;
            cpy->len = bytes_left;
            cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
            cpy->dst = buffer_map_addr;
            cpy->src = (uword) vlib_buffer_get_current (current_b0) +
              current_b0->current_length - bytes_left;

            bytes_left -= cpy->len;
            buffer_len -= cpy->len;
            buffer_map_addr += cpy->len;
            desc_len += cpy->len;

            CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
          }

          // Check if vlib buffer has more data. If not, get more or break.
          if (PREDICT_TRUE (!bytes_left))
            {
              if (PREDICT_FALSE
                  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
                {
                  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
                  bytes_left = current_b0->current_length;
                }
              else
                {
                  //End of packet
                  break;
                }
            }
        }

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
                                 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
        }

      n_left--;			//At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and corrupt memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
        {
          if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                                 &map_hint)))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
            }
          copy_len = 0;

          /* give buffers back to driver */
          CLIB_MEMORY_BARRIER ();
          rxvq->used->idx = rxvq->last_used_idx;
          vhost_user_log_dirty_ring (vui, rxvq, idx);
        }
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                         &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch more fresh packets
   * with a good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, vui, rxvq);
    }

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
        (vnet_main.interface_main.sw_if_counters
         + VNET_INTERFACE_COUNTER_DROP,
         thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

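/*
 * RX mode changes apply to the guest TX vring (the device's RX side).
 * Interrupt and adaptive modes require the guest's kickfd; a global count
 * of interrupt-mode queues (ifq_count) starts the interrupt-coalescing
 * timer process on the first such queue and stops it on the last. In
 * polling mode the guest is told not to kick at all
 * (VRING_USED_F_NO_NOTIFY).
 */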
static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
                                     u32 qid, vnet_hw_if_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
        {
          // We cannot support interrupt mode if the driver opts out
          return clib_error_return (0, "Driver does not support interrupt");
        }
      if (txvq->mode == VNET_HW_IF_RX_MODE_POLLING)
        {
          vum->ifq_count++;
          // Start the timer if this is the first encounter on interrupt
          // interface/queue
          if ((vum->ifq_count == 1) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_START_TIMER, 0);
        }
    }
  else if (mode == VNET_HW_IF_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
           (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)) && vum->ifq_count)
        {
          vum->ifq_count--;
          // Stop the timer if there is no more interrupt interface/queue
          if ((vum->ifq_count == 0) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_STOP_TIMER, 0);
        }
    }

  txvq->mode = mode;
  if (mode == VNET_HW_IF_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_IF_RX_MODE_ADAPTIVE) ||
           (mode == VNET_HW_IF_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
                  hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}

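/*
 * Admin up/down only toggles the hardware link state when it changes the
 * result of vui_is_link_up (), which also factors in whether the vhost
 * session is ready, so the link is never reported up before the guest
 * side is connected.
 */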
static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
                                    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
                                 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */