/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stddef.h>
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

#include <vnet/gso/hdr_offset_parser.h>
/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order to not corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 200 for extra head
 * room because our default buffer size is 2048 while the default descriptor
 * length is likely 1536: a 64K jumbo frame takes fewer than 40 vlib buffers,
 * but it may take twice as many descriptors for the same frame.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)

extern vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error                 \
  _(NONE, "no error")                                    \
  _(NOT_READY, "vhost vring not ready")                  \
  _(DOWN, "vhost interface is down")                     \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)") \
  _(MMAP_FAIL, "mmap failure")                           \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
					      hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
			   hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
		hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return clib_atomic_test_and_set (vui->vring_locks[qid]);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  clib_atomic_release (vui->vring_locks[qid]);
}

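/**
 * @brief Record a TX trace entry for a split-ring packet: capture the
 * descriptor layout (single, chained, or indirect) at the current avail
 * index along with the length of the first descriptor.
 */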
static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

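/**
 * @brief Execute a previously prepared copy order into guest memory.
 * Processes entries four at a time, prefetching the next two sources while
 * copying the current pair, and logs dirty pages for live migration.
 * @return 0 on success, 1 if a guest address could not be mapped.
 */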
static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
	return 1;
      while (PREDICT_TRUE (copy_len >= 4))
	{
	  dst0 = dst2;
	  dst1 = dst3;

	  if (PREDICT_FALSE
	      (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
	    return 1;

	  CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
	  CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

	  clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
	  clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);

	  vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
	  vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
	return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

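/**
 * @brief Translate vlib buffer offload flags into the virtio_net_hdr the
 * guest expects: compute the IPv4 header checksum in place, request
 * VIRTIO_NET_HDR_F_NEEDS_CSUM for TCP/UDP checksum offload, and fill in the
 * GSO type and size when the guest negotiated the matching TSO/UFO feature.
 */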
static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
			      virtio_net_hdr_t * hdr)
{
  generic_header_offset_t gho = { 0 };
  vnet_generic_header_offset_parser (b, &gho);
  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
    {
      ip4_header_t *ip4;

      ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }

  /* checksum offload */
  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
      udp_header_t *udp =
	(udp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
      udp->checksum = 0;
    }
  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
      tcp_header_t *tcp =
	(tcp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
      tcp->checksum = 0;
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
	{
	  if ((b->flags & VNET_BUFFER_F_IS_IP4) &&
	      (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
	    }
	  else if ((b->flags & VNET_BUFFER_F_IS_IP6) &&
		   (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
	    }
	}
      else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
	       (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
	{
	  hdr->gso_size = vnet_buffer2 (b)->gso_size;
	  hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
	}
    }
}

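/**
 * @brief Flip the processed packed-ring descriptors from available to used
 * and, when the guest has not disabled notifications, send a call
 * (interrupt) once enough frames have been coalesced.
 */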
static_always_inline void
vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_vring_t * rxvq,
				u16 * n_descs_processed, u8 chained,
				vlib_frame_t * frame, u32 n_left)
{
  u16 desc_idx, flags;
  vring_packed_desc_t *desc_table = rxvq->packed_desc;
  u16 last_used_idx = rxvq->last_used_idx;

  if (PREDICT_FALSE (*n_descs_processed == 0))
    return;

  if (rxvq->used_wrap_counter)
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags |
      (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
  else
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags &
      ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);

  vhost_user_advance_last_used_idx (rxvq);

  for (desc_idx = 1; desc_idx < *n_descs_processed; desc_idx++)
    {
      if (rxvq->used_wrap_counter)
	desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags |=
	  (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
      else
	desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &=
	  ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
      vhost_user_advance_last_used_idx (rxvq);
    }

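  /* Write the first descriptor's flags last, so the guest cannot observe
   * the batch as used before every descriptor in it has been updated */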
  desc_table[last_used_idx & rxvq->qsz_mask].flags = flags;

  *n_descs_processed = 0;

  if (chained)
    {
      vring_packed_desc_t *desc_table = rxvq->packed_desc;

      while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
	     VIRTQ_DESC_F_NEXT)
	vhost_user_advance_last_used_idx (rxvq);

      /* Advance past the current chained table entries */
      vhost_user_advance_last_used_idx (rxvq);
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      (rxvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      vhost_user_main_t *vum = &vhost_user_main;

      rxvq->n_since_last_int += frame->n_vectors - n_left;
      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, rxvq);
    }
}

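/**
 * @brief Packed-ring variant of the TX trace capture: record the descriptor
 * layout (single, chained, or indirect) and the first descriptor's length.
 */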
static_always_inline void
vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vlib_buffer_t * b,
			    vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = last_avail_idx & rxvq->qsz_mask;
  vring_packed_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->packed_desc[desc_current];
  if (rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

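/**
 * @brief TX path for packed virtqueues: walk each vlib buffer chain, build
 * a virtio_net_hdr per packet, queue copy orders against guest descriptors
 * (direct, chained, or indirect), flush the copies periodically, and hand
 * the descriptors back to the guest.
 */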
static_always_inline uword
vhost_user_device_class_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  vring_packed_desc_t *desc_table;
  u32 or_flags;
  u16 desc_head, desc_index, desc_len;
  u16 n_descs_processed;
  u8 indirect, chained;

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
					       thread_index));
  rxvq = &vui->vrings[qid];

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  n_descs_processed = 0;

  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;
      u32 total_desc_len = 0;
      u16 n_entries = 0;

      indirect = 0;
      chained = 0;
      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace_packed (cpu->current_trace, vui, qid / 2, b0,
				      rxvq);
	}

      desc_table = rxvq->packed_desc;
      desc_head = desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
      if (PREDICT_FALSE (!vhost_user_packed_desc_available (rxvq, desc_head)))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}
      /*
       * Go deeper in case of indirect descriptor.
       * To test it, turn off mrg_rxbuf.
       */
      if (desc_table[desc_head].flags & VIRTQ_DESC_F_INDIRECT)
	{
	  indirect = 1;
	  if (PREDICT_FALSE (desc_table[desc_head].len <
			     sizeof (vring_packed_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
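	  /* Each indirect table entry is 16 bytes
	   * (sizeof (vring_packed_desc_t)), so len >> 4 is the entry count */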
	  n_entries = desc_table[desc_head].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_index].addr,
				      &map_hint);
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}
      else if (rxvq->packed_desc[desc_head].flags & VIRTQ_DESC_F_NEXT)
	chained = 1;

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      /* Get a header from the header array */
      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
      tx_headers_len++;
      hdr->hdr.flags = 0;
      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
      hdr->num_buffers = 1;

      or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
	(b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
	(b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);

      /* Guest supports csum offload and buffer requires checksum offload? */
      if (or_flags &&
	  (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
	vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

      /* Prepare a copy order executed later for the header */
      ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[copy_len];
      copy_len++;
      cpy->len = vui->virtio_net_hdr_sz;
      cpy->dst = buffer_map_addr;
      cpy->src = (uword) hdr;

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
	{
	  if (buffer_len == 0)
	    {
	      /* Get new output */
	      if (chained)
		{
		  /*
		   * Next one is chained
		   * Test it with both indirect and mrg_rxbuf off
		   */
		  if (PREDICT_FALSE (!(desc_table[desc_index].flags &
				       VIRTQ_DESC_F_NEXT)))
		    {
		      /*
		       * Last descriptor in chain.
		       * Dequeue queued descriptors for this packet
		       */
		      vhost_user_dequeue_chained_descs (rxvq,
							&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }
		  vhost_user_advance_last_avail_idx (rxvq);
		  desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
		  n_descs_processed++;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		  total_desc_len += desc_len;
		  desc_len = 0;
		}
	      else if (indirect)
		{
		  /*
		   * Indirect table
		   * Test it with mrg_rxbuf off
		   */
		  if (PREDICT_TRUE (n_entries > 0))
		    n_entries--;
		  else
		    {
		      /* Dequeue queued descriptors for this packet */
		      vhost_user_dequeue_chained_descs (rxvq,
							&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
		      goto done;
		    }
		  total_desc_len += desc_len;
		  desc_index = (desc_index + 1) & rxvq->qsz_mask;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		  desc_len = 0;
		}
	      else if (vui->virtio_net_hdr_sz == 12)
		{
		  /*
		   * MRG is available
		   * This is the default setting for the guest VM
		   */
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &cpu->tx_headers[tx_headers_len - 1];

		  desc_table[desc_index].len = desc_len;
		  vhost_user_advance_last_avail_idx (rxvq);
		  desc_head = desc_index =
		    rxvq->last_avail_idx & rxvq->qsz_mask;
		  hdr->num_buffers++;
		  n_descs_processed++;
		  desc_len = 0;

		  if (PREDICT_FALSE (!vhost_user_packed_desc_available
				     (rxvq, desc_index)))
		    {
		      /* Dequeue queued descriptors for this packet */
		      vhost_user_dequeue_descs (rxvq, hdr,
						&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  cpy->len = bytes_left;
	  cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	  cpy->dst = buffer_map_addr;
	  cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	    current_b0->current_length - bytes_left;

	  bytes_left -= cpy->len;
	  buffer_len -= cpy->len;
	  buffer_map_addr += cpy->len;
	  desc_len += cpy->len;

	  CLIB_PREFETCH (&rxvq->packed_desc, CLIB_CACHE_LINE_BYTES, LOAD);

	  /* Check if vlib buffer has more data. If not, get more or break */
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  /* End of packet */
		  break;
		}
	    }
	}

      /* Move from available to used ring */
      total_desc_len += desc_len;
      rxvq->packed_desc[desc_head].len = total_desc_len;

      vhost_user_advance_last_avail_table_idx (vui, rxvq, chained);
      n_descs_processed++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];

      n_left--;

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and corrupt memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD) || chained)
	{
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
						 &map_hint)))
	    vlib_error_count (vm, node->node_index,
			      VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	  copy_len = 0;

	  /* give buffers back to driver */
	  vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed,
					  chained, frame, n_left);
	}

      buffers++;
    }

done:
  if (PREDICT_TRUE (copy_len))
    {
      if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
					     &map_hint)))
	vlib_error_count (vm, node->node_index,
			  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

      vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed, chained,
				      frame, n_left);
    }

  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to a lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch fresh packets with a
   * good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  vhost_user_vring_unlock (vui, qid);

  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters +
	 VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

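/**
 * @brief Device-class TX function. Dispatches to the packed-ring variant
 * when the guest negotiated packed-ring support
 * (vhost_user_is_packed_ring_supported); otherwise walks each vlib buffer
 * chain over the split ring, queueing header and payload copy orders and
 * returning used descriptors to the guest.
 */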
VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
						   vlib_node_runtime_t *
						   node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  u32 or_flags;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_ready))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
					       thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (rxvq->avail == 0))
    {
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      goto done3;
    }

  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

  if (vhost_user_is_packed_ring_supported (vui))
    return (vhost_user_device_class_packed (vm, node, frame));

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
	}

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}

      desc_table = rxvq->desc;
      desc_head = desc_index =
	rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor.
       * No known driver provides indirect descriptors on this path. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
	{
	  if (PREDICT_FALSE
	      (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
	  if (PREDICT_FALSE
	      (!(desc_table =
		 map_guest_mem (vui, rxvq->desc[desc_index].addr,
				&map_hint))))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
	// Get a header from the header array
	virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
	tx_headers_len++;
	hdr->hdr.flags = 0;
	hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
	hdr->num_buffers = 1;	//This is local, no need to check

	or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
	  (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
	  (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);

	/* Guest supports csum offload and buffer requires checksum offload? */
	if (or_flags
	    && (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
	  vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

	// Prepare a copy order executed later for the header
	ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	vhost_copy_t *cpy = &cpu->copy[copy_len];
	copy_len++;
	cpy->len = vui->virtio_net_hdr_sz;
	cpy->dst = buffer_map_addr;
	cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
	{
	  if (buffer_len == 0)
	    {			//Get new output
	      if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
		{
		  //Next one is chained
		  desc_index = desc_table[desc_index].next;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
		{
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &cpu->tx_headers[tx_headers_len - 1];

		  //Move from available to used buffer
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
		    desc_head;
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
		    desc_len;
		  vhost_user_log_dirty_ring (vui, rxvq,
					     ring[rxvq->last_used_idx &
						  rxvq->qsz_mask]);

		  rxvq->last_avail_idx++;
		  rxvq->last_used_idx++;
		  hdr->num_buffers++;
		  desc_len = 0;

		  if (PREDICT_FALSE
		      (rxvq->last_avail_idx == rxvq->avail->idx))
		    {
		      //Dequeue queued descriptors for this packet
		      rxvq->last_used_idx -= hdr->num_buffers - 1;
		      rxvq->last_avail_idx -= hdr->num_buffers - 1;
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  desc_table = rxvq->desc;
		  desc_head = desc_index =
		    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
		  if (PREDICT_FALSE
		      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
		    {
		      //It is seriously unlikely that a driver will put an
		      //indirect descriptor after a non-indirect one.
		      if (PREDICT_FALSE
			  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
			  goto done;
			}
		      if (PREDICT_FALSE
			  (!(desc_table =
			     map_guest_mem (vui,
					    rxvq->desc[desc_index].addr,
					    &map_hint))))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
			  goto done;
			}
		      desc_index = 0;
		    }
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  {
	    ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	    vhost_copy_t *cpy = &cpu->copy[copy_len];
	    copy_len++;
	    cpy->len = bytes_left;
	    cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	    cpy->dst = buffer_map_addr;
	    cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	      current_b0->current_length - bytes_left;

	    bytes_left -= cpy->len;
	    buffer_len -= cpy->len;
	    buffer_map_addr += cpy->len;
	    desc_len += cpy->len;

	    CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  // Check if vlib buffer has more data. If not, get more or break.
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  //End of packet
		  break;
		}
	    }
	}

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
				 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
	}

      n_left--;			//At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and corrupt memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
						 &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_BARRIER ();
	  rxvq->used->idx = rxvq->last_used_idx;
	  vhost_user_log_dirty_ring (vui, rxvq, idx);
	}
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
					 &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to a lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch fresh packets with a
   * good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, rxvq);
    }

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters
	 + VNET_INTERFACE_COUNTER_DROP,
	 thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

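/**
 * @brief Handle an RX mode change (polling, interrupt, adaptive) on a queue:
 * maintain the global interrupt-queue count, start or stop the interrupt
 * coalescing timer process, and program the vring notification flags.
 */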
static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
				     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
	{
	  // We cannot support interrupt mode if the driver opts out
	  return clib_error_return (0, "Driver does not support interrupt");
	}
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
	{
	  vum->ifq_count++;
	  // Start the timer if this is the first encounter on interrupt
	  // interface/queue
	  if ((vum->ifq_count == 1) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_START_TIMER, 0);
	}
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
	   (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
	  vum->ifq_count)
	{
	  vum->ifq_count--;
	  // Stop the timer if there is no more interrupt interface/queue
	  if ((vum->ifq_count == 0) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_STOP_TIMER, 0);
	}
    }

  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
	   (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
		  hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}

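/**
 * @brief Admin up/down callback: update the admin flag and, if the computed
 * link state (vui_is_link_up) changed as a result, propagate it to the
 * hardware interface flags.
 */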
static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
				    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
				 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */