blob: c0c54d1b13a5010b90c4b36aa093013edc2cd4a2 [file] [log] [blame]
Mohsin Kazmie7cde312018-06-26 17:20:11 +02001/*
2 *------------------------------------------------------------------
3 * vhost-user-output
4 *
5 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at:
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *------------------------------------------------------------------
18 */
19
20#include <fcntl.h> /* for open */
21#include <sys/ioctl.h>
22#include <sys/socket.h>
23#include <sys/un.h>
24#include <sys/stat.h>
25#include <sys/types.h>
26#include <sys/uio.h> /* for iovec */
27#include <netinet/in.h>
28#include <sys/vfs.h>
29
30#include <linux/if_arp.h>
31#include <linux/if_tun.h>
32
33#include <vlib/vlib.h>
34#include <vlib/unix/unix.h>
35
36#include <vnet/ip/ip.h>
37
38#include <vnet/ethernet/ethernet.h>
39#include <vnet/devices/devices.h>
40#include <vnet/feature/feature.h>
41
42#include <vnet/devices/virtio/vhost_user.h>
43#include <vnet/devices/virtio/vhost_user_inline.h>
44
45/*
46 * On the transmit side, we keep processing the buffers from vlib in the while
47 * loop and prepare the copy order to be executed later. However, the static
48 * array which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
49 * entries. In order to not corrupt memory, we have to do the copy when the
50 * static array reaches the copy threshold. We subtract 40 in case the code
51 * goes into the inner loop for a maximum of 64k frames which may require
52 * more array entries.
53 */
54#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)
55
BenoƮt Ganne47727c02019-02-12 13:35:08 +010056extern vnet_device_class_t vhost_user_device_class;
Mohsin Kazmie7cde312018-06-26 17:20:11 +020057
58#define foreach_vhost_user_tx_func_error \
59 _(NONE, "no error") \
60 _(NOT_READY, "vhost vring not ready") \
61 _(DOWN, "vhost interface is down") \
62 _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \
63 _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)") \
64 _(MMAP_FAIL, "mmap failure") \
65 _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")
66
67typedef enum
68{
69#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
70 foreach_vhost_user_tx_func_error
71#undef _
72 VHOST_USER_TX_FUNC_N_ERROR,
73} vhost_user_tx_func_error_t;
74
75static __clib_unused char *vhost_user_tx_func_error_strings[] = {
76#define _(n,s) s,
77 foreach_vhost_user_tx_func_error
78#undef _
79};
80
81static __clib_unused u8 *
82format_vhost_user_interface_name (u8 * s, va_list * args)
83{
84 u32 i = va_arg (*args, u32);
85 u32 show_dev_instance = ~0;
86 vhost_user_main_t *vum = &vhost_user_main;
87
88 if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
89 show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];
90
91 if (show_dev_instance != ~0)
92 i = show_dev_instance;
93
94 s = format (s, "VirtualEthernet0/0/%d", i);
95 return s;
96}
97
98static __clib_unused int
99vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
100{
101 // FIXME: check if the new dev instance is already used
102 vhost_user_main_t *vum = &vhost_user_main;
Jerome Tollet2f54c272018-10-02 11:41:11 +0200103 vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
104 hi->dev_instance);
105
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200106 vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
107 hi->dev_instance, ~0);
108
109 vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
110 new_dev_instance;
111
Jerome Tollet2f54c272018-10-02 11:41:11 +0200112 vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
113 hi->dev_instance, new_dev_instance);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200114
115 return 0;
116}
117
118/**
119 * @brief Try once to lock the vring
120 * @return 0 on success, non-zero on failure.
121 */
122static_always_inline int
123vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
124{
Sirshak Das2f6d7bb2018-10-03 22:53:51 +0000125 return clib_atomic_test_and_set (vui->vring_locks[qid]);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200126}
127
128/**
129 * @brief Spin until the vring is successfully locked
130 */
131static_always_inline void
132vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
133{
134 while (vhost_user_vring_try_lock (vui, qid))
135 ;
136}
137
138/**
139 * @brief Unlock the vring lock
140 */
141static_always_inline void
142vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
143{
Sirshak Das2f6d7bb2018-10-03 22:53:51 +0000144 clib_atomic_release (vui->vring_locks[qid]);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200145}
146
147static_always_inline void
148vhost_user_tx_trace (vhost_trace_t * t,
149 vhost_user_intf_t * vui, u16 qid,
150 vlib_buffer_t * b, vhost_user_vring_t * rxvq)
151{
152 vhost_user_main_t *vum = &vhost_user_main;
153 u32 last_avail_idx = rxvq->last_avail_idx;
154 u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
155 vring_desc_t *hdr_desc = 0;
156 u32 hint = 0;
157
Dave Barachb7b92992018-10-17 10:38:51 -0400158 clib_memset (t, 0, sizeof (*t));
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200159 t->device_index = vui - vum->vhost_user_interfaces;
160 t->qid = qid;
161
162 hdr_desc = &rxvq->desc[desc_current];
163 if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
164 {
165 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
166 /* Header is the first here */
167 hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
168 }
169 if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
170 {
171 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
172 }
173 if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
174 !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
175 {
176 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
177 }
178
179 t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
180}
181
182static_always_inline u32
183vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
184 u16 copy_len, u32 * map_hint)
185{
186 void *dst0, *dst1, *dst2, *dst3;
187 if (PREDICT_TRUE (copy_len >= 4))
188 {
189 if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
190 return 1;
191 if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
192 return 1;
193 while (PREDICT_TRUE (copy_len >= 4))
194 {
195 dst0 = dst2;
196 dst1 = dst3;
197
198 if (PREDICT_FALSE
199 (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
200 return 1;
201 if (PREDICT_FALSE
202 (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
203 return 1;
204
205 CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
206 CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);
207
Dave Barach178cf492018-11-13 16:34:13 -0500208 clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
209 clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200210
211 vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
212 vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
213 copy_len -= 2;
214 cpy += 2;
215 }
216 }
217 while (copy_len)
218 {
219 if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
220 return 1;
Dave Barach178cf492018-11-13 16:34:13 -0500221 clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200222 vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
223 copy_len -= 1;
224 cpy += 1;
225 }
226 return 0;
227}
228
Mohsin Kazmidd8e7d02018-07-23 14:45:57 +0200229VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
230 vlib_node_runtime_t *
231 node, vlib_frame_t * frame)
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200232{
Damjan Mariona3d59862018-11-10 10:23:00 +0100233 u32 *buffers = vlib_frame_vector_args (frame);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200234 u32 n_left = frame->n_vectors;
235 vhost_user_main_t *vum = &vhost_user_main;
236 vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
237 vhost_user_intf_t *vui =
238 pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
239 u32 qid = ~0;
240 vhost_user_vring_t *rxvq;
241 u8 error;
Damjan Marion067cd622018-07-11 12:47:43 +0200242 u32 thread_index = vm->thread_index;
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100243 vhost_cpu_t *cpu = &vum->cpus[thread_index];
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200244 u32 map_hint = 0;
245 u8 retry = 8;
246 u16 copy_len;
247 u16 tx_headers_len;
248
249 if (PREDICT_FALSE (!vui->admin_up))
250 {
251 error = VHOST_USER_TX_FUNC_ERROR_DOWN;
252 goto done3;
253 }
254
Juraj Slobodab192feb2018-10-01 12:42:07 +0200255 if (PREDICT_FALSE (!vui->is_ready))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200256 {
257 error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
258 goto done3;
259 }
260
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100261 qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
262 thread_index));
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200263 rxvq = &vui->vrings[qid];
Steven0c469982018-11-04 08:20:01 -0800264 if (PREDICT_FALSE (rxvq->avail == 0))
265 {
266 error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
267 goto done3;
268 }
269
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200270 if (PREDICT_FALSE (vui->use_tx_spinlock))
271 vhost_user_vring_lock (vui, qid);
272
273retry:
274 error = VHOST_USER_TX_FUNC_ERROR_NONE;
275 tx_headers_len = 0;
276 copy_len = 0;
277 while (n_left > 0)
278 {
279 vlib_buffer_t *b0, *current_b0;
280 u16 desc_head, desc_index, desc_len;
281 vring_desc_t *desc_table;
282 uword buffer_map_addr;
283 u32 buffer_len;
284 u16 bytes_left;
285
286 if (PREDICT_TRUE (n_left > 1))
287 vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
288
289 b0 = vlib_get_buffer (vm, buffers[0]);
290
291 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
292 {
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100293 cpu->current_trace = vlib_add_trace (vm, node, b0,
294 sizeof (*cpu->current_trace));
295 vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200296 }
297
298 if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
299 {
300 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
301 goto done;
302 }
303
304 desc_table = rxvq->desc;
305 desc_head = desc_index =
306 rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
307
308 /* Go deeper in case of indirect descriptor
309 * I don't know of any driver providing indirect for RX. */
310 if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
311 {
312 if (PREDICT_FALSE
313 (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
314 {
315 error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
316 goto done;
317 }
318 if (PREDICT_FALSE
319 (!(desc_table =
320 map_guest_mem (vui, rxvq->desc[desc_index].addr,
321 &map_hint))))
322 {
323 error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
324 goto done;
325 }
326 desc_index = 0;
327 }
328
329 desc_len = vui->virtio_net_hdr_sz;
330 buffer_map_addr = desc_table[desc_index].addr;
331 buffer_len = desc_table[desc_index].len;
332
333 {
334 // Get a header from the header array
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100335 virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200336 tx_headers_len++;
337 hdr->hdr.flags = 0;
338 hdr->hdr.gso_type = 0;
339 hdr->num_buffers = 1; //This is local, no need to check
340
341 // Prepare a copy order executed later for the header
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100342 vhost_copy_t *cpy = &cpu->copy[copy_len];
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200343 copy_len++;
344 cpy->len = vui->virtio_net_hdr_sz;
345 cpy->dst = buffer_map_addr;
346 cpy->src = (uword) hdr;
347 }
348
349 buffer_map_addr += vui->virtio_net_hdr_sz;
350 buffer_len -= vui->virtio_net_hdr_sz;
351 bytes_left = b0->current_length;
352 current_b0 = b0;
353 while (1)
354 {
355 if (buffer_len == 0)
356 { //Get new output
357 if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
358 {
359 //Next one is chained
360 desc_index = desc_table[desc_index].next;
361 buffer_map_addr = desc_table[desc_index].addr;
362 buffer_len = desc_table[desc_index].len;
363 }
364 else if (vui->virtio_net_hdr_sz == 12) //MRG is available
365 {
366 virtio_net_hdr_mrg_rxbuf_t *hdr =
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100367 &cpu->tx_headers[tx_headers_len - 1];
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200368
369 //Move from available to used buffer
370 rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
371 desc_head;
372 rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
373 desc_len;
374 vhost_user_log_dirty_ring (vui, rxvq,
375 ring[rxvq->last_used_idx &
376 rxvq->qsz_mask]);
377
378 rxvq->last_avail_idx++;
379 rxvq->last_used_idx++;
380 hdr->num_buffers++;
381 desc_len = 0;
382
383 if (PREDICT_FALSE
384 (rxvq->last_avail_idx == rxvq->avail->idx))
385 {
386 //Dequeue queued descriptors for this packet
387 rxvq->last_used_idx -= hdr->num_buffers - 1;
388 rxvq->last_avail_idx -= hdr->num_buffers - 1;
389 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
390 goto done;
391 }
392
393 desc_table = rxvq->desc;
394 desc_head = desc_index =
395 rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
396 if (PREDICT_FALSE
397 (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
398 {
399 //It is seriously unlikely that a driver will put indirect descriptor
400 //after non-indirect descriptor.
401 if (PREDICT_FALSE
402 (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
403 {
404 error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
405 goto done;
406 }
407 if (PREDICT_FALSE
408 (!(desc_table =
409 map_guest_mem (vui,
410 rxvq->desc[desc_index].addr,
411 &map_hint))))
412 {
413 error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
414 goto done;
415 }
416 desc_index = 0;
417 }
418 buffer_map_addr = desc_table[desc_index].addr;
419 buffer_len = desc_table[desc_index].len;
420 }
421 else
422 {
423 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
424 goto done;
425 }
426 }
427
428 {
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100429 vhost_copy_t *cpy = &cpu->copy[copy_len];
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200430 copy_len++;
431 cpy->len = bytes_left;
432 cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
433 cpy->dst = buffer_map_addr;
434 cpy->src = (uword) vlib_buffer_get_current (current_b0) +
435 current_b0->current_length - bytes_left;
436
437 bytes_left -= cpy->len;
438 buffer_len -= cpy->len;
439 buffer_map_addr += cpy->len;
440 desc_len += cpy->len;
441
442 CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
443 }
444
445 // Check if vlib buffer has more data. If not, get more or break.
446 if (PREDICT_TRUE (!bytes_left))
447 {
448 if (PREDICT_FALSE
449 (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
450 {
451 current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
452 bytes_left = current_b0->current_length;
453 }
454 else
455 {
456 //End of packet
457 break;
458 }
459 }
460 }
461
462 //Move from available to used ring
463 rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
464 rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
465 vhost_user_log_dirty_ring (vui, rxvq,
466 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
467 rxvq->last_avail_idx++;
468 rxvq->last_used_idx++;
469
470 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
471 {
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100472 cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200473 }
474
475 n_left--; //At the end for error counting when 'goto done' is invoked
476
477 /*
478 * Do the copy periodically to prevent
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100479 * cpu->copy array overflow and corrupt memory
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200480 */
481 if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
482 {
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100483 if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
484 &map_hint)))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200485 {
486 vlib_error_count (vm, node->node_index,
487 VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
488 }
489 copy_len = 0;
490
491 /* give buffers back to driver */
492 CLIB_MEMORY_BARRIER ();
493 rxvq->used->idx = rxvq->last_used_idx;
494 vhost_user_log_dirty_ring (vui, rxvq, idx);
495 }
496 buffers++;
497 }
498
499done:
500 //Do the memory copies
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100501 if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
502 &map_hint)))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200503 {
504 vlib_error_count (vm, node->node_index,
505 VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
506 }
507
508 CLIB_MEMORY_BARRIER ();
509 rxvq->used->idx = rxvq->last_used_idx;
510 vhost_user_log_dirty_ring (vui, rxvq, idx);
511
512 /*
513 * When n_left is set, error is always set to something too.
514 * In case error is due to lack of remaining buffers, we go back up and
515 * retry.
516 * The idea is that it is better to waste some time on packets
517 * that have been processed already than dropping them and get
518 * more fresh packets with a good likelyhood that they will be dropped too.
519 * This technique also gives more time to VM driver to pick-up packets.
520 * In case the traffic flows from physical to virtual interfaces, this
521 * technique will end-up leveraging the physical NIC buffer in order to
522 * absorb the VM's CPU jitter.
523 */
524 if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
525 {
526 retry--;
527 goto retry;
528 }
529
530 /* interrupt (call) handling */
531 if ((rxvq->callfd_idx != ~0) &&
532 !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
533 {
534 rxvq->n_since_last_int += frame->n_vectors - n_left;
535
536 if (rxvq->n_since_last_int > vum->coalesce_frames)
537 vhost_user_send_call (vm, rxvq);
538 }
539
540 vhost_user_vring_unlock (vui, qid);
541
542done3:
543 if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
544 {
545 vlib_error_count (vm, node->node_index, error, n_left);
546 vlib_increment_simple_counter
547 (vnet_main.interface_main.sw_if_counters
548 + VNET_INTERFACE_COUNTER_DROP,
549 thread_index, vui->sw_if_index, n_left);
550 }
551
Damjan Mariona3d59862018-11-10 10:23:00 +0100552 vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200553 return frame->n_vectors;
554}
555
556static __clib_unused clib_error_t *
557vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
558 u32 qid, vnet_hw_interface_rx_mode mode)
559{
560 vlib_main_t *vm = vnm->vlib_main;
561 vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
562 vhost_user_main_t *vum = &vhost_user_main;
563 vhost_user_intf_t *vui =
564 pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
565 vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
566
567 if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
568 (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
569 {
570 if (txvq->kickfd_idx == ~0)
571 {
572 // We cannot support interrupt mode if the driver opts out
573 return clib_error_return (0, "Driver does not support interrupt");
574 }
575 if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
576 {
577 vum->ifq_count++;
578 // Start the timer if this is the first encounter on interrupt
579 // interface/queue
580 if ((vum->ifq_count == 1) &&
581 (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
582 vlib_process_signal_event (vm,
583 vhost_user_send_interrupt_node.index,
584 VHOST_USER_EVENT_START_TIMER, 0);
585 }
586 }
587 else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
588 {
589 if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
590 (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
591 vum->ifq_count)
592 {
593 vum->ifq_count--;
594 // Stop the timer if there is no more interrupt interface/queue
595 if ((vum->ifq_count == 0) &&
596 (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
597 vlib_process_signal_event (vm,
598 vhost_user_send_interrupt_node.index,
599 VHOST_USER_EVENT_STOP_TIMER, 0);
600 }
601 }
602
603 txvq->mode = mode;
604 if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
605 txvq->used->flags = VRING_USED_F_NO_NOTIFY;
606 else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
607 (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
608 txvq->used->flags = 0;
609 else
610 {
Jerome Tollet2f54c272018-10-02 11:41:11 +0200611 vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
612 hw_if_index, qid);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200613 return clib_error_return (0, "unsupported");
614 }
615
616 return 0;
617}
618
619static __clib_unused clib_error_t *
620vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
621 u32 flags)
622{
623 vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
624 vhost_user_main_t *vum = &vhost_user_main;
625 vhost_user_intf_t *vui =
626 pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
Juraj Slobodab192feb2018-10-01 12:42:07 +0200627 u8 link_old, link_new;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200628
Juraj Slobodab192feb2018-10-01 12:42:07 +0200629 link_old = vui_is_link_up (vui);
630
631 vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
632
633 link_new = vui_is_link_up (vui);
634
635 if (link_old != link_new)
636 vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
637 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200638
639 return /* no error */ 0;
640}
641
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200642/* *INDENT-OFF* */
643VNET_DEVICE_CLASS (vhost_user_device_class) = {
644 .name = "vhost-user",
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200645 .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
646 .tx_function_error_strings = vhost_user_tx_func_error_strings,
647 .format_device_name = format_vhost_user_interface_name,
648 .name_renumber = vhost_user_name_renumber,
649 .admin_up_down_function = vhost_user_interface_admin_up_down,
650 .rx_mode_change_function = vhost_user_interface_rx_mode_change,
651 .format_tx_trace = format_vhost_trace,
652};
653
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200654/* *INDENT-ON* */
655
656/*
657 * fd.io coding-style-patch-verification: ON
658 *
659 * Local Variables:
660 * eval: (c-set-style "gnu")
661 * End:
662 */