/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy orders to be executed later. However, the static
 * array in which we keep the copy orders is limited to VHOST_USER_COPY_ARRAY_N
 * entries. To avoid corrupting memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 40 in case the code
 * goes into the inner loop for a maximum of 64k frames, which may require
 * more array entries.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)
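
/*
 * Illustrative sketch only (it mirrors the flush logic in the TX function
 * below, using the same names; it is not additional build logic): the loop
 * accumulates copy orders and flushes them before the static array can
 * overflow:
 *
 *   if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
 *     {
 *       vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
 *                           copy_len, &map_hint);
 *       copy_len = 0;
 *     }
 */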

vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error                                \
  _(NONE, "no error")                                                   \
  _(NOT_READY, "vhost vring not ready")                                 \
  _(DOWN, "vhost interface is down")                                    \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")       \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")       \
  _(MMAP_FAIL, "mmap failure")                                          \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
                           hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
            hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  *vui->vring_locks[qid] = 0;
}
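
/*
 * Usage sketch (this is the pattern used by the TX function below, shown
 * here for clarity only): the spinlock is taken only when several worker
 * threads may share the same guest RX vring.
 *
 *   if (PREDICT_FALSE (vui->use_tx_spinlock))
 *     vhost_user_vring_lock (vui, qid);
 *   ... enqueue descriptors ...
 *   vhost_user_vring_unlock (vui, qid);
 */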

static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
                     vhost_user_intf_t * vui, u16 qid,
                     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* The header is the first descriptor in the indirect table */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
                    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
        return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
        return 1;
      while (PREDICT_TRUE (copy_len >= 4))
        {
          dst0 = dst2;
          dst1 = dst3;

          if (PREDICT_FALSE
              (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
            return 1;
          if (PREDICT_FALSE
              (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
            return 1;

          CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
          CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

          clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
          clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);

          vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
          vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
          copy_len -= 2;
          cpy += 2;
        }
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
        return 1;
      clib_memcpy (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}
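
/*
 * Note on the loop structure above (descriptive comment, no functional
 * change): the guest destinations for entries [2] and [3] are mapped and
 * their sources prefetched one iteration ahead, so each pass copies entries
 * [0] and [1] while the data needed for the next pass is already in flight.
 * Entries are consumed two at a time; the trailing while loop handles the
 * remainder.
 */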

VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
                                                   vlib_node_runtime_t *
                                                   node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid =
    VHOST_VRING_IDX_RX (*vec_elt_at_index
                        (vui->per_cpu_tx_qid, thread_index));
  rxvq = &vui->vrings[qid];
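  /*
   * Descriptive note: what VPP transmits lands on the guest's RX side, so
   * the per-thread TX queue id is translated with VHOST_VRING_IDX_RX() into
   * the index of the guest RX vring (the qid / 2 passed to the tracer below
   * recovers the queue-pair number).
   */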
  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
        vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          vum->cpus[thread_index].current_trace =
            vlib_add_trace (vm, node, b0,
                            sizeof (*vum->cpus[thread_index].current_trace));
          vhost_user_tx_trace (vum->cpus[thread_index].current_trace,
                               vui, qid / 2, b0, rxvq);
        }

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
        {
          error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
          goto done;
        }

      desc_table = rxvq->desc;
      desc_head = desc_index =
        rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of an indirect descriptor.
       * I don't know of any driver providing indirect descriptors for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
        {
          if (PREDICT_FALSE
              (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
            {
              error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
              goto done;
            }
          if (PREDICT_FALSE
              (!(desc_table =
                 map_guest_mem (vui, rxvq->desc[desc_index].addr,
                                &map_hint))))
            {
              error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
              goto done;
            }
          desc_index = 0;
        }

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
        // Get a header from the header array
        virtio_net_hdr_mrg_rxbuf_t *hdr =
          &vum->cpus[thread_index].tx_headers[tx_headers_len];
        tx_headers_len++;
        hdr->hdr.flags = 0;
        hdr->hdr.gso_type = 0;
        hdr->num_buffers = 1;   //This is local, no need to check

        // Prepare a copy order executed later for the header
        vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
        copy_len++;
        cpy->len = vui->virtio_net_hdr_sz;
        cpy->dst = buffer_map_addr;
        cpy->src = (uword) hdr;
      }
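
      /*
       * Descriptive note: virtio_net_hdr_sz is 12 bytes when mergeable RX
       * buffers (VIRTIO_NET_F_MRG_RXBUF) have been negotiated; in that case
       * the header prepared above carries a num_buffers field that is
       * incremented further below each time the packet spills into another
       * guest buffer. With a 10-byte header the packet must fit the
       * descriptor chain, otherwise it is dropped with PKT_DROP_NOMRG.
       */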

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
        {
          if (buffer_len == 0)
            {                   //Get a new output descriptor
              if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
                {
                  //Next one is chained
                  desc_index = desc_table[desc_index].next;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else if (vui->virtio_net_hdr_sz == 12)    //MRG is available
                {
                  virtio_net_hdr_mrg_rxbuf_t *hdr =
                    &vum->cpus[thread_index].tx_headers[tx_headers_len - 1];

                  //Move from available to used buffer
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
                    desc_head;
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
                    desc_len;
                  vhost_user_log_dirty_ring (vui, rxvq,
                                             ring[rxvq->last_used_idx &
                                                  rxvq->qsz_mask]);

                  rxvq->last_avail_idx++;
                  rxvq->last_used_idx++;
                  hdr->num_buffers++;
                  desc_len = 0;

                  if (PREDICT_FALSE
                      (rxvq->last_avail_idx == rxvq->avail->idx))
                    {
                      //Dequeue queued descriptors for this packet
                      rxvq->last_used_idx -= hdr->num_buffers - 1;
                      rxvq->last_avail_idx -= hdr->num_buffers - 1;
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }

                  desc_table = rxvq->desc;
                  desc_head = desc_index =
                    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
                  if (PREDICT_FALSE
                      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
                    {
                      //It is seriously unlikely that a driver will put an
                      //indirect descriptor after a non-indirect one.
                      if (PREDICT_FALSE
                          (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
                          goto done;
                        }
                      if (PREDICT_FALSE
                          (!(desc_table =
                             map_guest_mem (vui,
                                            rxvq->desc[desc_index].addr,
                                            &map_hint))))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
                          goto done;
                        }
                      desc_index = 0;
                    }
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else
                {
                  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
                  goto done;
                }
            }

          {
            vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
            copy_len++;
            cpy->len = bytes_left;
            cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
            cpy->dst = buffer_map_addr;
            cpy->src = (uword) vlib_buffer_get_current (current_b0) +
              current_b0->current_length - bytes_left;

            bytes_left -= cpy->len;
            buffer_len -= cpy->len;
            buffer_map_addr += cpy->len;
            desc_len += cpy->len;

            CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
          }

          // Check if the vlib buffer has more data. If not, get more or break.
          if (PREDICT_TRUE (!bytes_left))
            {
              if (PREDICT_FALSE
                  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
                {
                  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
                  bytes_left = current_b0->current_length;
                }
              else
                {
                  //End of packet
                  break;
                }
            }
        }

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
                                 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          vum->cpus[thread_index].current_trace->hdr =
            vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
        }

      n_left--; //Decremented last so 'goto done' error counting still includes this packet

      /*
       * Do the copy periodically to prevent the
       * vum->cpus[thread_index].copy array from overflowing and corrupting
       * memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
        {
          if (PREDICT_FALSE
              (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
                                   copy_len, &map_hint)))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
            }
          copy_len = 0;

          /* give buffers back to driver */
          CLIB_MEMORY_BARRIER ();
          rxvq->used->idx = rxvq->last_used_idx;
          vhost_user_log_dirty_ring (vui, rxvq, idx);
        }
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE
      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
                           copy_len, &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);
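  /*
   * Descriptive note: the barrier above orders the descriptor and data
   * copies before the used->idx update, so the guest never observes a used
   * index that points at entries whose contents it could still see as stale.
   */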

  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to a lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets
   * that have already been processed than to drop them and get
   * more fresh packets with a good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffers in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, rxvq);
    }
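  /*
   * Descriptive note on the block above: guest interrupts are coalesced;
   * the call eventfd is kicked only once more than vum->coalesce_frames
   * packets have accumulated since the last interrupt. Queues that stay
   * below the threshold are presumably flushed by the
   * vhost_user_send_interrupt_node timer that the rx-mode handler below
   * arms via VHOST_USER_EVENT_START_TIMER.
   */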

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
        (vnet_main.interface_main.sw_if_counters
         + VNET_INTERFACE_COUNTER_DROP,
         thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
                                     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
        {
          // We cannot support interrupt mode if the driver opts out
          return clib_error_return (0, "Driver does not support interrupt");
        }
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
        {
          vum->ifq_count++;
          // Start the timer if this is the first encounter on interrupt
          // interface/queue
          if ((vum->ifq_count == 1) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_START_TIMER, 0);
        }
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
           (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
          vum->ifq_count)
        {
          vum->ifq_count--;
          // Stop the timer if there is no more interrupt interface/queue
          if ((vum->ifq_count == 0) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_STOP_TIMER, 0);
        }
    }

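  /*
   * Descriptive note on the flag updates below: in polling mode we set
   * VRING_USED_F_NO_NOTIFY in the used ring flags so the guest driver skips
   * kicking us; in interrupt or adaptive mode the flag is cleared so the
   * guest resumes notifying us through the kick eventfd.
   */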
  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
           (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      clib_warning ("BUG: unhandled mode %d changed for if %d queue %d", mode,
                    hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}

static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
                                    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u32 hw_flags = 0;
  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  hw_flags = vui->admin_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0;

  vnet_hw_interface_set_flags (vnm, vui->hw_if_index, hw_flags);

  return /* no error */ 0;
}
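
/*
 * Descriptive note on the function above: there is no physical carrier to
 * track for a vhost-user interface, so the link state simply mirrors the
 * admin state.
 */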

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */