/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy orders to be executed later. However, the static
 * array in which we keep the copy orders is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order not to corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 40 as headroom in case
 * the code goes into the inner loop for a maximum of 64k frames, which may
 * require more array entries.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)

vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error      \
  _(NONE, "no error")  \
  _(NOT_READY, "vhost vring not ready")  \
  _(DOWN, "vhost interface is down")  \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")  \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")  \
  _(MMAP_FAIL, "mmap failure")  \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
			   hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
	    hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  *vui->vring_locks[qid] = 0;
}

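/*
 * Record a TX trace entry for one buffer: note whether the first guest
 * descriptor is indirect, chained, or standalone, and capture the length
 * of the descriptor that carries the virtio-net header.
 */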
static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first descriptor of the indirect table */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

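/*
 * Execute the copy orders accumulated in the per-thread copy array.
 * Guest destination addresses are mapped two entries ahead of the pair
 * currently being copied and the corresponding sources are prefetched, so
 * the mapping/prefetch overlaps with the memcpy of the current pair; each
 * destination is also marked in the dirty-page log.
 * Returns 0 on success, 1 if a guest address cannot be mapped.
 */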
static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
	return 1;
      while (PREDICT_TRUE (copy_len >= 4))
	{
	  dst0 = dst2;
	  dst1 = dst3;

	  if (PREDICT_FALSE
	      (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
	    return 1;

	  CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
	  CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

	  clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
	  clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);

	  vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
	  vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
	return 1;
      clib_memcpy (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

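/*
 * Device-class TX function: drains the frame of vlib buffers into the
 * guest RX virtqueue. The vring is selected per worker thread and
 * spin-locked only when vui->use_tx_spinlock is set (i.e. the queue may be
 * shared between threads). Buffer data is not copied inline; copy orders
 * are queued and flushed in batches (see VHOST_USER_TX_COPY_THRESHOLD),
 * after which the used ring index is published and the guest is
 * optionally interrupted, subject to interrupt coalescing.
 */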
uword
CLIB_MULTIARCH_FN (vhost_user_tx) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vlib_get_thread_index ();
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid =
    VHOST_VRING_IDX_RX (*vec_elt_at_index
			(vui->per_cpu_tx_qid, thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

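  /*
   * Per-frame state: virtio headers and copy orders are staged in
   * per-thread arrays and flushed in batches; 'retry' bounds how many
   * times we jump back here after running out of available descriptors.
   */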
retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vum->cpus[thread_index].current_trace =
	    vlib_add_trace (vm, node, b0,
			    sizeof (*vum->cpus[thread_index].current_trace));
	  vhost_user_tx_trace (vum->cpus[thread_index].current_trace,
			       vui, qid / 2, b0, rxvq);
	}

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}

      desc_table = rxvq->desc;
      desc_head = desc_index =
	rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor
       * I don't know of any driver providing indirect for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
	{
	  if (PREDICT_FALSE
	      (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
	  if (PREDICT_FALSE
	      (!(desc_table =
		 map_guest_mem (vui, rxvq->desc[desc_index].addr,
				&map_hint))))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
	// Get a header from the header array
	virtio_net_hdr_mrg_rxbuf_t *hdr =
	  &vum->cpus[thread_index].tx_headers[tx_headers_len];
	tx_headers_len++;
	hdr->hdr.flags = 0;
	hdr->hdr.gso_type = 0;
	hdr->num_buffers = 1;	//This is local, no need to check

	// Prepare a copy order executed later for the header
	vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
	copy_len++;
	cpy->len = vui->virtio_net_hdr_sz;
	cpy->dst = buffer_map_addr;
	cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
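      /*
       * Fill guest descriptors from the vlib buffer chain: when the current
       * descriptor runs out of room, either follow VIRTQ_DESC_F_NEXT or,
       * with mergeable RX buffers, dequeue another available descriptor and
       * bump num_buffers in the virtio-net header.
       */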
      while (1)
	{
	  if (buffer_len == 0)
	    {			//Get new output
	      if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
		{
		  //Next one is chained
		  desc_index = desc_table[desc_index].next;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
		{
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &vum->cpus[thread_index].tx_headers[tx_headers_len - 1];

		  //Move from available to used buffer
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
		    desc_head;
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
		    desc_len;
		  vhost_user_log_dirty_ring (vui, rxvq,
					     ring[rxvq->last_used_idx &
						  rxvq->qsz_mask]);

		  rxvq->last_avail_idx++;
		  rxvq->last_used_idx++;
		  hdr->num_buffers++;
		  desc_len = 0;

		  if (PREDICT_FALSE
		      (rxvq->last_avail_idx == rxvq->avail->idx))
		    {
		      //Dequeue queued descriptors for this packet
		      rxvq->last_used_idx -= hdr->num_buffers - 1;
		      rxvq->last_avail_idx -= hdr->num_buffers - 1;
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  desc_table = rxvq->desc;
		  desc_head = desc_index =
		    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
		  if (PREDICT_FALSE
		      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
		    {
		      //It is seriously unlikely that a driver will put an
		      //indirect descriptor after a non-indirect one.
		      if (PREDICT_FALSE
			  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
			  goto done;
			}
		      if (PREDICT_FALSE
			  (!(desc_table =
			     map_guest_mem (vui,
					    rxvq->desc[desc_index].addr,
					    &map_hint))))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
			  goto done;
			}
		      desc_index = 0;
		    }
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  {
	    vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
	    copy_len++;
	    cpy->len = bytes_left;
	    cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	    cpy->dst = buffer_map_addr;
	    cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	      current_b0->current_length - bytes_left;

	    bytes_left -= cpy->len;
	    buffer_len -= cpy->len;
	    buffer_map_addr += cpy->len;
	    desc_len += cpy->len;

	    CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  // Check if vlib buffer has more data. If not, get more or break.
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  //End of packet
		  break;
		}
	    }
	}

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
				 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vum->cpus[thread_index].current_trace->hdr =
	    vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
	}

      n_left--;			//At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent the
       * vum->cpus[thread_index].copy array from overflowing and corrupting
       * memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE
	      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
				   copy_len, &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_BARRIER ();
	  rxvq->used->idx = rxvq->last_used_idx;
	  vhost_user_log_dirty_ring (vui, rxvq, idx);
	}
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE
      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
			   copy_len, &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set too.
   * If the error is due to a lack of remaining buffers, go back up and
   * retry.
   * The idea is that it is better to spend some extra time on packets
   * that have already been processed than to drop them and pull in fresh
   * packets that are likely to be dropped as well.
   * This also gives the VM driver more time to pick up packets.
   * When traffic flows from physical to virtual interfaces, this technique
   * ends up leveraging the physical NIC's buffering to absorb the VM's CPU
   * jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, rxvq);
    }

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters
	 + VNET_INTERFACE_COUNTER_DROP,
	 thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

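/*
 * RX-mode change callback. From VPP's point of view the "RX" side of a
 * vhost-user interface is the guest TX vring (VHOST_VRING_IDX_TX).
 * Switching to interrupt or adaptive mode requires a kickfd from the
 * driver and starts the interrupt-coalescing timer process when the first
 * such queue appears; switching back to polling stops it once no
 * interrupt-mode queues remain, and sets VRING_USED_F_NO_NOTIFY so the
 * guest stops kicking us.
 */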
static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
				     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
	{
	  // We cannot support interrupt mode if the driver opts out
	  return clib_error_return (0, "Driver does not support interrupt");
	}
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
	{
	  vum->ifq_count++;
	  // Start the timer if this is the first interrupt-mode
	  // interface/queue
	  if ((vum->ifq_count == 1) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_START_TIMER, 0);
	}
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
	   (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
	  vum->ifq_count)
	{
	  vum->ifq_count--;
	  // Stop the timer once no interrupt-mode interfaces/queues remain
	  if ((vum->ifq_count == 0) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_STOP_TIMER, 0);
	}
    }

  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
	   (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      clib_warning ("BUG: unhandled mode %d changed for if %d queue %d", mode,
		    hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}

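/*
 * Admin up/down callback: mirror the software interface admin state into
 * the hardware interface link flags.
 */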
static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
				    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u32 hw_flags = 0;
  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  hw_flags = vui->admin_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0;

  vnet_hw_interface_set_flags (vnm, vui->hw_if_index, hw_flags);

  return /* no error */ 0;
}

#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function = vhost_user_tx,
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};

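/*
 * At load time, pick the most specific multiarch variant of the TX
 * function that this CPU supports (AVX-512 first, then AVX2), provided
 * the weak symbols were built.
 */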
#if __x86_64__
vlib_node_function_t __clib_weak vhost_user_tx_avx512;
vlib_node_function_t __clib_weak vhost_user_tx_avx2;
static void __clib_constructor
vhost_user_tx_multiarch_select (void)
{
  if (vhost_user_tx_avx512 && clib_cpu_supports_avx512f ())
    vhost_user_device_class.tx_function = vhost_user_tx_avx512;
  else if (vhost_user_tx_avx2 && clib_cpu_supports_avx2 ())
    vhost_user_device_class.tx_function = vhost_user_tx_avx2;
}
#endif
#endif

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */