/*
 *------------------------------------------------------------------
 * vhost-user-input
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/interface/rx_queue_funcs.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value caps how many packets
 * are discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers falls below this threshold,
 * the RX node starts discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drops in the VM.
 * This value controls how many copy operations are queued up
 * before they are all executed and the descriptors are given back to
 * the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64

extern vlib_node_registration_t vhost_user_input_node;

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error")  \
  _(NO_BUFFER, "no available buffer")  \
  _(MMAP_FAIL, "mmap failure")  \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table")  \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(NOT_READY, "vhost interface not ready or down") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;

static __clib_unused char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};

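/**
 * Fill a vhost_trace_t record for one packet taken from the guest TX ring
 * (VPP RX path): record the interface, the queue id, the descriptor
 * chaining type (single, chained or indirect) and a copy of the virtio-net
 * header when it can be mapped.
 */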
static_always_inline void
vhost_user_rx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * txvq,
		     u16 last_avail_idx)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->desc[desc_current];
  if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
    }
  if (txvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(txvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
    }
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
    }
}

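/**
 * Execute the copy operations queued in the cpy array for the split ring.
 * Guest sources are mapped two entries ahead and prefetched, so the memcpy
 * of entry N overlaps with the mapping of entry N+2.
 * Returns 0 on success, 1 if a guest address could not be mapped.
 */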
static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
	return 1;

      while (PREDICT_TRUE (copy_len >= 4))
	{
	  src0 = src2;
	  src1 = src3;

	  if (PREDICT_FALSE
	      (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
	    return 1;

	  CLIB_PREFETCH (src2, 64, LOAD);
	  CLIB_PREFETCH (src3, 64, LOAD);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	return 1;
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

/**
 * Try to discard packets from the tx ring (VPP RX path).
 * Returns the number of discarded packets.
 */
static_always_inline u32
vhost_user_rx_discard_packet (vlib_main_t * vm,
			      vhost_user_intf_t * vui,
			      vhost_user_vring_t * txvq, u32 discard_max)
{
  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or indirect).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  u32 discarded_packets = 0;
  u32 avail_idx = txvq->avail->idx;
  u16 mask = txvq->qsz_mask;
  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;
  while (discarded_packets != discard_max)
    {
      if (avail_idx == last_avail_idx)
	goto out;

      u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
      last_avail_idx++;
      txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
      last_used_idx++;
      discarded_packets++;
    }

out:
  txvq->last_avail_idx = last_avail_idx;
  txvq->last_used_idx = last_used_idx;
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);
  return discarded_packets;
}

/*
 * In case of overflow, we need to rewind the array of allocated buffers.
 */
static_always_inline void
vhost_user_input_rewind_buffers (vlib_main_t * vm,
				 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}

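/*
 * Translate the virtio-net header of a received packet into vnet offload
 * metadata: parse the (possibly VLAN-tagged) L2 header, set the l2/l3/l4
 * header offsets, request IP/TCP/UDP checksum offload flags and propagate
 * the GSO size and L4 header size when the header carries a GSO type.
 */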
static_always_inline void
vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
			      virtio_net_hdr_t * hdr)
{
  u8 l4_hdr_sz = 0;
  u8 l4_proto = 0;
  ethernet_header_t *eh = (ethernet_header_t *) b0_data;
  u16 ethertype = clib_net_to_host_u16 (eh->type);
  u16 l2hdr_sz = sizeof (ethernet_header_t);
  u32 oflags = 0;

  if (ethernet_frame_is_tagged (ethertype))
    {
      ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

      ethertype = clib_net_to_host_u16 (vlan->type);
      l2hdr_sz += sizeof (*vlan);
      if (ethertype == ETHERNET_TYPE_VLAN)
	{
	  vlan++;
	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	}
    }
  vnet_buffer (b0)->l2_hdr_offset = 0;
  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
  vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
  b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L4_HDR_OFFSET_VALID);

  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
    {
      ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip4->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP4;
      oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
    }
  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip6->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP6;
    }

  if (l4_proto == IP_PROTOCOL_TCP)
    {
      tcp_header_t *tcp = (tcp_header_t *)
	(b0_data + vnet_buffer (b0)->l4_hdr_offset);
      l4_hdr_sz = tcp_header_bytes (tcp);
      oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
    }
  else if (l4_proto == IP_PROTOCOL_UDP)
    {
      l4_hdr_sz = sizeof (udp_header_t);
      oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
    }

  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO;
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
    }

  if (oflags)
    vnet_buffer_offload_flags_set (b0, oflags);
}

static_always_inline void
vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_intf_t * vui,
			       vhost_user_vring_t * txvq,
			       vhost_user_vring_t * rxvq)
{
  f64 now = vlib_time_now (vm);

  if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
    vhost_user_send_call (vm, vui, txvq);

  if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
    vhost_user_send_call (vm, vui, rxvq);
}

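/*
 * Prepare the next frame for this input node: resolve the device-input
 * feature arc if any features are enabled on the interface and, when the
 * next node is ethernet-input, mark the frame as single-sw-if-index so
 * ethernet-input can take its fast path.
 */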
static_always_inline void
vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vhost_user_intf_t * vui,
			      u32 * current_config_index, u32 * next_index,
			      u32 ** to_next, u32 * n_left_to_next)
{
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;

  if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
    {
      vnet_feature_config_main_t *cm;
      cm = &fm->feature_config_mains[feature_arc_idx];
      *current_config_index = vec_elt (cm->config_index_by_sw_if_index,
				       vui->sw_if_index);
      vnet_get_config_data (&cm->config_main, current_config_index,
			    next_index, 0);
    }

  vlib_get_new_next_frame (vm, node, *next_index, *to_next, *n_left_to_next);

  if (*next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
    {
      /* give some hints to ethernet-input */
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, *next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = vui->sw_if_index;
      ef->hw_if_index = vui->hw_if_index;
      vlib_frame_no_append (f);
    }
}

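/*
 * Per-queue receive function for split (legacy) virtio rings.
 * Packets are dequeued from the guest TX ring, copied into vlib buffers
 * through a deferred copy list, and handed to the next node (typically
 * ethernet-input). Returns the number of packets received.
 */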
static_always_inline u32
vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
		     vhost_user_intf_t *vui, u16 qid,
		     vlib_node_runtime_t *node, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u32 current_config_index = ~(u32) 0;
  u16 mask = txvq->qsz_mask;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->avail == 0))
    goto done;

  {
    /* do we have pending interrupts ? */
    vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
    vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);
  }

  /*
   * In adaptive mode, we try to keep interrupts to a minimum.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver not to interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode, and we must tell the driver we want interrupts again.
   */
  if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used->flags = VRING_USED_F_NO_NOTIFY;
    }

  if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
    goto done;

  n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);

  /* nothing to do */
  if (PREDICT_FALSE (n_left == 0))
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      vhost_user_rx_discard_packet (vm, vui, txvq,
				    VHOST_USER_DOWN_DISCARD_COUNT);
      goto done;
    }

  if (PREDICT_FALSE (n_left == (mask + 1)))
    {
      /*
       * Informational error logging when VPP is not
       * receiving packets fast enough.
       */
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
    }

  if (n_left > VLIB_FRAME_SIZE)
    n_left = VLIB_FRAME_SIZE;

  /*
   * For small packets (<2kB), we will not need more than one vlib buffer
   * per packet. In case packets are bigger, we will just yield at some point
   * in the loop and come back later. This is not an issue since, for big
   * packets, the processing cost really comes from the memory copy.
   * The assumption is that big packets will fit in 40 buffers.
   */
  if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
		     cpu->rx_buffers_len < 40))
    {
      u32 curr_len = cpu->rx_buffers_len;
      cpu->rx_buffers_len +=
	vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
			   VHOST_USER_RX_BUFFERS_N - curr_len);

      if (PREDICT_FALSE
	  (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
	{
	  /* In case of buffer starvation, discard some packets from the queue
	   * and log the event.
	   * We keep doing best effort for the remaining packets. */
	  u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
	    n_left + 1 - cpu->rx_buffers_len : 1;
	  flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);

	  n_left -= flush;
	  vlib_increment_simple_counter (vnet_main.
					 interface_main.sw_if_counters +
					 VNET_INTERFACE_COUNTER_DROP,
					 vm->thread_index, vui->sw_if_index,
					 flush);

	  vlib_error_count (vm, vhost_user_input_node.index,
			    VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
	}
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);

  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;

  while (n_left > 0)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u16 desc_current;
      u32 desc_data_offset;
      vring_desc_t *desc_table = txvq->desc;

      if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
	{
	  /* Not enough rx_buffers
	   * Note: We yield on 1 so we don't need to do an additional
	   * check for the next buffer prefetch.
	   */
	  n_left = 0;
	  break;
	}

      desc_current = txvq->avail->ring[last_avail_idx & mask];
      cpu->rx_buffers_len--;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_head = b_current = vlib_get_buffer (vm, bi_current);
      to_next[0] = bi_current;	// We do that now so we can forget about bi_current
      to_next++;
      n_left_to_next--;

      vlib_prefetch_buffer_with_index
	(vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);

      /* Just preset the used descriptor id and length for later */
      txvq->used->ring[last_used_idx & mask].id = desc_current;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_FALSE
	  (n_trace > 0 && vlib_trace_buffer (vm, node, next_index, b_head,
					     /* follow_chain */ 0)))
	{
	  vhost_trace_t *t0 =
	    vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
	  vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
	  n_trace--;
	  vlib_set_trace_count (vm, node, n_trace);
	}

      /* This depends on the setup but is very consistent,
       * so the CPU branch predictor should do a pretty good job
       * at optimizing the decision. */
      if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
	{
	  desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
				      &map_hint);
	  desc_current = 0;
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	}

      desc_data_offset = vui->virtio_net_hdr_sz;

      if (enable_csum)
	{
	  virtio_net_hdr_mrg_rxbuf_t *hdr;
	  u8 *b_data;
	  u16 current;

	  hdr = map_guest_mem (vui, desc_table[desc_current].addr, &map_hint);
	  if (PREDICT_FALSE (hdr == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	  if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
	    {
	      if ((desc_data_offset == desc_table[desc_current].len) &&
		  (desc_table[desc_current].flags & VRING_DESC_F_NEXT))
		{
		  current = desc_table[desc_current].next;
		  b_data = map_guest_mem (vui, desc_table[current].addr,
					  &map_hint);
		  if (PREDICT_FALSE (b_data == 0))
		    {
		      vlib_error_count (vm, node->node_index,
					VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL,
					1);
		      goto out;
		    }
		}
	      else
		b_data = (u8 *) hdr + desc_data_offset;

	      vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	    }
	}

      while (1)
	{
	  /* Get more input if necessary. Or end of packet. */
	  if (desc_data_offset == desc_table[desc_current].len)
	    {
	      if (PREDICT_FALSE (desc_table[desc_current].flags &
				 VRING_DESC_F_NEXT))
		{
		  desc_current = desc_table[desc_current].next;
		  desc_data_offset = 0;
		}
	      else
		{
		  goto out;
		}
	    }

	  /* Get more output if necessary. Or end of packet. */
	  if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
	    {
	      if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
		{
		  /* Cancel speculation */
		  to_next--;
		  n_left_to_next++;

		  /*
		   * Check whether any buffers are left.
		   * If not, just rewind the used buffers and stop.
		   * Note: Scheduled copies are not cancelled. This is
		   * not an issue as they would still be valid. Useless,
		   * but valid.
		   */
		  vhost_user_input_rewind_buffers (vm, cpu, b_head);
		  n_left = 0;
		  goto stop;
		}

	      /* Get next output */
	      cpu->rx_buffers_len--;
	      u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
	      b_current->next_buffer = bi_next;
	      b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
	      bi_current = bi_next;
	      b_current = vlib_get_buffer (vm, bi_current);
	    }

	  /* Prepare a copy order executed later for the data */
	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
	  cpy->len = buffer_data_size - b_current->current_length;
	  cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
	  cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
			      b_current->current_length);
	  cpy->src = desc_table[desc_current].addr + desc_data_offset;

	  desc_data_offset += cpy->len;

	  b_current->current_length += cpy->len;
	  b_head->total_length_not_including_first_buffer += cpy->len;
	}

    out:

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      /* consume the descriptor and return it as used */
      last_avail_idx++;
      last_used_idx++;

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      b_head->error = 0;

      if (current_config_index != ~(u32) 0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

      n_left--;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we can offer to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
						    copy_len, &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_STORE_BARRIER ();
	  txvq->used->idx = last_used_idx;
	  vhost_user_log_dirty_ring (vui, txvq, idx);
	}
    }
stop:
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  txvq->last_used_idx = last_used_idx;
  txvq->last_avail_idx = last_avail_idx;

  /* Do the memory copies */
  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
					    &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }

  /* give buffers back to driver */
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      txvq->n_since_last_int += n_rx_packets;

      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

done:
  return n_rx_packets;
}

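/*
 * Packed-ring variant of the RX path starts here.
 * Mark n_descs_processed descriptors starting at desc_head as used by
 * setting or clearing the AVAIL and USED flag bits according to the current
 * used wrap counter.
 */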
static_always_inline void
vhost_user_mark_desc_consumed (vhost_user_intf_t * vui,
			       vhost_user_vring_t * txvq, u16 desc_head,
			       u16 n_descs_processed)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u16 desc_idx;
  u16 mask = txvq->qsz_mask;

  for (desc_idx = 0; desc_idx < n_descs_processed; desc_idx++)
    {
      if (txvq->used_wrap_counter)
	desc_table[(desc_head + desc_idx) & mask].flags |=
	  (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      else
	desc_table[(desc_head + desc_idx) & mask].flags &=
	  ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      vhost_user_advance_last_used_idx (txvq);
    }
}

static_always_inline void
vhost_user_rx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vhost_user_vring_t * txvq,
			    u16 desc_current)
{
  vhost_user_main_t *vum = &vhost_user_main;
  vring_packed_desc_t *hdr_desc;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->packed_desc[desc_current];
  if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;

  if (!(txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      clib_memcpy_fast (&t->hdr, hdr,
			len > hdr_desc->len ? hdr_desc->len : len);
    }
}

static_always_inline u32
vhost_user_rx_discard_packet_packed (vlib_main_t * vm,
				     vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 discard_max)
{
  u32 discarded_packets = 0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head;

  desc_head = desc_current = txvq->last_used_idx & mask;

  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or indirect).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  while ((discarded_packets != discard_max) &&
	 vhost_user_packed_desc_available (txvq, desc_current))
    {
      vhost_user_advance_last_avail_idx (txvq);
      discarded_packets++;
      desc_current = (desc_current + 1) & mask;
    }

  if (PREDICT_TRUE (discarded_packets))
    vhost_user_mark_desc_consumed (vui, txvq, desc_head, discarded_packets);
  return (discarded_packets);
}

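/*
 * Execute the queued copies for the packed ring. Guest sources are mapped
 * and prefetched four entries ahead; if a mapping fails, the code falls
 * back to the one-by-one loop, which reports MMAP_FAIL when a source still
 * cannot be mapped.
 */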
static_always_inline u32
vhost_user_input_copy_packed (vhost_user_intf_t * vui, vhost_copy_t * cpy,
			      u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3, *src4, *src5, *src6, *src7;
  u8 bad;
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;

  if (PREDICT_TRUE (copy_len >= 8))
    {
      src4 = map_guest_mem (vui, cpy[0].src, map_hint);
      src5 = map_guest_mem (vui, cpy[1].src, map_hint);
      src6 = map_guest_mem (vui, cpy[2].src, map_hint);
      src7 = map_guest_mem (vui, cpy[3].src, map_hint);
      bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
      if (PREDICT_FALSE (bad))
	goto one_by_one;
      CLIB_PREFETCH (src4, 64, LOAD);
      CLIB_PREFETCH (src5, 64, LOAD);
      CLIB_PREFETCH (src6, 64, LOAD);
      CLIB_PREFETCH (src7, 64, LOAD);

      while (PREDICT_TRUE (copy_len >= 8))
	{
	  src0 = src4;
	  src1 = src5;
	  src2 = src6;
	  src3 = src7;

	  src4 = map_guest_mem (vui, cpy[4].src, map_hint);
	  src5 = map_guest_mem (vui, cpy[5].src, map_hint);
	  src6 = map_guest_mem (vui, cpy[6].src, map_hint);
	  src7 = map_guest_mem (vui, cpy[7].src, map_hint);
	  bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
	  if (PREDICT_FALSE (bad))
	    break;

	  CLIB_PREFETCH (src4, 64, LOAD);
	  CLIB_PREFETCH (src5, 64, LOAD);
	  CLIB_PREFETCH (src6, 64, LOAD);
	  CLIB_PREFETCH (src7, 64, LOAD);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  clib_memcpy_fast ((void *) cpy[2].dst, src2, cpy[2].len);
	  clib_memcpy_fast ((void *) cpy[3].dst, src3, cpy[3].len);
	  copy_len -= 4;
	  cpy += 4;
	}
    }

one_by_one:
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	{
	  rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  break;
	}
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return rc;
}

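/*
 * Packed-ring helper: map the virtio-net header of the current descriptor
 * and, when VIRTIO_NET_HDR_F_NEEDS_CSUM is set, locate the packet data
 * (either in the next descriptor or right after the header) and apply the
 * RX offload handling.
 */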
static_always_inline u32
vhost_user_do_offload (vhost_user_intf_t * vui,
		       vring_packed_desc_t * desc_table, u16 desc_current,
		       u16 mask, vlib_buffer_t * b_head, u32 * map_hint)
{
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u8 *b_data;
  u32 desc_data_offset = vui->virtio_net_hdr_sz;

  hdr = map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
  if (PREDICT_FALSE (hdr == 0))
    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
  else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      if (desc_data_offset == desc_table[desc_current].len)
	{
	  desc_current = (desc_current + 1) & mask;
	  b_data =
	    map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
	  if (PREDICT_FALSE (b_data == 0))
	    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  else
	    vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
      else
	{
	  b_data = (u8 *) hdr + desc_data_offset;
	  vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
    }

  return rc;
}

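/*
 * Number of vlib buffers needed to hold desc_len bytes, i.e. the ceiling of
 * desc_len / buffer_data_size, with a shift-based fast path for the default
 * 2048-byte buffers. For example, desc_len = 5000 with 2048-byte buffers
 * yields 3 buffers.
 */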
static_always_inline u32
vhost_user_compute_buffers_required (u32 desc_len, u32 buffer_data_size)
{
  div_t result;
  u32 buffers_required;

  if (PREDICT_TRUE (buffer_data_size == 2048))
    {
      buffers_required = desc_len >> 11;
      if ((desc_len & 2047) != 0)
	buffers_required++;
      return (buffers_required);
    }

  if (desc_len < buffer_data_size)
    return 1;

  result = div (desc_len, buffer_data_size);
  if (result.rem)
    buffers_required = result.quot + 1;
  else
    buffers_required = result.quot;

  return (buffers_required);
}

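/*
 * Walk an indirect descriptor table (the entry count is len / 16, hence the
 * "len >> 4") and return how many vlib buffers the payload will need,
 * excluding the virtio-net header.
 */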
static_always_inline u32
vhost_user_compute_indirect_desc_len (vhost_user_intf_t * vui,
				      vhost_user_vring_t * txvq,
				      u32 buffer_data_size, u16 desc_current,
				      u32 * map_hint)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 desc_data_offset = vui->virtio_net_hdr_sz;
  u16 desc_idx = desc_current;
  u32 n_descs;

  n_descs = desc_table[desc_idx].len >> 4;
  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr, map_hint);
  if (PREDICT_FALSE (desc_table == 0))
    return 0;

  for (desc_idx = 0; desc_idx < n_descs; desc_idx++)
    desc_len += desc_table[desc_idx].len;

  if (PREDICT_TRUE (desc_len > desc_data_offset))
    desc_len -= desc_data_offset;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}

static_always_inline u32
vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 buffer_data_size, u16 * current,
				     u16 * n_left)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 mask = txvq->qsz_mask;

  while (desc_table[*current].flags & VRING_DESC_F_NEXT)
    {
      desc_len += desc_table[*current].len;
      (*n_left)++;
      *current = (*current + 1) & mask;
      vhost_user_advance_last_avail_idx (txvq);
    }
  desc_len += desc_table[*current].len;
  (*n_left)++;
  *current = (*current + 1) & mask;
  vhost_user_advance_last_avail_idx (txvq);

  if (PREDICT_TRUE (desc_len > vui->virtio_net_hdr_sz))
    desc_len -= vui->virtio_net_hdr_sz;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}

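/*
 * Copy-plan builder shared by the packed-ring paths: walk one descriptor,
 * chaining additional vlib buffers as they fill up, and queue one copy
 * order per (descriptor, buffer) overlap. On return, desc_idx points to the
 * next descriptor and desc_data_offset is reset to 0.
 */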
static_always_inline void
vhost_user_assemble_packet (vring_packed_desc_t * desc_table,
			    u16 * desc_idx, vlib_buffer_t * b_head,
			    vlib_buffer_t ** b_current, u32 ** next,
			    vlib_buffer_t *** b, u32 * bi_current,
			    vhost_cpu_t * cpu, u16 * copy_len,
			    u32 * buffers_used, u32 buffers_required,
			    u32 * desc_data_offset, u32 buffer_data_size,
			    u16 mask)
{
  u32 desc_data_l;

  while (*desc_data_offset < desc_table[*desc_idx].len)
    {
      /* Get more output if necessary. Or end of packet. */
      if (PREDICT_FALSE ((*b_current)->current_length == buffer_data_size))
	{
	  /* Get next output */
	  u32 bi_next = **next;
	  (*next)++;
	  (*b_current)->next_buffer = bi_next;
	  (*b_current)->flags |= VLIB_BUFFER_NEXT_PRESENT;
	  *bi_current = bi_next;
	  *b_current = **b;
	  (*b)++;
	  (*buffers_used)++;
	  ASSERT (*buffers_used <= buffers_required);
	}

      /* Prepare a copy order executed later for the data */
      ASSERT (*copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[*copy_len];
      (*copy_len)++;
      desc_data_l = desc_table[*desc_idx].len - *desc_data_offset;
      cpy->len = buffer_data_size - (*b_current)->current_length;
      cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
      cpy->dst = (uword) (vlib_buffer_get_current (*b_current) +
			  (*b_current)->current_length);
      cpy->src = desc_table[*desc_idx].addr + *desc_data_offset;

      *desc_data_offset += cpy->len;

      (*b_current)->current_length += cpy->len;
      b_head->total_length_not_including_first_buffer += cpy->len;
    }
  *desc_idx = (*desc_idx + 1) & mask;
  *desc_data_offset = 0;
}

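/*
 * Per-queue receive function for packed virtio rings. The available
 * descriptors are walked first to compute how many vlib buffers are needed,
 * the buffers are allocated in bulk, packets are assembled and traced, and
 * the descriptors are then marked consumed in order.
 */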
static_always_inline u32
vhost_user_if_input_packed (vlib_main_t *vm, vhost_user_main_t *vum,
			    vhost_user_intf_t *vui, u16 qid,
			    vlib_node_runtime_t *node, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left = 0;
  u32 buffers_required = 0;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u32 current_config_index = ~0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head, last_used_idx;
  vring_packed_desc_t *desc_table = 0;
  u32 n_descs_processed = 0;
  u32 rv;
  vlib_buffer_t **b;
  u32 *next;
  u32 buffers_used = 0;
  u16 current, n_descs_to_process;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->packed_desc == 0))
    goto done;

  /* do we have pending interrupts ? */
  vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
  vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);

  /*
   * In adaptive mode, we try to keep interrupts to a minimum.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver not to interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode, and we must tell the driver we want interrupts again.
   */
  if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used_event->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used_event->flags = VRING_EVENT_F_DISABLE;
    }

  last_used_idx = txvq->last_used_idx & mask;
  desc_head = desc_current = last_used_idx;

  if (vhost_user_packed_desc_available (txvq, desc_current) == 0)
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !vui->is_ready || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq,
						VHOST_USER_DOWN_DISCARD_COUNT);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NOT_READY, rv);
      goto done;
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);

  /*
   * Compute n_left and total buffers needed
   */
  desc_table = txvq->packed_desc;
  current = desc_current;
  while (vhost_user_packed_desc_available (txvq, current) &&
	 (n_left < VLIB_FRAME_SIZE))
    {
      if (desc_table[current].flags & VRING_DESC_F_INDIRECT)
	{
	  buffers_required +=
	    vhost_user_compute_indirect_desc_len (vui, txvq, buffer_data_size,
						  current, &map_hint);
	  n_left++;
	  current = (current + 1) & mask;
	  vhost_user_advance_last_avail_idx (txvq);
	}
      else
	{
	  buffers_required +=
	    vhost_user_compute_chained_desc_len (vui, txvq, buffer_data_size,
						 &current, &n_left);
	}
    }

  /* Something is broken if we need more than 10000 buffers */
  if (PREDICT_FALSE ((buffers_required == 0) || (buffers_required > 10000)))
    {
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  vec_validate (cpu->to_next_list, buffers_required);
  rv = vlib_buffer_alloc (vm, cpu->to_next_list, buffers_required);
  if (PREDICT_FALSE (rv != buffers_required))
    {
      vlib_buffer_free (vm, cpu->to_next_list, rv);
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  next = cpu->to_next_list;
  vec_validate (cpu->rx_buffers_pdesc, buffers_required);
  vlib_get_buffers (vm, next, cpu->rx_buffers_pdesc, buffers_required);
  b = cpu->rx_buffers_pdesc;
  n_descs_processed = n_left;

  while (n_left)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u32 desc_data_offset;
      u16 desc_idx = desc_current;
      u32 n_descs;

      desc_table = txvq->packed_desc;
      to_next[0] = bi_current = next[0];
      b_head = b_current = b[0];
      b++;
      buffers_used++;
      ASSERT (buffers_used <= buffers_required);
      to_next++;
      next++;
      n_left_to_next--;

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      desc_data_offset = vui->virtio_net_hdr_sz;
      n_descs_to_process = 1;

      if (desc_table[desc_idx].flags & VRING_DESC_F_INDIRECT)
	{
	  n_descs = desc_table[desc_idx].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr,
				      &map_hint);
	  desc_idx = 0;
	  if (PREDICT_FALSE (desc_table == 0) ||
	      (enable_csum &&
	       (PREDICT_FALSE
		(vhost_user_do_offload
		 (vui, desc_table, desc_idx, mask, b_head,
		  &map_hint) != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      to_next--;
	      next--;
	      n_left_to_next++;
	      buffers_used--;
	      b--;
	      goto out;
	    }
	  while (n_descs)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs--;
	    }
	}
      else
	{
	  if (enable_csum)
	    {
	      rv = vhost_user_do_offload (vui, desc_table, desc_idx, mask,
					  b_head, &map_hint);
	      if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
		{
		  vlib_error_count (vm, node->node_index, rv, 1);
		  to_next--;
		  next--;
		  n_left_to_next++;
		  buffers_used--;
		  b--;
		  goto out;
		}
	    }
	  /*
	   * For chained descriptors, we process the whole chain in a single
	   * while loop, so count how many descriptors are in the chain.
	   */
	  n_descs_to_process = 1;
	  while (desc_table[desc_idx].flags & VRING_DESC_F_NEXT)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs_to_process++;
	    }
	  vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
				      &b_current, &next, &b, &bi_current,
				      cpu, &copy_len, &buffers_used,
				      buffers_required, &desc_data_offset,
				      buffer_data_size, mask);
	}

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = ~0;
      b_head->error = 0;

      if (current_config_index != ~0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

    out:
      ASSERT (n_left >= n_descs_to_process);
      n_left -= n_descs_to_process;

      /* advance to next descriptor */
      desc_current = (desc_current + n_descs_to_process) & mask;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we can offer to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len,
					     &map_hint);
	  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
	    vlib_error_count (vm, node->node_index, rv, 1);
	  copy_len = 0;
	}
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* Do the memory copies */
  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len, &map_hint);
  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
    vlib_error_count (vm, node->node_index, rv, 1);

  /* Must do the tracing before giving buffers back to driver */
  if (PREDICT_FALSE (n_trace))
    {
      u32 left = n_rx_packets;

      b = cpu->rx_buffers_pdesc;
      while (n_trace && left)
	{
	  if (PREDICT_TRUE
	      (vlib_trace_buffer
	       (vm, node, next_index, b[0], /* follow_chain */ 0)))
	    {
	      vhost_trace_t *t0;
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	      vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
	      last_used_idx = (last_used_idx + 1) & mask;
	      n_trace--;
	      vlib_set_trace_count (vm, node, n_trace);
	    }
	  left--;
	  b++;
	}
    }

  /*
   * Give buffers back to driver.
   */
  vhost_user_mark_desc_consumed (vui, txvq, desc_head, n_descs_processed);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      (txvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      txvq->n_since_last_int += n_rx_packets;
      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

  if (PREDICT_FALSE (buffers_used < buffers_required))
    vlib_buffer_free (vm, next, buffers_required - buffers_used);

done:
  return n_rx_packets;
}

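/*
 * Input node dispatch: iterate over the rx-queue poll vector assembled by
 * the interface layer and invoke the split-ring or packed-ring receive
 * function, with checksum offload handling enabled when the interface
 * negotiated VIRTIO_NET_F_CSUM.
 */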
VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  vhost_user_main_t *vum = &vhost_user_main;
  uword n_rx_packets = 0;
  vhost_user_intf_t *vui;
  vnet_hw_if_rxq_poll_vector_t *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  vnet_hw_if_rxq_poll_vector_t *pve;

  vec_foreach (pve, pv)
    {
      vui = pool_elt_at_index (vum->vhost_user_interfaces, pve->dev_instance);
      if (vhost_user_is_packed_ring_supported (vui))
	{
	  if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
	    n_rx_packets += vhost_user_if_input_packed (
	      vm, vum, vui, pve->queue_id, node, 1);
	  else
	    n_rx_packets += vhost_user_if_input_packed (
	      vm, vum, vui, pve->queue_id, node, 0);
	}
      else
	{
	  if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
	    n_rx_packets +=
	      vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 1);
	  else
	    n_rx_packets +=
	      vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 0);
	}
    }

  return n_rx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_vhost_trace,

  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */