/*
 *------------------------------------------------------------------
 * vhost-user-input
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/udp/udp_packet.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value controls up to how many
 * packets will be discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers gets under this threshold,
 * the RX node will start discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drops in the VM.
 * This value controls the number of copy operations that are stacked
 * before the copy is done for all of them and the descriptors are
 * given back to the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64

extern vlib_node_registration_t vhost_user_input_node;

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error")  \
  _(NO_BUFFER, "no available buffer") \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(NOT_READY, "vhost interface not ready or down") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
  VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;

static __clib_unused char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};

static_always_inline void
vhost_user_rx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * txvq,
		     u16 last_avail_idx)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->desc[desc_current];
  if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
    }
  if (txvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(txvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
    }
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
    }
}

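/*
 * Execute the copy orders accumulated in cpy[]: map each guest source
 * address, prefetch two sources ahead, and memcpy into the vlib buffers.
 * Returns 0 on success, 1 if a guest address could not be mapped.
 */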
static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
	return 1;

      while (PREDICT_TRUE (copy_len >= 4))
	{
	  src0 = src2;
	  src1 = src3;

	  if (PREDICT_FALSE
	      (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
	    return 1;

	  CLIB_PREFETCH (src2, 64, LOAD);
	  CLIB_PREFETCH (src3, 64, LOAD);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	return 1;
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

/**
 * Try to discard packets from the tx ring (VPP RX path).
 * Returns the number of discarded packets.
 */
static_always_inline u32
vhost_user_rx_discard_packet (vlib_main_t * vm,
			      vhost_user_intf_t * vui,
			      vhost_user_vring_t * txvq, u32 discard_max)
{
  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or
   * indirect). Therefore, discarding a packet is like discarding a
   * descriptor.
   */
  u32 discarded_packets = 0;
  u32 avail_idx = txvq->avail->idx;
  u16 mask = txvq->qsz_mask;
  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;
  while (discarded_packets != discard_max)
    {
      if (avail_idx == last_avail_idx)
	goto out;

      u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
      last_avail_idx++;
      txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
      last_used_idx++;
      discarded_packets++;
    }

out:
  txvq->last_avail_idx = last_avail_idx;
  txvq->last_used_idx = last_used_idx;
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);
  return discarded_packets;
}

/*
 * In case of overflow, we need to rewind the array of allocated buffers.
 */
static_always_inline void
vhost_user_input_rewind_buffers (vlib_main_t * vm,
				 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}

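/*
 * Parse the L2/L3/L4 headers of a received packet and translate the
 * virtio-net header (checksum / GSO hints from the guest) into vlib
 * buffer offload flags and gso_size / gso_l4_hdr_sz metadata.
 */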
static_always_inline void
vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
			      virtio_net_hdr_t * hdr)
{
  u8 l4_hdr_sz = 0;
  u8 l4_proto = 0;
  ethernet_header_t *eh = (ethernet_header_t *) b0_data;
  u16 ethertype = clib_net_to_host_u16 (eh->type);
  u16 l2hdr_sz = sizeof (ethernet_header_t);

  if (ethernet_frame_is_tagged (ethertype))
    {
      ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

      ethertype = clib_net_to_host_u16 (vlan->type);
      l2hdr_sz += sizeof (*vlan);
      if (ethertype == ETHERNET_TYPE_VLAN)
	{
	  vlan++;
	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	}
    }
  vnet_buffer (b0)->l2_hdr_offset = 0;
  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
  vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
  b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L4_HDR_OFFSET_VALID);

  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
    {
      ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip4->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
    }
  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip6->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP6;
    }

  if (l4_proto == IP_PROTOCOL_TCP)
    {
      tcp_header_t *tcp = (tcp_header_t *)
	(b0_data + vnet_buffer (b0)->l4_hdr_offset);
      l4_hdr_sz = tcp_header_bytes (tcp);
      b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
    }
  else if (l4_proto == IP_PROTOCOL_UDP)
    {
      l4_hdr_sz = sizeof (udp_header_t);
      b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
    }

  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO;
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
    }
}

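/*
 * Send any pending guest notifications (calls) whose coalescing deadline
 * has expired, for both the TX and RX vrings of this queue pair.
 */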
static_always_inline void
vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_vring_t * txvq,
			       vhost_user_vring_t * rxvq)
{
  f64 now = vlib_time_now (vm);

  if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
    vhost_user_send_call (vm, txvq);

  if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
    vhost_user_send_call (vm, rxvq);
}

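/*
 * Prepare the next frame: resolve the device-input feature arc (if any
 * feature is enabled on this interface) and, when going straight to
 * ethernet-input, mark the frame as single-sw-if-index so that node can
 * take its fast path.
 */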
static_always_inline void
vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vhost_user_intf_t * vui,
			      u32 * current_config_index, u32 * next_index,
			      u32 ** to_next, u32 * n_left_to_next)
{
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;

  if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
    {
      vnet_feature_config_main_t *cm;
      cm = &fm->feature_config_mains[feature_arc_idx];
      *current_config_index = vec_elt (cm->config_index_by_sw_if_index,
				       vui->sw_if_index);
      vnet_get_config_data (&cm->config_main, current_config_index,
			    next_index, 0);
    }

  vlib_get_new_next_frame (vm, node, *next_index, *to_next, *n_left_to_next);

  if (*next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
    {
      /* give some hints to ethernet-input */
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, *next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = vui->sw_if_index;
      ef->hw_if_index = vui->hw_if_index;
      vlib_frame_no_append (f);
    }
}

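/*
 * Per-queue RX function for split (non-packed) rings: drain the guest TX
 * vring into vlib buffers, schedule the memory copies in batches of
 * VHOST_USER_RX_COPY_THRESHOLD, return the descriptors to the guest and
 * raise an interrupt if the guest asked for one.
 * Returns the number of packets received.
 */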
static_always_inline u32
vhost_user_if_input (vlib_main_t * vm,
		     vhost_user_main_t * vum,
		     vhost_user_intf_t * vui,
		     u16 qid, vlib_node_runtime_t * node,
		     vnet_hw_if_rx_mode mode, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u32 current_config_index = ~(u32) 0;
  u16 mask = txvq->qsz_mask;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->avail == 0))
    goto done;

  {
    /* do we have pending interrupts ? */
    vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
    vhost_user_input_do_interrupt (vm, txvq, rxvq);
  }

  /*
   * For adaptive mode, it is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupt.
   */
  if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used->flags = VRING_USED_F_NO_NOTIFY;
    }

  if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
    goto done;

  n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);

  /* nothing to do */
  if (PREDICT_FALSE (n_left == 0))
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      vhost_user_rx_discard_packet (vm, vui, txvq,
				    VHOST_USER_DOWN_DISCARD_COUNT);
      goto done;
    }

  if (PREDICT_FALSE (n_left == (mask + 1)))
    {
      /*
       * Informational error logging when VPP is not
       * receiving packets fast enough.
       */
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
    }

  if (n_left > VLIB_FRAME_SIZE)
    n_left = VLIB_FRAME_SIZE;

  /*
   * For small packets (<2kB), we will not need more than one vlib buffer
   * per packet. In case packets are bigger, we will just yield at some
   * point in the loop and come back later. This is not an issue as, for
   * big packets, the processing cost really comes from the memory copy.
   * The assumption is that big packets will fit in 40 buffers.
   */
  if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
		     cpu->rx_buffers_len < 40))
    {
      u32 curr_len = cpu->rx_buffers_len;
      cpu->rx_buffers_len +=
	vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
			   VHOST_USER_RX_BUFFERS_N - curr_len);

      if (PREDICT_FALSE
	  (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
	{
	  /* In case of buffer starvation, discard some packets from the
	   * queue and log the event.
	   * We keep doing best effort for the remaining packets. */
	  u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
	    n_left + 1 - cpu->rx_buffers_len : 1;
	  flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);

	  n_left -= flush;
	  vlib_increment_simple_counter (vnet_main.
					 interface_main.sw_if_counters +
					 VNET_INTERFACE_COUNTER_DROP,
					 vm->thread_index, vui->sw_if_index,
					 flush);

	  vlib_error_count (vm, vhost_user_input_node.index,
			    VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
	}
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);

  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;

  while (n_left > 0)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u16 desc_current;
      u32 desc_data_offset;
      vring_desc_t *desc_table = txvq->desc;

      if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
	{
	  /* Not enough rx_buffers
	   * Note: We yield on 1 so we don't need to do an additional
	   * check for the next buffer prefetch.
	   */
	  n_left = 0;
	  break;
	}

      desc_current = txvq->avail->ring[last_avail_idx & mask];
      cpu->rx_buffers_len--;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_head = b_current = vlib_get_buffer (vm, bi_current);
      to_next[0] = bi_current;	/* We do that now so we can forget about bi_current */
      to_next++;
      n_left_to_next--;

      vlib_prefetch_buffer_with_index
	(vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);

      /* Just preset the used descriptor id and length for later */
      txvq->used->ring[last_used_idx & mask].id = desc_current;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_FALSE
	  (n_trace > 0 && vlib_trace_buffer (vm, node, next_index, b_head,
					     /* follow_chain */ 0)))
	{
	  vhost_trace_t *t0 =
	    vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
	  vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
	  n_trace--;
	  vlib_set_trace_count (vm, node, n_trace);
	}

      /* This depends on the setup but is very consistent,
       * so I think the CPU branch predictor will do a pretty good job
       * at optimizing the decision. */
      if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
	{
	  desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
				      &map_hint);
	  desc_current = 0;
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	}

      desc_data_offset = vui->virtio_net_hdr_sz;

      if (enable_csum)
	{
	  virtio_net_hdr_mrg_rxbuf_t *hdr;
	  u8 *b_data;
	  u16 current;

	  hdr = map_guest_mem (vui, desc_table[desc_current].addr, &map_hint);
	  if (PREDICT_FALSE (hdr == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	  if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
	    {
	      if ((desc_data_offset == desc_table[desc_current].len) &&
		  (desc_table[desc_current].flags & VRING_DESC_F_NEXT))
		{
		  current = desc_table[desc_current].next;
		  b_data = map_guest_mem (vui, desc_table[current].addr,
					  &map_hint);
		  if (PREDICT_FALSE (b_data == 0))
		    {
		      vlib_error_count (vm, node->node_index,
					VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL,
					1);
		      goto out;
		    }
		}
	      else
		b_data = (u8 *) hdr + desc_data_offset;

	      vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	    }
	}

      while (1)
	{
	  /* Get more input if necessary. Or end of packet. */
	  if (desc_data_offset == desc_table[desc_current].len)
	    {
	      if (PREDICT_FALSE (desc_table[desc_current].flags &
				 VRING_DESC_F_NEXT))
		{
		  desc_current = desc_table[desc_current].next;
		  desc_data_offset = 0;
		}
	      else
		{
		  goto out;
		}
	    }

	  /* Get more output if necessary. Or end of packet. */
	  if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
	    {
	      if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
		{
		  /* Cancel speculation */
		  to_next--;
		  n_left_to_next++;

		  /*
		   * Checking if there are some left buffers.
		   * If not, just rewind the used buffers and stop.
		   * Note: Scheduled copies are not cancelled. This is
		   * not an issue as they would still be valid. Useless,
		   * but valid.
		   */
		  vhost_user_input_rewind_buffers (vm, cpu, b_head);
		  n_left = 0;
		  goto stop;
		}

	      /* Get next output */
	      cpu->rx_buffers_len--;
	      u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
	      b_current->next_buffer = bi_next;
	      b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
	      bi_current = bi_next;
	      b_current = vlib_get_buffer (vm, bi_current);
	    }

	  /* Prepare a copy order executed later for the data */
	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
	  cpy->len = buffer_data_size - b_current->current_length;
	  cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
	  cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
			      b_current->current_length);
	  cpy->src = desc_table[desc_current].addr + desc_data_offset;

	  desc_data_offset += cpy->len;

	  b_current->current_length += cpy->len;
	  b_head->total_length_not_including_first_buffer += cpy->len;
	}

    out:

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      /* consume the descriptor and return it as used */
      last_avail_idx++;
      last_used_idx++;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      b_head->error = 0;

      if (current_config_index != ~(u32) 0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

      n_left--;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we can offer to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
						    copy_len, &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_STORE_BARRIER ();
	  txvq->used->idx = last_used_idx;
	  vhost_user_log_dirty_ring (vui, txvq, idx);
	}
    }
stop:
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  txvq->last_used_idx = last_used_idx;
  txvq->last_avail_idx = last_avail_idx;

  /* Do the memory copies */
  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
					    &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }

  /* give buffers back to driver */
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      txvq->n_since_last_int += n_rx_packets;

      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

done:
  return n_rx_packets;
}

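/*
 * Packed-ring only: flip the AVAIL/USED flag bits of n_descs_processed
 * descriptors (according to the current used wrap counter) so the guest
 * sees them as consumed, advancing last_used_idx as we go.
 */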
static_always_inline void
vhost_user_mark_desc_consumed (vhost_user_intf_t * vui,
			       vhost_user_vring_t * txvq, u16 desc_head,
			       u16 n_descs_processed)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u16 desc_idx;
  u16 mask = txvq->qsz_mask;

  for (desc_idx = 0; desc_idx < n_descs_processed; desc_idx++)
    {
      if (txvq->used_wrap_counter)
	desc_table[(desc_head + desc_idx) & mask].flags |=
	  (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      else
	desc_table[(desc_head + desc_idx) & mask].flags &=
	  ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      vhost_user_advance_last_used_idx (txvq);
    }
}

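/*
 * Packed-ring variant of vhost_user_rx_trace: record the descriptor
 * classification and copy the virtio-net header for the trace entry.
 */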
static_always_inline void
vhost_user_rx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vhost_user_vring_t * txvq,
			    u16 desc_current)
{
  vhost_user_main_t *vum = &vhost_user_main;
  vring_packed_desc_t *hdr_desc;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->packed_desc[desc_current];
  if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;

  if (!(txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      clib_memcpy_fast (&t->hdr, hdr,
			len > hdr_desc->len ? hdr_desc->len : len);
    }
}

static_always_inline u32
vhost_user_rx_discard_packet_packed (vlib_main_t * vm,
				     vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 discard_max)
{
  u32 discarded_packets = 0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head;

  desc_head = desc_current = txvq->last_used_idx & mask;

  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or
   * indirect). Therefore, discarding a packet is like discarding a
   * descriptor.
   */
  while ((discarded_packets != discard_max) &&
	 vhost_user_packed_desc_available (txvq, desc_current))
    {
      vhost_user_advance_last_avail_idx (txvq);
      discarded_packets++;
      desc_current = (desc_current + 1) & mask;
    }

  if (PREDICT_TRUE (discarded_packets))
    vhost_user_mark_desc_consumed (vui, txvq, desc_head, discarded_packets);
  return (discarded_packets);
}

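/*
 * Packed-ring copy executor: same job as vhost_user_input_copy but
 * pipelined four copies deep; returns a VHOST_USER_INPUT_FUNC_ERROR_*
 * code instead of a boolean.
 */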
static_always_inline u32
vhost_user_input_copy_packed (vhost_user_intf_t * vui, vhost_copy_t * cpy,
			      u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3, *src4, *src5, *src6, *src7;
  u8 bad;
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;

  if (PREDICT_TRUE (copy_len >= 8))
    {
      src4 = map_guest_mem (vui, cpy[0].src, map_hint);
      src5 = map_guest_mem (vui, cpy[1].src, map_hint);
      src6 = map_guest_mem (vui, cpy[2].src, map_hint);
      src7 = map_guest_mem (vui, cpy[3].src, map_hint);
      bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
      if (PREDICT_FALSE (bad))
	goto one_by_one;
      CLIB_PREFETCH (src4, 64, LOAD);
      CLIB_PREFETCH (src5, 64, LOAD);
      CLIB_PREFETCH (src6, 64, LOAD);
      CLIB_PREFETCH (src7, 64, LOAD);

      while (PREDICT_TRUE (copy_len >= 8))
	{
	  src0 = src4;
	  src1 = src5;
	  src2 = src6;
	  src3 = src7;

	  src4 = map_guest_mem (vui, cpy[4].src, map_hint);
	  src5 = map_guest_mem (vui, cpy[5].src, map_hint);
	  src6 = map_guest_mem (vui, cpy[6].src, map_hint);
	  src7 = map_guest_mem (vui, cpy[7].src, map_hint);
	  bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
	  if (PREDICT_FALSE (bad))
	    break;

	  CLIB_PREFETCH (src4, 64, LOAD);
	  CLIB_PREFETCH (src5, 64, LOAD);
	  CLIB_PREFETCH (src6, 64, LOAD);
	  CLIB_PREFETCH (src7, 64, LOAD);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  clib_memcpy_fast ((void *) cpy[2].dst, src2, cpy[2].len);
	  clib_memcpy_fast ((void *) cpy[3].dst, src3, cpy[3].len);
	  copy_len -= 4;
	  cpy += 4;
	}
    }

one_by_one:
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	{
	  rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  break;
	}
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return rc;
}

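/*
 * Packed-ring offload handling: map the virtio-net header and, if the
 * guest requested checksum offload, locate the packet data (in the same
 * or the next descriptor) and apply vhost_user_handle_rx_offload.
 */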
static_always_inline u32
vhost_user_do_offload (vhost_user_intf_t * vui,
		       vring_packed_desc_t * desc_table, u16 desc_current,
		       u16 mask, vlib_buffer_t * b_head, u32 * map_hint)
{
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u8 *b_data;
  u32 desc_data_offset = vui->virtio_net_hdr_sz;

  hdr = map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
  if (PREDICT_FALSE (hdr == 0))
    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
  else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      if (desc_data_offset == desc_table[desc_current].len)
	{
	  desc_current = (desc_current + 1) & mask;
	  b_data =
	    map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
	  if (PREDICT_FALSE (b_data == 0))
	    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  else
	    vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
      else
	{
	  b_data = (u8 *) hdr + desc_data_offset;
	  vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
    }

  return rc;
}

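/*
 * Number of vlib buffers needed to hold desc_len bytes, i.e. a round-up
 * division by buffer_data_size, with a shift-based fast path for the
 * common 2048-byte buffer size.
 */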
static_always_inline u32
vhost_user_compute_buffers_required (u32 desc_len, u32 buffer_data_size)
{
  div_t result;
  u32 buffers_required;

  if (PREDICT_TRUE (buffer_data_size == 2048))
    {
      buffers_required = desc_len >> 11;
      if ((desc_len & 2047) != 0)
	buffers_required++;
      return (buffers_required);
    }

  if (desc_len < buffer_data_size)
    return 1;

  result = div (desc_len, buffer_data_size);
  if (result.rem)
    buffers_required = result.quot + 1;
  else
    buffers_required = result.quot;

  return (buffers_required);
}

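/*
 * For an indirect descriptor, map its descriptor table, sum the lengths
 * of all entries (minus the virtio-net header) and return how many vlib
 * buffers the packet will need. Returns 0 if the table cannot be mapped.
 */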
static_always_inline u32
vhost_user_compute_indirect_desc_len (vhost_user_intf_t * vui,
				      vhost_user_vring_t * txvq,
				      u32 buffer_data_size, u16 desc_current,
				      u32 * map_hint)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 desc_data_offset = vui->virtio_net_hdr_sz;
  u16 desc_idx = desc_current;
  u32 n_descs;

  n_descs = desc_table[desc_idx].len >> 4;
  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr, map_hint);
  if (PREDICT_FALSE (desc_table == 0))
    return 0;

  for (desc_idx = 0; desc_idx < n_descs; desc_idx++)
    desc_len += desc_table[desc_idx].len;

  if (PREDICT_TRUE (desc_len > desc_data_offset))
    desc_len -= desc_data_offset;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}

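/*
 * Walk a chain of packed descriptors (VRING_DESC_F_NEXT), advancing the
 * avail index and the caller's cursor as we go, and return how many vlib
 * buffers the whole chain will need (virtio-net header excluded).
 */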
static_always_inline u32
vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 buffer_data_size, u16 * current,
				     u16 * n_left)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 mask = txvq->qsz_mask;

  while (desc_table[*current].flags & VRING_DESC_F_NEXT)
    {
      desc_len += desc_table[*current].len;
      (*n_left)++;
      *current = (*current + 1) & mask;
      vhost_user_advance_last_avail_idx (txvq);
    }
  desc_len += desc_table[*current].len;
  (*n_left)++;
  *current = (*current + 1) & mask;
  vhost_user_advance_last_avail_idx (txvq);

  if (PREDICT_TRUE (desc_len > vui->virtio_net_hdr_sz))
    desc_len -= vui->virtio_net_hdr_sz;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}

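/*
 * Copy-schedule the payload of one packed descriptor into the buffer
 * chain being built: chain a fresh vlib buffer whenever the current one
 * is full, then queue a copy order and advance the descriptor cursor.
 */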
static_always_inline void
vhost_user_assemble_packet (vring_packed_desc_t * desc_table,
			    u16 * desc_idx, vlib_buffer_t * b_head,
			    vlib_buffer_t ** b_current, u32 ** next,
			    vlib_buffer_t *** b, u32 * bi_current,
			    vhost_cpu_t * cpu, u16 * copy_len,
			    u32 * buffers_used, u32 buffers_required,
			    u32 * desc_data_offset, u32 buffer_data_size,
			    u16 mask)
{
  u32 desc_data_l;

  while (*desc_data_offset < desc_table[*desc_idx].len)
    {
      /* Get more output if necessary. Or end of packet. */
      if (PREDICT_FALSE ((*b_current)->current_length == buffer_data_size))
	{
	  /* Get next output */
	  u32 bi_next = **next;
	  (*next)++;
	  (*b_current)->next_buffer = bi_next;
	  (*b_current)->flags |= VLIB_BUFFER_NEXT_PRESENT;
	  *bi_current = bi_next;
	  *b_current = **b;
	  (*b)++;
	  (*buffers_used)++;
	  ASSERT (*buffers_used <= buffers_required);
	}

      /* Prepare a copy order executed later for the data */
      ASSERT (*copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[*copy_len];
      (*copy_len)++;
      desc_data_l = desc_table[*desc_idx].len - *desc_data_offset;
      cpy->len = buffer_data_size - (*b_current)->current_length;
      cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
      cpy->dst = (uword) (vlib_buffer_get_current (*b_current) +
			  (*b_current)->current_length);
      cpy->src = desc_table[*desc_idx].addr + *desc_data_offset;

      *desc_data_offset += cpy->len;

      (*b_current)->current_length += cpy->len;
      b_head->total_length_not_including_first_buffer += cpy->len;
    }
  *desc_idx = (*desc_idx + 1) & mask;
  *desc_data_offset = 0;
}

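/*
 * Per-queue RX function for packed rings: pre-compute how many vlib
 * buffers the available descriptors will need, allocate them in one go,
 * assemble the packets, then mark the descriptors consumed and notify
 * the guest. Returns the number of packets received.
 */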
static_always_inline u32
vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
			    vhost_user_intf_t * vui, u16 qid,
			    vlib_node_runtime_t * node,
			    vnet_hw_if_rx_mode mode, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left = 0;
  u32 buffers_required = 0;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u32 current_config_index = ~0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head, last_used_idx;
  vring_packed_desc_t *desc_table = 0;
  u32 n_descs_processed = 0;
  u32 rv;
  vlib_buffer_t **b;
  u32 *next;
  u32 buffers_used = 0;
  u16 current, n_descs_to_process;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->packed_desc == 0))
    goto done;

  /* do we have pending interrupts ? */
  vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
  vhost_user_input_do_interrupt (vm, txvq, rxvq);

  /*
   * For adaptive mode, it is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupt.
   */
  if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used_event->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used_event->flags = VRING_EVENT_F_DISABLE;
    }

  last_used_idx = txvq->last_used_idx & mask;
  desc_head = desc_current = last_used_idx;

  if (vhost_user_packed_desc_available (txvq, desc_current) == 0)
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !vui->is_ready || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq,
						VHOST_USER_DOWN_DISCARD_COUNT);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NOT_READY, rv);
      goto done;
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);

  /*
   * Compute n_left and total buffers needed
   */
  desc_table = txvq->packed_desc;
  current = desc_current;
  while (vhost_user_packed_desc_available (txvq, current) &&
	 (n_left < VLIB_FRAME_SIZE))
    {
      if (desc_table[current].flags & VRING_DESC_F_INDIRECT)
	{
	  buffers_required +=
	    vhost_user_compute_indirect_desc_len (vui, txvq, buffer_data_size,
						  current, &map_hint);
	  n_left++;
	  current = (current + 1) & mask;
	  vhost_user_advance_last_avail_idx (txvq);
	}
      else
	{
	  buffers_required +=
	    vhost_user_compute_chained_desc_len (vui, txvq, buffer_data_size,
						 &current, &n_left);
	}
    }

  /* Something is broken if we need more than 10000 buffers */
  if (PREDICT_FALSE ((buffers_required == 0) || (buffers_required > 10000)))
    {
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  vec_validate (cpu->to_next_list, buffers_required);
  rv = vlib_buffer_alloc (vm, cpu->to_next_list, buffers_required);
  if (PREDICT_FALSE (rv != buffers_required))
    {
      vlib_buffer_free (vm, cpu->to_next_list, rv);
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  next = cpu->to_next_list;
  vec_validate (cpu->rx_buffers_pdesc, buffers_required);
  vlib_get_buffers (vm, next, cpu->rx_buffers_pdesc, buffers_required);
  b = cpu->rx_buffers_pdesc;
  n_descs_processed = n_left;

  while (n_left)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u32 desc_data_offset;
      u16 desc_idx = desc_current;
      u32 n_descs;

      desc_table = txvq->packed_desc;
      to_next[0] = bi_current = next[0];
      b_head = b_current = b[0];
      b++;
      buffers_used++;
      ASSERT (buffers_used <= buffers_required);
      to_next++;
      next++;
      n_left_to_next--;

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      desc_data_offset = vui->virtio_net_hdr_sz;
      n_descs_to_process = 1;

      if (desc_table[desc_idx].flags & VRING_DESC_F_INDIRECT)
	{
	  n_descs = desc_table[desc_idx].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr,
				      &map_hint);
	  desc_idx = 0;
	  if (PREDICT_FALSE (desc_table == 0) ||
	      (enable_csum &&
	       (PREDICT_FALSE
		(vhost_user_do_offload
		 (vui, desc_table, desc_idx, mask, b_head,
		  &map_hint) != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      to_next--;
	      next--;
	      n_left_to_next++;
	      buffers_used--;
	      b--;
	      goto out;
	    }
	  while (n_descs)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs--;
	    }
	}
      else
	{
	  if (enable_csum)
	    {
	      rv = vhost_user_do_offload (vui, desc_table, desc_idx, mask,
					  b_head, &map_hint);
	      if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
		{
		  vlib_error_count (vm, node->node_index, rv, 1);
		  to_next--;
		  next--;
		  n_left_to_next++;
		  buffers_used--;
		  b--;
		  goto out;
		}
	    }
	  /*
	   * For chained descriptors, we process the whole chain in a single
	   * while loop. So count how many descriptors are in the chain.
	   */
	  n_descs_to_process = 1;
	  while (desc_table[desc_idx].flags & VRING_DESC_F_NEXT)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs_to_process++;
	    }
	  vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
				      &b_current, &next, &b, &bi_current,
				      cpu, &copy_len, &buffers_used,
				      buffers_required, &desc_data_offset,
				      buffer_data_size, mask);
	}

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = ~0;
      b_head->error = 0;

      if (current_config_index != ~0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

    out:
      ASSERT (n_left >= n_descs_to_process);
      n_left -= n_descs_to_process;

      /* advance to next descriptor */
      desc_current = (desc_current + n_descs_to_process) & mask;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we can offer to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len,
					     &map_hint);
	  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
	    vlib_error_count (vm, node->node_index, rv, 1);
	  copy_len = 0;
	}
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* Do the memory copies */
  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len, &map_hint);
  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
    vlib_error_count (vm, node->node_index, rv, 1);

  /* Must do the tracing before giving buffers back to driver */
  if (PREDICT_FALSE (n_trace))
    {
      u32 left = n_rx_packets;

      b = cpu->rx_buffers_pdesc;
      while (n_trace && left)
	{
	  if (PREDICT_TRUE
	      (vlib_trace_buffer
	       (vm, node, next_index, b[0], /* follow_chain */ 0)))
	    {
	      vhost_trace_t *t0;
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	      vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
	      last_used_idx = (last_used_idx + 1) & mask;
	      n_trace--;
	      vlib_set_trace_count (vm, node, n_trace);
	    }
	  left--;
	  b++;
	}
    }

  /*
   * Give buffers back to driver.
   */
  vhost_user_mark_desc_consumed (vui, txvq, desc_head, n_descs_processed);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      (txvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      txvq->n_since_last_int += n_rx_packets;
      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

  if (PREDICT_FALSE (buffers_used < buffers_required))
    vlib_buffer_free (vm, next, buffers_required - buffers_used);

done:
  return n_rx_packets;
}

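/*
 * Input node dispatch: for every device/queue assigned to this thread
 * (when polling, or when an interrupt is pending), select the packed- or
 * split-ring RX function and the checksum-offload variant based on the
 * negotiated virtio features.
 */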
VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  vhost_user_main_t *vum = &vhost_user_main;
  uword n_rx_packets = 0;
  vhost_user_intf_t *vui;
  vnet_device_input_runtime_t *rt =
    (vnet_device_input_runtime_t *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  vec_foreach (dq, rt->devices_and_queues)
  {
    if ((node->state == VLIB_NODE_STATE_POLLING) ||
	clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
      {
	vui =
	  pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
	if (vhost_user_is_packed_ring_supported (vui))
	  {
	    if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
	      n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
							  dq->queue_id, node,
							  dq->mode, 1);
	    else
	      n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
							  dq->queue_id, node,
							  dq->mode, 0);
	  }
	else
	  {
	    if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
	      n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
						   node, dq->mode, 1);
	    else
	      n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
						   node, dq->mode, 0);
	  }
      }
  }

  return n_rx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_vhost_trace,

  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */