Mohsin Kazmie7cde312018-06-26 17:20:11 +02001/*
2 *------------------------------------------------------------------
3 * vhost-user-input
4 *
5 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at:
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *------------------------------------------------------------------
18 */
19
20#include <fcntl.h> /* for open */
21#include <sys/ioctl.h>
22#include <sys/socket.h>
23#include <sys/un.h>
24#include <sys/stat.h>
25#include <sys/types.h>
26#include <sys/uio.h> /* for iovec */
27#include <netinet/in.h>
28#include <sys/vfs.h>
29
30#include <linux/if_arp.h>
31#include <linux/if_tun.h>
32
33#include <vlib/vlib.h>
34#include <vlib/unix/unix.h>
35
36#include <vnet/ip/ip.h>
37
38#include <vnet/ethernet/ethernet.h>
39#include <vnet/devices/devices.h>
40#include <vnet/feature/feature.h>
41
Steven Luong4208a4c2019-05-06 08:51:56 -070042#include <vnet/devices/virtio/virtio.h>
Mohsin Kazmie7cde312018-06-26 17:20:11 +020043#include <vnet/devices/virtio/vhost_user.h>
44#include <vnet/devices/virtio/vhost_user_inline.h>
45
46/*
47 * When an RX queue is down but active, received packets
 48 * must be discarded. This value caps how many packets
 49 * will be discarded during each round.
50 */
51#define VHOST_USER_DOWN_DISCARD_COUNT 256
52
53/*
54 * When the number of available buffers gets under this threshold,
55 * RX node will start discarding packets.
56 */
57#define VHOST_USER_RX_BUFFER_STARVATION 32
58
59/*
60 * On the receive side, the host should free descriptors as soon
61 * as possible in order to avoid TX drop in the VM.
62 * This value controls the number of copy operations that are stacked
 63 * before the copies are all performed in one batch and the
 64 * descriptors are given back to the guest.
65 * The value 64 was obtained by testing (48 and 128 were not as good).
66 */
67#define VHOST_USER_RX_COPY_THRESHOLD 64
68
BenoƮt Ganne47727c02019-02-12 13:35:08 +010069extern vlib_node_registration_t vhost_user_input_node;
Mohsin Kazmie7cde312018-06-26 17:20:11 +020070
71#define foreach_vhost_user_input_func_error \
72 _(NO_ERROR, "no error") \
73 _(NO_BUFFER, "no available buffer") \
74 _(MMAP_FAIL, "mmap failure") \
75 _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
76 _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
Steven Luongbc0d9ff2020-03-23 09:34:59 -070077 _(NOT_READY, "vhost interface not ready or down") \
Mohsin Kazmie7cde312018-06-26 17:20:11 +020078 _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")
79
80typedef enum
81{
82#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
83 foreach_vhost_user_input_func_error
84#undef _
85 VHOST_USER_INPUT_FUNC_N_ERROR,
86} vhost_user_input_func_error_t;
87
88static __clib_unused char *vhost_user_input_func_error_strings[] = {
89#define _(n,s) s,
90 foreach_vhost_user_input_func_error
91#undef _
92};
93
94static_always_inline void
95vhost_user_rx_trace (vhost_trace_t * t,
96 vhost_user_intf_t * vui, u16 qid,
Damjan Marionba1afaa2018-11-22 22:16:19 +010097 vlib_buffer_t * b, vhost_user_vring_t * txvq,
98 u16 last_avail_idx)
Mohsin Kazmie7cde312018-06-26 17:20:11 +020099{
100 vhost_user_main_t *vum = &vhost_user_main;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200101 u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
102 vring_desc_t *hdr_desc = 0;
103 virtio_net_hdr_mrg_rxbuf_t *hdr;
104 u32 hint = 0;
105
Dave Barachb7b92992018-10-17 10:38:51 -0400106 clib_memset (t, 0, sizeof (*t));
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200107 t->device_index = vui - vum->vhost_user_interfaces;
108 t->qid = qid;
109
110 hdr_desc = &txvq->desc[desc_current];
111 if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
112 {
113 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
 114 /* The header is in the first descriptor of the indirect table */
115 hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
116 }
117 if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
118 {
119 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
120 }
121 if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
122 !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
123 {
124 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
125 }
126
127 t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
128
129 if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
130 {
131 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
132 }
133 else
134 {
135 u32 len = vui->virtio_net_hdr_sz;
136 memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
137 }
138}
139
140static_always_inline u32
141vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
142 u16 copy_len, u32 * map_hint)
143{
144 void *src0, *src1, *src2, *src3;
145 if (PREDICT_TRUE (copy_len >= 4))
146 {
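      /* Map and prefetch two copy sources ahead so the guest-memory
       * translation and cache fill overlap with the memcpy of the two
       * copies currently being executed. */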
147 if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
148 return 1;
149 if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
150 return 1;
151
152 while (PREDICT_TRUE (copy_len >= 4))
153 {
154 src0 = src2;
155 src1 = src3;
156
157 if (PREDICT_FALSE
158 (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
159 return 1;
160 if (PREDICT_FALSE
161 (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
162 return 1;
163
164 CLIB_PREFETCH (src2, 64, LOAD);
165 CLIB_PREFETCH (src3, 64, LOAD);
166
Dave Barach178cf492018-11-13 16:34:13 -0500167 clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
168 clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200169 copy_len -= 2;
170 cpy += 2;
171 }
172 }
173 while (copy_len)
174 {
175 if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
176 return 1;
Dave Barach178cf492018-11-13 16:34:13 -0500177 clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200178 copy_len -= 1;
179 cpy += 1;
180 }
181 return 0;
182}
183
184/**
185 * Try to discard packets from the tx ring (VPP RX path).
186 * Returns the number of discarded packets.
187 */
188static_always_inline u32
189vhost_user_rx_discard_packet (vlib_main_t * vm,
190 vhost_user_intf_t * vui,
191 vhost_user_vring_t * txvq, u32 discard_max)
192{
193 /*
194 * On the RX side, each packet corresponds to one descriptor
195 * (it is the same whether it is a shallow descriptor, chained, or indirect).
196 * Therefore, discarding a packet is like discarding a descriptor.
197 */
198 u32 discarded_packets = 0;
199 u32 avail_idx = txvq->avail->idx;
Damjan Marionba1afaa2018-11-22 22:16:19 +0100200 u16 mask = txvq->qsz_mask;
201 u16 last_avail_idx = txvq->last_avail_idx;
202 u16 last_used_idx = txvq->last_used_idx;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200203 while (discarded_packets != discard_max)
204 {
Steven Luong7e5735d2019-03-12 21:35:42 -0700205 if (avail_idx == last_avail_idx)
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200206 goto out;
207
Damjan Marionba1afaa2018-11-22 22:16:19 +0100208 u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
209 last_avail_idx++;
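      /* Hand the descriptor chain straight back to the guest with a zero
       * length: the packet is dropped without copying any data. */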
210 txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
211 txvq->used->ring[last_used_idx & mask].len = 0;
212 vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
213 last_used_idx++;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200214 discarded_packets++;
215 }
216
217out:
Damjan Marionba1afaa2018-11-22 22:16:19 +0100218 txvq->last_avail_idx = last_avail_idx;
219 txvq->last_used_idx = last_used_idx;
Damjan Marion96e8cd02018-11-23 14:56:55 +0100220 CLIB_MEMORY_STORE_BARRIER ();
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200221 txvq->used->idx = txvq->last_used_idx;
222 vhost_user_log_dirty_ring (vui, txvq, idx);
223 return discarded_packets;
224}
225
226/*
227 * In case of overflow, we need to rewind the array of allocated buffers.
228 */
Damjan Marion46bf8662018-11-22 22:25:38 +0100229static_always_inline void
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200230vhost_user_input_rewind_buffers (vlib_main_t * vm,
231 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
232{
233 u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
234 vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
235 b_current->current_length = 0;
236 b_current->flags = 0;
237 while (b_current != b_head)
238 {
239 cpu->rx_buffers_len++;
240 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
241 b_current = vlib_get_buffer (vm, bi_current);
242 b_current->current_length = 0;
243 b_current->flags = 0;
244 }
245 cpu->rx_buffers_len++;
246}
247
Steven Luong4208a4c2019-05-06 08:51:56 -0700248static_always_inline void
249vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
250 virtio_net_hdr_t * hdr)
251{
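  /* Parse the Ethernet/VLAN, IP and TCP/UDP headers in b0_data and turn
   * the checksum/GSO hints from the virtio-net header into the
   * corresponding VPP buffer offload flags. */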
252 u8 l4_hdr_sz = 0;
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700253 u8 l4_proto = 0;
254 ethernet_header_t *eh = (ethernet_header_t *) b0_data;
255 u16 ethertype = clib_net_to_host_u16 (eh->type);
256 u16 l2hdr_sz = sizeof (ethernet_header_t);
Steven Luong4208a4c2019-05-06 08:51:56 -0700257
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700258 if (ethernet_frame_is_tagged (ethertype))
Steven Luong4208a4c2019-05-06 08:51:56 -0700259 {
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700260 ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
Steven Luong4208a4c2019-05-06 08:51:56 -0700261
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700262 ethertype = clib_net_to_host_u16 (vlan->type);
263 l2hdr_sz += sizeof (*vlan);
264 if (ethertype == ETHERNET_TYPE_VLAN)
Steven Luong4208a4c2019-05-06 08:51:56 -0700265 {
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700266 vlan++;
Steven Luong4208a4c2019-05-06 08:51:56 -0700267 ethertype = clib_net_to_host_u16 (vlan->type);
268 l2hdr_sz += sizeof (*vlan);
Steven Luong4208a4c2019-05-06 08:51:56 -0700269 }
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700270 }
271 vnet_buffer (b0)->l2_hdr_offset = 0;
272 vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
273 vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
274 b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
275 VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
276 VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
Steven Luong4208a4c2019-05-06 08:51:56 -0700277
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700278 if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
279 {
280 ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
281 l4_proto = ip4->protocol;
282 b0->flags |= VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
283 }
284 else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
285 {
286 ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
287 l4_proto = ip6->protocol;
288 b0->flags |= VNET_BUFFER_F_IS_IP6;
289 }
Steven Luong4208a4c2019-05-06 08:51:56 -0700290
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700291 if (l4_proto == IP_PROTOCOL_TCP)
292 {
293 tcp_header_t *tcp = (tcp_header_t *)
294 (b0_data + vnet_buffer (b0)->l4_hdr_offset);
295 l4_hdr_sz = tcp_header_bytes (tcp);
296 tcp->checksum = 0;
297 b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
298 }
299 else if (l4_proto == IP_PROTOCOL_UDP)
300 {
301 udp_header_t *udp =
302 (udp_header_t *) (b0_data + vnet_buffer (b0)->l4_hdr_offset);
303 l4_hdr_sz = sizeof (*udp);
304 udp->checksum = 0;
305 b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
Steven Luong4208a4c2019-05-06 08:51:56 -0700306 }
307
308 if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
309 {
310 vnet_buffer2 (b0)->gso_size = hdr->gso_size;
311 vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
312 b0->flags |= VNET_BUFFER_F_GSO;
313 }
314 else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
315 {
316 vnet_buffer2 (b0)->gso_size = hdr->gso_size;
317 vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
318 b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
319 }
320 else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
321 {
322 vnet_buffer2 (b0)->gso_size = hdr->gso_size;
323 vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
324 b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
325 }
326}
327
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700328static_always_inline void
329vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_vring_t * txvq,
330 vhost_user_vring_t * rxvq)
331{
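  /* Send any coalesced guest notifications whose deadline has expired. */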
332 f64 now = vlib_time_now (vm);
333
334 if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
335 vhost_user_send_call (vm, txvq);
336
337 if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
338 vhost_user_send_call (vm, rxvq);
339}
340
341static_always_inline void
342vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
343 vhost_user_intf_t * vui,
344 u32 * current_config_index, u32 * next_index,
345 u32 ** to_next, u32 * n_left_to_next)
346{
347 vnet_feature_main_t *fm = &feature_main;
348 u8 feature_arc_idx = fm->device_input_feature_arc_index;
349
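  /* If an input feature arc is enabled on this interface, buffers must
   * carry the arc's config index and the frame is redirected to the
   * arc's first enabled node instead of ethernet-input. */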
350 if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
351 {
352 vnet_feature_config_main_t *cm;
353 cm = &fm->feature_config_mains[feature_arc_idx];
354 *current_config_index = vec_elt (cm->config_index_by_sw_if_index,
355 vui->sw_if_index);
356 vnet_get_config_data (&cm->config_main, current_config_index,
357 next_index, 0);
358 }
359
360 vlib_get_new_next_frame (vm, node, *next_index, *to_next, *n_left_to_next);
361
362 if (*next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
363 {
364 /* give some hints to ethernet-input */
365 vlib_next_frame_t *nf;
366 vlib_frame_t *f;
367 ethernet_input_frame_t *ef;
368 nf = vlib_node_runtime_get_next_frame (vm, node, *next_index);
369 f = vlib_get_frame (vm, nf->frame);
370 f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
371
372 ef = vlib_frame_scalar_args (f);
373 ef->sw_if_index = vui->sw_if_index;
374 ef->hw_if_index = vui->hw_if_index;
375 vlib_frame_no_append (f);
376 }
377}
378
Damjan Marion46bf8662018-11-22 22:25:38 +0100379static_always_inline u32
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200380vhost_user_if_input (vlib_main_t * vm,
381 vhost_user_main_t * vum,
382 vhost_user_intf_t * vui,
383 u16 qid, vlib_node_runtime_t * node,
Steven Luong4208a4c2019-05-06 08:51:56 -0700384 vnet_hw_interface_rx_mode mode, u8 enable_csum)
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200385{
386 vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
Damjan Marion9af45042018-11-21 09:51:42 +0100387 vnet_feature_main_t *fm = &feature_main;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200388 u16 n_rx_packets = 0;
389 u32 n_rx_bytes = 0;
390 u16 n_left;
391 u32 n_left_to_next, *to_next;
392 u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
393 u32 n_trace = vlib_get_trace_count (vm, node);
Damjan Marion8934a042019-02-09 23:29:26 +0100394 u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200395 u32 map_hint = 0;
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100396 vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200397 u16 copy_len = 0;
Damjan Marion9af45042018-11-21 09:51:42 +0100398 u8 feature_arc_idx = fm->device_input_feature_arc_index;
399 u32 current_config_index = ~(u32) 0;
Damjan Marionba1afaa2018-11-22 22:16:19 +0100400 u16 mask = txvq->qsz_mask;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200401
Yichen Wang28812a02018-08-28 23:05:27 -0700402 /* The descriptor table is not ready yet */
403 if (PREDICT_FALSE (txvq->avail == 0))
Damjan Marionba1afaa2018-11-22 22:16:19 +0100404 goto done;
Yichen Wang28812a02018-08-28 23:05:27 -0700405
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200406 {
407 /* do we have pending interrupts ? */
408 vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700409 vhost_user_input_do_interrupt (vm, txvq, rxvq);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200410 }
411
412 /*
 413 * Adaptive mode is optimized to reduce interrupts.
 414 * If the scheduler switches the input node to polling due to a
 415 * burst of traffic, we tell the driver we do not want notifications.
 416 * When the traffic subsides, the scheduler switches the node back to
 417 * interrupt mode and we must tell the driver we want notifications again.
418 */
419 if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
420 {
421 if ((node->flags &
422 VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
423 !(node->flags &
424 VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
425 /* Tell driver we want notification */
426 txvq->used->flags = 0;
427 else
428 /* Tell driver we don't want notification */
429 txvq->used->flags = VRING_USED_F_NO_NOTIFY;
430 }
431
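  /* Only bit 0 (VRING_AVAIL_F_NO_INTERRUPT) is expected in avail->flags;
   * skip this round if any other bit is set. */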
432 if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
Damjan Marionba1afaa2018-11-22 22:16:19 +0100433 goto done;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200434
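  /* Descriptors the guest has made available that we have not consumed
   * yet; 16-bit arithmetic handles ring index wrap-around. */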
435 n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);
436
437 /* nothing to do */
438 if (PREDICT_FALSE (n_left == 0))
Damjan Marionba1afaa2018-11-22 22:16:19 +0100439 goto done;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200440
441 if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
442 {
443 /*
444 * Discard input packet if interface is admin down or vring is not
445 * enabled.
446 * "For example, for a networking device, in the disabled state
447 * client must not supply any new RX packets, but must process
448 * and discard any TX packets."
449 */
450 vhost_user_rx_discard_packet (vm, vui, txvq,
451 VHOST_USER_DOWN_DISCARD_COUNT);
Damjan Marionba1afaa2018-11-22 22:16:19 +0100452 goto done;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200453 }
454
Damjan Marionba1afaa2018-11-22 22:16:19 +0100455 if (PREDICT_FALSE (n_left == (mask + 1)))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200456 {
457 /*
458 * Informational error logging when VPP is not
459 * receiving packets fast enough.
460 */
461 vlib_error_count (vm, node->node_index,
462 VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
463 }
464
465 if (n_left > VLIB_FRAME_SIZE)
466 n_left = VLIB_FRAME_SIZE;
467
468 /*
469 * For small packets (<2kB), we will not need more than one vlib buffer
Paul Vinciguerra97c998c2019-10-29 16:11:09 -0400470 * per packet. In case packets are bigger, we will just yield at some point
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200471 * in the loop and come back later. This is not an issue as for big packet,
472 * processing cost really comes from the memory copy.
473 * The assumption is that big packets will fit in 40 buffers.
474 */
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100475 if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
476 cpu->rx_buffers_len < 40))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200477 {
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100478 u32 curr_len = cpu->rx_buffers_len;
479 cpu->rx_buffers_len +=
Damjan Marion671e60e2018-12-30 18:09:59 +0100480 vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
481 VHOST_USER_RX_BUFFERS_N - curr_len);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200482
483 if (PREDICT_FALSE
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100484 (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200485 {
486 /* In case of buffer starvation, discard some packets from the queue
487 * and log the event.
488 * We keep doing best effort for the remaining packets. */
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100489 u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
490 n_left + 1 - cpu->rx_buffers_len : 1;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200491 flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);
492
493 n_left -= flush;
494 vlib_increment_simple_counter (vnet_main.
495 interface_main.sw_if_counters +
496 VNET_INTERFACE_COUNTER_DROP,
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100497 vm->thread_index, vui->sw_if_index,
498 flush);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200499
500 vlib_error_count (vm, vhost_user_input_node.index,
501 VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
502 }
503 }
504
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700505 vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
506 &next_index, &to_next, &n_left_to_next);
Damjan Marion9af45042018-11-21 09:51:42 +0100507
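  /* Track the ring indices in locals and write them back to the vring
   * after the burst (used->idx is also flushed whenever a copy batch is
   * executed) to limit traffic on shared cache lines. */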
Damjan Marionba1afaa2018-11-22 22:16:19 +0100508 u16 last_avail_idx = txvq->last_avail_idx;
509 u16 last_used_idx = txvq->last_used_idx;
510
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200511 while (n_left > 0)
512 {
Damjan Marion92825382018-11-21 10:03:44 +0100513 vlib_buffer_t *b_head, *b_current;
514 u32 bi_current;
515 u16 desc_current;
516 u32 desc_data_offset;
517 vring_desc_t *desc_table = txvq->desc;
Damjan Marion6a8bfd42018-11-21 09:54:41 +0100518
Damjan Marion92825382018-11-21 10:03:44 +0100519 if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
Damjan Marion6a8bfd42018-11-21 09:54:41 +0100520 {
Damjan Marion92825382018-11-21 10:03:44 +0100521 /* Not enough rx_buffers
 522 * Note: We yield on 1 so we don't need to do an additional
523 * check for the next buffer prefetch.
524 */
525 n_left = 0;
526 break;
Damjan Marion6a8bfd42018-11-21 09:54:41 +0100527 }
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200528
Damjan Marionba1afaa2018-11-22 22:16:19 +0100529 desc_current = txvq->avail->ring[last_avail_idx & mask];
Damjan Marion92825382018-11-21 10:03:44 +0100530 cpu->rx_buffers_len--;
531 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
532 b_head = b_current = vlib_get_buffer (vm, bi_current);
 533 to_next[0] = bi_current; // We do that now so we can forget about bi_current
534 to_next++;
535 n_left_to_next--;
536
537 vlib_prefetch_buffer_with_index
538 (vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);
539
540 /* Just preset the used descriptor id and length for later */
Damjan Marionba1afaa2018-11-22 22:16:19 +0100541 txvq->used->ring[last_used_idx & mask].id = desc_current;
542 txvq->used->ring[last_used_idx & mask].len = 0;
543 vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
Damjan Marion92825382018-11-21 10:03:44 +0100544
545 /* The buffer should already be initialized */
546 b_head->total_length_not_including_first_buffer = 0;
547 b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
548
549 if (PREDICT_FALSE (n_trace))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200550 {
Damjan Marion92825382018-11-21 10:03:44 +0100551 vlib_trace_buffer (vm, node, next_index, b_head,
552 /* follow_chain */ 0);
553 vhost_trace_t *t0 =
554 vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
Damjan Marionba1afaa2018-11-22 22:16:19 +0100555 vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
Damjan Marion92825382018-11-21 10:03:44 +0100556 n_trace--;
557 vlib_set_trace_count (vm, node, n_trace);
558 }
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200559
Damjan Marion92825382018-11-21 10:03:44 +0100560 /* This depends on the setup but is very consistent
 561 * so I think the CPU branch predictor will do a pretty good job
 562 * of optimizing the decision. */
563 if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
564 {
565 desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
566 &map_hint);
567 desc_current = 0;
568 if (PREDICT_FALSE (desc_table == 0))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200569 {
Damjan Marion92825382018-11-21 10:03:44 +0100570 vlib_error_count (vm, node->node_index,
571 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
572 goto out;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200573 }
Damjan Marion92825382018-11-21 10:03:44 +0100574 }
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200575
BenoƮt Ganne5ecc1e42020-01-24 18:06:01 +0100576 desc_data_offset = vui->virtio_net_hdr_sz;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200577
Steven Luong4208a4c2019-05-06 08:51:56 -0700578 if (enable_csum)
579 {
580 virtio_net_hdr_mrg_rxbuf_t *hdr;
581 u8 *b_data;
Steven Luongb232d192020-03-17 09:01:30 -0700582 u16 current;
Steven Luong4208a4c2019-05-06 08:51:56 -0700583
Steven Luongb232d192020-03-17 09:01:30 -0700584 hdr = map_guest_mem (vui, desc_table[desc_current].addr, &map_hint);
Steven Luong5dedae72019-07-31 16:01:14 -0700585 if (PREDICT_FALSE (hdr == 0))
586 {
587 vlib_error_count (vm, node->node_index,
588 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
589 goto out;
590 }
Steven Luongb232d192020-03-17 09:01:30 -0700591 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
Steven Luong5dedae72019-07-31 16:01:14 -0700592 {
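	      /* If the first descriptor holds only the virtio-net header,
	       * the packet data starts in the next descriptor; map it so
	       * the offload parser can reach the Ethernet header. */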
Steven Luongb232d192020-03-17 09:01:30 -0700593 if ((desc_data_offset == desc_table[desc_current].len) &&
594 (desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT))
Steven Luong5dedae72019-07-31 16:01:14 -0700595 {
Steven Luongb232d192020-03-17 09:01:30 -0700596 current = desc_table[desc_current].next;
597 b_data = map_guest_mem (vui, desc_table[current].addr,
598 &map_hint);
599 if (PREDICT_FALSE (b_data == 0))
600 {
601 vlib_error_count (vm, node->node_index,
602 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL,
603 1);
604 goto out;
605 }
Steven Luong5dedae72019-07-31 16:01:14 -0700606 }
Steven Luongb232d192020-03-17 09:01:30 -0700607 else
608 b_data = (u8 *) hdr + desc_data_offset;
609
610 vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
Steven Luong5dedae72019-07-31 16:01:14 -0700611 }
Steven Luong4208a4c2019-05-06 08:51:56 -0700612 }
613
Damjan Marion92825382018-11-21 10:03:44 +0100614 while (1)
615 {
616 /* Get more input if necessary. Or end of packet. */
617 if (desc_data_offset == desc_table[desc_current].len)
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200618 {
Damjan Marion92825382018-11-21 10:03:44 +0100619 if (PREDICT_FALSE (desc_table[desc_current].flags &
620 VIRTQ_DESC_F_NEXT))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200621 {
Damjan Marion92825382018-11-21 10:03:44 +0100622 desc_current = desc_table[desc_current].next;
623 desc_data_offset = 0;
624 }
625 else
626 {
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200627 goto out;
628 }
629 }
630
Damjan Marion92825382018-11-21 10:03:44 +0100631 /* Get more output if necessary. Or end of packet. */
Damjan Marion8934a042019-02-09 23:29:26 +0100632 if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200633 {
Damjan Marion92825382018-11-21 10:03:44 +0100634 if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200635 {
Damjan Marion92825382018-11-21 10:03:44 +0100636 /* Cancel speculation */
637 to_next--;
638 n_left_to_next++;
639
640 /*
 641 * Check whether any buffers are left.
642 * If not, just rewind the used buffers and stop.
643 * Note: Scheduled copies are not cancelled. This is
644 * not an issue as they would still be valid. Useless,
645 * but valid.
646 */
647 vhost_user_input_rewind_buffers (vm, cpu, b_head);
648 n_left = 0;
649 goto stop;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200650 }
651
Damjan Marion92825382018-11-21 10:03:44 +0100652 /* Get next output */
653 cpu->rx_buffers_len--;
654 u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
655 b_current->next_buffer = bi_next;
656 b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
657 bi_current = bi_next;
658 b_current = vlib_get_buffer (vm, bi_current);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200659 }
660
Damjan Marion92825382018-11-21 10:03:44 +0100661 /* Prepare a copy order executed later for the data */
Steven Luong73310052019-10-23 13:28:37 -0700662 ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
Damjan Marion92825382018-11-21 10:03:44 +0100663 vhost_copy_t *cpy = &cpu->copy[copy_len];
664 copy_len++;
665 u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
Damjan Marion8934a042019-02-09 23:29:26 +0100666 cpy->len = buffer_data_size - b_current->current_length;
Damjan Marion92825382018-11-21 10:03:44 +0100667 cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
668 cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
669 b_current->current_length);
670 cpy->src = desc_table[desc_current].addr + desc_data_offset;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200671
Damjan Marion92825382018-11-21 10:03:44 +0100672 desc_data_offset += cpy->len;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200673
Damjan Marion92825382018-11-21 10:03:44 +0100674 b_current->current_length += cpy->len;
675 b_head->total_length_not_including_first_buffer += cpy->len;
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200676 }
Damjan Marion92825382018-11-21 10:03:44 +0100677
678 out:
679
680 n_rx_bytes += b_head->total_length_not_including_first_buffer;
681 n_rx_packets++;
682
683 b_head->total_length_not_including_first_buffer -=
684 b_head->current_length;
685
686 /* consume the descriptor and return it as used */
Damjan Marionba1afaa2018-11-22 22:16:19 +0100687 last_avail_idx++;
688 last_used_idx++;
Damjan Marion92825382018-11-21 10:03:44 +0100689
690 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
691
692 vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
693 vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
694 b_head->error = 0;
695
696 if (current_config_index != ~(u32) 0)
697 {
698 b_head->current_config_index = current_config_index;
699 vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
700 }
701
702 n_left--;
703
704 /*
705 * Although separating memory copies from virtio ring parsing
 706 * is beneficial, we perform the copies from time
707 * to time in order to free some space in the ring.
708 */
709 if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
710 {
711 if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
712 copy_len, &map_hint)))
713 {
714 vlib_error_count (vm, node->node_index,
715 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
716 }
717 copy_len = 0;
718
719 /* give buffers back to driver */
Damjan Marion96e8cd02018-11-23 14:56:55 +0100720 CLIB_MEMORY_STORE_BARRIER ();
Damjan Marionba1afaa2018-11-22 22:16:19 +0100721 txvq->used->idx = last_used_idx;
Damjan Marion92825382018-11-21 10:03:44 +0100722 vhost_user_log_dirty_ring (vui, txvq, idx);
723 }
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200724 }
Damjan Marion92825382018-11-21 10:03:44 +0100725stop:
726 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200727
Damjan Marionba1afaa2018-11-22 22:16:19 +0100728 txvq->last_used_idx = last_used_idx;
729 txvq->last_avail_idx = last_avail_idx;
730
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200731 /* Do the memory copies */
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100732 if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
733 &map_hint)))
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200734 {
735 vlib_error_count (vm, node->node_index,
736 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
737 }
738
739 /* give buffers back to driver */
Damjan Marion96e8cd02018-11-23 14:56:55 +0100740 CLIB_MEMORY_STORE_BARRIER ();
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200741 txvq->used->idx = txvq->last_used_idx;
742 vhost_user_log_dirty_ring (vui, txvq, idx);
743
744 /* interrupt (call) handling */
745 if ((txvq->callfd_idx != ~0) &&
746 !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
747 {
748 txvq->n_since_last_int += n_rx_packets;
749
750 if (txvq->n_since_last_int > vum->coalesce_frames)
751 vhost_user_send_call (vm, txvq);
752 }
753
754 /* increase rx counters */
755 vlib_increment_combined_counter
756 (vnet_main.interface_main.combined_sw_if_counters
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100757 + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
758 n_rx_packets, n_rx_bytes);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200759
Damjan Marion7e0b17d2018-11-20 21:07:03 +0100760 vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200761
Damjan Marionba1afaa2018-11-22 22:16:19 +0100762done:
Mohsin Kazmie7cde312018-06-26 17:20:11 +0200763 return n_rx_packets;
764}
765
Steven Luongbc0d9ff2020-03-23 09:34:59 -0700766static_always_inline void
767vhost_user_mark_desc_consumed (vhost_user_intf_t * vui,
768 vhost_user_vring_t * txvq, u16 desc_head,
769 u16 n_descs_processed)
770{
771 vring_packed_desc_t *desc_table = txvq->packed_desc;
772 u16 desc_idx;
773 u16 mask = txvq->qsz_mask;
774
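  /* In a packed ring a descriptor is returned to the driver by setting
   * its AVAIL and USED flag bits equal to the current used wrap counter. */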
775 for (desc_idx = 0; desc_idx < n_descs_processed; desc_idx++)
776 {
777 if (txvq->used_wrap_counter)
778 desc_table[(desc_head + desc_idx) & mask].flags |=
779 (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
780 else
781 desc_table[(desc_head + desc_idx) & mask].flags &=
782 ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
783 vhost_user_advance_last_used_idx (txvq);
784 }
785}
786
787static_always_inline void
788vhost_user_rx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
789 u16 qid, vhost_user_vring_t * txvq,
790 u16 desc_current)
791{
792 vhost_user_main_t *vum = &vhost_user_main;
793 vring_packed_desc_t *hdr_desc;
794 virtio_net_hdr_mrg_rxbuf_t *hdr;
795 u32 hint = 0;
796
797 clib_memset (t, 0, sizeof (*t));
798 t->device_index = vui - vum->vhost_user_interfaces;
799 t->qid = qid;
800
801 hdr_desc = &txvq->packed_desc[desc_current];
802 if (txvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
803 {
804 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
 805 /* The header is in the first descriptor of the indirect table */
806 hdr_desc = map_guest_mem (vui, txvq->packed_desc[desc_current].addr,
807 &hint);
808 }
809 if (txvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
810 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
811
812 if (!(txvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
813 !(txvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
814 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
815
816 t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
817
818 if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
819 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
820 else
821 {
822 u32 len = vui->virtio_net_hdr_sz;
823 clib_memcpy_fast (&t->hdr, hdr,
824 len > hdr_desc->len ? hdr_desc->len : len);
825 }
826}
827
828static_always_inline u32
829vhost_user_rx_discard_packet_packed (vlib_main_t * vm,
830 vhost_user_intf_t * vui,
831 vhost_user_vring_t * txvq,
832 u32 discard_max)
833{
834 u32 discarded_packets = 0;
835 u16 mask = txvq->qsz_mask;
836 u16 desc_current, desc_head;
837
838 desc_head = desc_current = txvq->last_used_idx & mask;
839
840 /*
841 * On the RX side, each packet corresponds to one descriptor
842 * (it is the same whether it is a shallow descriptor, chained, or indirect).
843 * Therefore, discarding a packet is like discarding a descriptor.
844 */
845 while ((discarded_packets != discard_max) &&
846 vhost_user_packed_desc_available (txvq, desc_current))
847 {
848 vhost_user_advance_last_avail_idx (txvq);
849 discarded_packets++;
850 desc_current = (desc_current + 1) & mask;
851 }
852
853 if (PREDICT_TRUE (discarded_packets))
854 vhost_user_mark_desc_consumed (vui, txvq, desc_head, discarded_packets);
855 return (discarded_packets);
856}
857
858static_always_inline u32
859vhost_user_input_copy_packed (vhost_user_intf_t * vui, vhost_copy_t * cpy,
860 u16 copy_len, u32 * map_hint)
861{
862 void *src0, *src1, *src2, *src3, *src4, *src5, *src6, *src7;
863 u8 bad;
864 u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
865
866 if (PREDICT_TRUE (copy_len >= 8))
867 {
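      /* Map and prefetch four copy sources ahead so guest-memory
       * translation overlaps with the memcpy work; on any mapping
       * failure fall back to the scalar one_by_one path. */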
868 src4 = map_guest_mem (vui, cpy[0].src, map_hint);
869 src5 = map_guest_mem (vui, cpy[1].src, map_hint);
870 src6 = map_guest_mem (vui, cpy[2].src, map_hint);
871 src7 = map_guest_mem (vui, cpy[3].src, map_hint);
872 bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
873 if (PREDICT_FALSE (bad))
874 goto one_by_one;
875 CLIB_PREFETCH (src4, 64, LOAD);
876 CLIB_PREFETCH (src5, 64, LOAD);
877 CLIB_PREFETCH (src6, 64, LOAD);
878 CLIB_PREFETCH (src7, 64, LOAD);
879
880 while (PREDICT_TRUE (copy_len >= 8))
881 {
882 src0 = src4;
883 src1 = src5;
884 src2 = src6;
885 src3 = src7;
886
887 src4 = map_guest_mem (vui, cpy[4].src, map_hint);
888 src5 = map_guest_mem (vui, cpy[5].src, map_hint);
889 src6 = map_guest_mem (vui, cpy[6].src, map_hint);
890 src7 = map_guest_mem (vui, cpy[7].src, map_hint);
891 bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
892 if (PREDICT_FALSE (bad))
893 break;
894
895 CLIB_PREFETCH (src4, 64, LOAD);
896 CLIB_PREFETCH (src5, 64, LOAD);
897 CLIB_PREFETCH (src6, 64, LOAD);
898 CLIB_PREFETCH (src7, 64, LOAD);
899
900 clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
901 clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
902 clib_memcpy_fast ((void *) cpy[2].dst, src2, cpy[2].len);
903 clib_memcpy_fast ((void *) cpy[3].dst, src3, cpy[3].len);
904 copy_len -= 4;
905 cpy += 4;
906 }
907 }
908
909one_by_one:
910 while (copy_len)
911 {
912 if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
913 {
914 rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
915 break;
916 }
917 clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
918 copy_len -= 1;
919 cpy += 1;
920 }
921 return rc;
922}
923
924static_always_inline u32
925vhost_user_do_offload (vhost_user_intf_t * vui,
926 vring_packed_desc_t * desc_table, u16 desc_current,
927 u16 mask, vlib_buffer_t * b_head, u32 * map_hint)
928{
929 u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
930 virtio_net_hdr_mrg_rxbuf_t *hdr;
931 u8 *b_data;
932 u32 desc_data_offset = vui->virtio_net_hdr_sz;
933
934 hdr = map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
935 if (PREDICT_FALSE (hdr == 0))
936 rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
937 else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
938 {
939 if (desc_data_offset == desc_table[desc_current].len)
940 {
941 desc_current = (desc_current + 1) & mask;
942 b_data =
943 map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
944 if (PREDICT_FALSE (b_data == 0))
945 rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
946 else
947 vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
948 }
949 else
950 {
951 b_data = (u8 *) hdr + desc_data_offset;
952 vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
953 }
954 }
955
956 return rc;
957}
958
959static_always_inline u32
960vhost_user_compute_buffers_required (u32 desc_len, u32 buffer_data_size)
961{
962 div_t result;
963 u32 buffers_required;
964
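  /* Fast path for the default 2048-byte buffers: replace the division
   * with a shift and a mask on the remainder. */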
965 if (PREDICT_TRUE (buffer_data_size == 2048))
966 {
967 buffers_required = desc_len >> 11;
968 if ((desc_len & 2047) != 0)
969 buffers_required++;
970 return (buffers_required);
971 }
972
973 if (desc_len < buffer_data_size)
974 return 1;
975
976 result = div (desc_len, buffer_data_size);
977 if (result.rem)
978 buffers_required = result.quot + 1;
979 else
980 buffers_required = result.quot;
981
982 return (buffers_required);
983}
984
985static_always_inline u32
986vhost_user_compute_indirect_desc_len (vhost_user_intf_t * vui,
987 vhost_user_vring_t * txvq,
988 u32 buffer_data_size, u16 desc_current,
989 u32 * map_hint)
990{
991 vring_packed_desc_t *desc_table = txvq->packed_desc;
992 u32 desc_len = 0;
993 u16 desc_data_offset = vui->virtio_net_hdr_sz;
994 u16 desc_idx = desc_current;
995 u32 n_descs;
996
997 n_descs = desc_table[desc_idx].len >> 4;
998 desc_table = map_guest_mem (vui, desc_table[desc_idx].addr, map_hint);
999 if (PREDICT_FALSE (desc_table == 0))
1000 return 0;
1001
1002 for (desc_idx = 0; desc_idx < n_descs; desc_idx++)
1003 desc_len += desc_table[desc_idx].len;
1004
1005 if (PREDICT_TRUE (desc_len > desc_data_offset))
1006 desc_len -= desc_data_offset;
1007
1008 return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
1009}
1010
1011static_always_inline u32
1012vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
1013 vhost_user_vring_t * txvq,
1014 u32 buffer_data_size, u16 * current,
1015 u16 * n_left)
1016{
1017 vring_packed_desc_t *desc_table = txvq->packed_desc;
1018 u32 desc_len = 0;
1019 u16 mask = txvq->qsz_mask;
1020
1021 while (desc_table[*current].flags & VIRTQ_DESC_F_NEXT)
1022 {
1023 desc_len += desc_table[*current].len;
1024 (*n_left)++;
1025 *current = (*current + 1) & mask;
1026 vhost_user_advance_last_avail_idx (txvq);
1027 }
1028 desc_len += desc_table[*current].len;
1029 (*n_left)++;
1030 *current = (*current + 1) & mask;
1031 vhost_user_advance_last_avail_idx (txvq);
1032
1033 if (PREDICT_TRUE (desc_len > vui->virtio_net_hdr_sz))
1034 desc_len -= vui->virtio_net_hdr_sz;
1035
1036 return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
1037}
1038
1039static_always_inline void
1040vhost_user_assemble_packet (vring_packed_desc_t * desc_table,
1041 u16 * desc_idx, vlib_buffer_t * b_head,
1042 vlib_buffer_t ** b_current, u32 ** next,
1043 vlib_buffer_t *** b, u32 * bi_current,
1044 vhost_cpu_t * cpu, u16 * copy_len,
1045 u32 * buffers_used, u32 buffers_required,
1046 u32 * desc_data_offset, u32 buffer_data_size,
1047 u16 mask)
1048{
1049 u32 desc_data_l;
1050
1051 while (*desc_data_offset < desc_table[*desc_idx].len)
1052 {
1053 /* Get more output if necessary. Or end of packet. */
1054 if (PREDICT_FALSE ((*b_current)->current_length == buffer_data_size))
1055 {
1056 /* Get next output */
1057 u32 bi_next = **next;
1058 (*next)++;
1059 (*b_current)->next_buffer = bi_next;
1060 (*b_current)->flags |= VLIB_BUFFER_NEXT_PRESENT;
1061 *bi_current = bi_next;
1062 *b_current = **b;
1063 (*b)++;
1064 (*buffers_used)++;
1065 ASSERT (*buffers_used <= buffers_required);
1066 }
1067
1068 /* Prepare a copy order executed later for the data */
1069 ASSERT (*copy_len < VHOST_USER_COPY_ARRAY_N);
1070 vhost_copy_t *cpy = &cpu->copy[*copy_len];
1071 (*copy_len)++;
1072 desc_data_l = desc_table[*desc_idx].len - *desc_data_offset;
1073 cpy->len = buffer_data_size - (*b_current)->current_length;
1074 cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
1075 cpy->dst = (uword) (vlib_buffer_get_current (*b_current) +
1076 (*b_current)->current_length);
1077 cpy->src = desc_table[*desc_idx].addr + *desc_data_offset;
1078
1079 *desc_data_offset += cpy->len;
1080
1081 (*b_current)->current_length += cpy->len;
1082 b_head->total_length_not_including_first_buffer += cpy->len;
1083 }
 1084 *desc_idx = (*desc_idx + 1) & mask;
1085 *desc_data_offset = 0;
1086}
1087
1088static_always_inline u32
1089vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
1090 vhost_user_intf_t * vui, u16 qid,
1091 vlib_node_runtime_t * node,
1092 vnet_hw_interface_rx_mode mode, u8 enable_csum)
1093{
1094 vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
1095 vnet_feature_main_t *fm = &feature_main;
1096 u8 feature_arc_idx = fm->device_input_feature_arc_index;
1097 u16 n_rx_packets = 0;
1098 u32 n_rx_bytes = 0;
1099 u16 n_left = 0;
1100 u32 buffers_required = 0;
1101 u32 n_left_to_next, *to_next;
1102 u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
1103 u32 n_trace = vlib_get_trace_count (vm, node);
1104 u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
1105 u32 map_hint = 0;
1106 vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
1107 u16 copy_len = 0;
1108 u32 current_config_index = ~0;
1109 u16 mask = txvq->qsz_mask;
1110 u16 desc_current, desc_head, last_used_idx;
1111 vring_packed_desc_t *desc_table = 0;
1112 u32 n_descs_processed = 0;
1113 u32 rv;
1114 vlib_buffer_t **b;
1115 u32 *next;
1116 u32 buffers_used = 0;
1117 u16 current, n_descs_to_process;
1118
1119 /* The descriptor table is not ready yet */
1120 if (PREDICT_FALSE (txvq->packed_desc == 0))
1121 goto done;
1122
1123 /* do we have pending interrupts ? */
1124 vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
1125 vhost_user_input_do_interrupt (vm, txvq, rxvq);
1126
1127 /*
 1128 * Adaptive mode is optimized to reduce interrupts.
 1129 * If the scheduler switches the input node to polling due to a
 1130 * burst of traffic, we tell the driver we do not want notifications.
 1131 * When the traffic subsides, the scheduler switches the node back to
 1132 * interrupt mode and we must tell the driver we want notifications again.
1133 */
1134 if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
1135 {
1136 if ((node->flags &
1137 VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
1138 !(node->flags &
1139 VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
1140 /* Tell driver we want notification */
1141 txvq->used_event->flags = 0;
1142 else
1143 /* Tell driver we don't want notification */
1144 txvq->used_event->flags = VRING_EVENT_F_DISABLE;
1145 }
1146
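  /* In a packed ring, available and used entries share one descriptor
   * array; last_used_idx marks where this round of processing starts. */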
1147 last_used_idx = txvq->last_used_idx & mask;
1148 desc_head = desc_current = last_used_idx;
1149
1150 if (vhost_user_packed_desc_available (txvq, desc_current) == 0)
1151 goto done;
1152
1153 if (PREDICT_FALSE (!vui->admin_up || !vui->is_ready || !(txvq->enabled)))
1154 {
1155 /*
1156 * Discard input packet if interface is admin down or vring is not
1157 * enabled.
1158 * "For example, for a networking device, in the disabled state
1159 * client must not supply any new RX packets, but must process
1160 * and discard any TX packets."
1161 */
1162 rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq,
1163 VHOST_USER_DOWN_DISCARD_COUNT);
1164 vlib_error_count (vm, vhost_user_input_node.index,
1165 VHOST_USER_INPUT_FUNC_ERROR_NOT_READY, rv);
1166 goto done;
1167 }
1168
1169 vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
1170 &next_index, &to_next, &n_left_to_next);
1171
1172 /*
1173 * Compute n_left and total buffers needed
1174 */
1175 desc_table = txvq->packed_desc;
1176 current = desc_current;
1177 while (vhost_user_packed_desc_available (txvq, current) &&
1178 (n_left < VLIB_FRAME_SIZE))
1179 {
1180 if (desc_table[current].flags & VIRTQ_DESC_F_INDIRECT)
1181 {
1182 buffers_required +=
1183 vhost_user_compute_indirect_desc_len (vui, txvq, buffer_data_size,
1184 current, &map_hint);
1185 n_left++;
1186 current = (current + 1) & mask;
1187 vhost_user_advance_last_avail_idx (txvq);
1188 }
1189 else
1190 {
1191 buffers_required +=
1192 vhost_user_compute_chained_desc_len (vui, txvq, buffer_data_size,
1193 &current, &n_left);
1194 }
1195 }
1196
1197 /* Something is broken if we need more than 10000 buffers */
1198 if (PREDICT_FALSE ((buffers_required == 0) || (buffers_required > 10000)))
1199 {
1200 rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
1201 vlib_error_count (vm, vhost_user_input_node.index,
1202 VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
1203 goto done;
1204 }
1205
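  /* Pre-allocate every buffer needed for the whole burst up front; if the
   * allocation falls short, free what was obtained and drop the burst. */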
1206 vec_validate (cpu->to_next_list, buffers_required);
1207 rv = vlib_buffer_alloc (vm, cpu->to_next_list, buffers_required);
1208 if (PREDICT_FALSE (rv != buffers_required))
1209 {
1210 vlib_buffer_free (vm, cpu->to_next_list, rv);
1211 rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
1212 vlib_error_count (vm, vhost_user_input_node.index,
1213 VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
1214 goto done;
1215 }
1216
1217 next = cpu->to_next_list;
1218 vec_validate (cpu->rx_buffers_pdesc, buffers_required);
1219 vlib_get_buffers (vm, next, cpu->rx_buffers_pdesc, buffers_required);
1220 b = cpu->rx_buffers_pdesc;
1221 n_descs_processed = n_left;
1222
1223 while (n_left)
1224 {
1225 vlib_buffer_t *b_head, *b_current;
1226 u32 bi_current;
1227 u32 desc_data_offset;
1228 u16 desc_idx = desc_current;
1229 u32 n_descs;
1230
1231 desc_table = txvq->packed_desc;
1232 to_next[0] = bi_current = next[0];
1233 b_head = b_current = b[0];
1234 b++;
1235 buffers_used++;
1236 ASSERT (buffers_used <= buffers_required);
1237 to_next++;
1238 next++;
1239 n_left_to_next--;
1240
1241 /* The buffer should already be initialized */
1242 b_head->total_length_not_including_first_buffer = 0;
1243 b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1244 desc_data_offset = vui->virtio_net_hdr_sz;
1245 n_descs_to_process = 1;
1246
1247 if (desc_table[desc_idx].flags & VIRTQ_DESC_F_INDIRECT)
1248 {
1249 n_descs = desc_table[desc_idx].len >> 4;
1250 desc_table = map_guest_mem (vui, desc_table[desc_idx].addr,
1251 &map_hint);
1252 desc_idx = 0;
1253 if (PREDICT_FALSE (desc_table == 0) ||
1254 (enable_csum &&
1255 (PREDICT_FALSE
1256 (vhost_user_do_offload
1257 (vui, desc_table, desc_idx, mask, b_head,
1258 &map_hint) != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))))
1259 {
1260 vlib_error_count (vm, node->node_index,
1261 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
1262 to_next--;
1263 next--;
1264 n_left_to_next++;
1265 buffers_used--;
1266 b--;
1267 goto out;
1268 }
1269 while (n_descs)
1270 {
1271 vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
1272 &b_current, &next, &b, &bi_current,
1273 cpu, &copy_len, &buffers_used,
1274 buffers_required, &desc_data_offset,
1275 buffer_data_size, mask);
1276 n_descs--;
1277 }
1278 }
1279 else
1280 {
1281 if (enable_csum)
1282 {
1283 rv = vhost_user_do_offload (vui, desc_table, desc_idx, mask,
1284 b_head, &map_hint);
1285 if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
1286 {
1287 vlib_error_count (vm, node->node_index, rv, 1);
1288 to_next--;
1289 next--;
1290 n_left_to_next++;
1291 buffers_used--;
1292 b--;
1293 goto out;
1294 }
1295 }
1296 /*
1297 * For chained descriptor, we process all chains in a single while
1298 * loop. So count how many descriptors in the chain.
1299 */
1300 n_descs_to_process = 1;
1301 while (desc_table[desc_idx].flags & VIRTQ_DESC_F_NEXT)
1302 {
1303 vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
1304 &b_current, &next, &b, &bi_current,
1305 cpu, &copy_len, &buffers_used,
1306 buffers_required, &desc_data_offset,
1307 buffer_data_size, mask);
1308 n_descs_to_process++;
1309 }
1310 vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
1311 &b_current, &next, &b, &bi_current,
1312 cpu, &copy_len, &buffers_used,
1313 buffers_required, &desc_data_offset,
1314 buffer_data_size, mask);
1315 }
1316
1317 n_rx_bytes += b_head->total_length_not_including_first_buffer;
1318 n_rx_packets++;
1319
1320 b_head->total_length_not_including_first_buffer -=
1321 b_head->current_length;
1322
1323 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
1324
1325 vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
1326 vnet_buffer (b_head)->sw_if_index[VLIB_TX] = ~0;
1327 b_head->error = 0;
1328
1329 if (current_config_index != ~0)
1330 {
1331 b_head->current_config_index = current_config_index;
1332 vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
1333 }
1334
1335 out:
1336 ASSERT (n_left >= n_descs_to_process);
1337 n_left -= n_descs_to_process;
1338
 1339 /* advance to the next descriptor */
1340 desc_current = (desc_current + n_descs_to_process) & mask;
1341
1342 /*
1343 * Although separating memory copies from virtio ring parsing
 1344 * is beneficial, we perform the copies from time
1345 * to time in order to free some space in the ring.
1346 */
1347 if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
1348 {
1349 rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len,
1350 &map_hint);
1351 if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
1352 vlib_error_count (vm, node->node_index, rv, 1);
1353 copy_len = 0;
1354 }
1355 }
1356 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1357
1358 /* Do the memory copies */
1359 rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len, &map_hint);
1360 if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
1361 vlib_error_count (vm, node->node_index, rv, 1);
1362
1363 /* Must do the tracing before giving buffers back to driver */
1364 if (PREDICT_FALSE (n_trace))
1365 {
1366 u32 left = n_rx_packets;
1367
1368 b = cpu->rx_buffers_pdesc;
1369 while (n_trace && left)
1370 {
1371 vhost_trace_t *t0;
1372
1373 vlib_trace_buffer (vm, node, next_index, b[0],
1374 /* follow_chain */ 0);
1375 t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
1376 b++;
1377 vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
1378 last_used_idx = (last_used_idx + 1) & mask;
1379 n_trace--;
1380 left--;
1381 vlib_set_trace_count (vm, node, n_trace);
1382 }
1383 }
1384
1385 /*
1386 * Give buffers back to driver.
1387 */
1388 vhost_user_mark_desc_consumed (vui, txvq, desc_head, n_descs_processed);
1389
1390 /* interrupt (call) handling */
1391 if ((txvq->callfd_idx != ~0) &&
1392 (txvq->avail_event->flags != VRING_EVENT_F_DISABLE))
1393 {
1394 txvq->n_since_last_int += n_rx_packets;
1395 if (txvq->n_since_last_int > vum->coalesce_frames)
1396 vhost_user_send_call (vm, txvq);
1397 }
1398
1399 /* increase rx counters */
1400 vlib_increment_combined_counter
1401 (vnet_main.interface_main.combined_sw_if_counters
1402 + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
1403 n_rx_packets, n_rx_bytes);
1404
1405 vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);
1406
1407 if (PREDICT_FALSE (buffers_used < buffers_required))
1408 vlib_buffer_free (vm, next, buffers_required - buffers_used);
1409
1410done:
1411 return n_rx_packets;
1412}
1413
Mohsin Kazmie7cde312018-06-26 17:20:11 +02001414VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
1415 vlib_node_runtime_t * node,
1416 vlib_frame_t * frame)
1417{
1418 vhost_user_main_t *vum = &vhost_user_main;
1419 uword n_rx_packets = 0;
1420 vhost_user_intf_t *vui;
1421 vnet_device_input_runtime_t *rt =
1422 (vnet_device_input_runtime_t *) node->runtime_data;
1423 vnet_device_and_queue_t *dq;
1424
1425 vec_foreach (dq, rt->devices_and_queues)
1426 {
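    /* In polling mode service the queue unconditionally; in interrupt or
     * adaptive mode service it only when an interrupt is pending (the
     * atomic swap also clears the pending flag). */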
Sirshak Das5b718d52018-10-12 09:38:27 -05001427 if ((node->state == VLIB_NODE_STATE_POLLING) ||
1428 clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
Mohsin Kazmie7cde312018-06-26 17:20:11 +02001429 {
1430 vui =
1431 pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
Steven Luongbc0d9ff2020-03-23 09:34:59 -07001432 if (vhost_user_is_packed_ring_supported (vui))
1433 {
1434 if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_CSUM))
1435 n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
1436 dq->queue_id, node,
1437 dq->mode, 1);
1438 else
1439 n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
1440 dq->queue_id, node,
1441 dq->mode, 0);
1442 }
Steven Luong4208a4c2019-05-06 08:51:56 -07001443 else
Steven Luongbc0d9ff2020-03-23 09:34:59 -07001444 {
1445 if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_CSUM))
1446 n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
1447 node, dq->mode, 1);
1448 else
1449 n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
1450 node, dq->mode, 0);
1451 }
Mohsin Kazmie7cde312018-06-26 17:20:11 +02001452 }
1453 }
1454
1455 return n_rx_packets;
1456}
1457
Mohsin Kazmie7cde312018-06-26 17:20:11 +02001458/* *INDENT-OFF* */
1459VLIB_REGISTER_NODE (vhost_user_input_node) = {
1460 .type = VLIB_NODE_TYPE_INPUT,
1461 .name = "vhost-user-input",
1462 .sibling_of = "device-input",
Damjan Marion7ca5aaa2019-09-24 18:10:49 +02001463 .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
Mohsin Kazmie7cde312018-06-26 17:20:11 +02001464
1465 /* Will be enabled if/when hardware is detected. */
1466 .state = VLIB_NODE_STATE_DISABLED,
1467
1468 .format_buffer = format_ethernet_header_with_length,
1469 .format_trace = format_vhost_trace,
1470
1471 .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
1472 .error_strings = vhost_user_input_func_error_strings,
1473};
1474/* *INDENT-ON* */
Mohsin Kazmie7cde312018-06-26 17:20:11 +02001475
1476/*
1477 * fd.io coding-style-patch-verification: ON
1478 *
1479 * Local Variables:
1480 * eval: (c-set-style "gnu")
1481 * End:
1482 */