/*
 *------------------------------------------------------------------
 * vhost-user-input
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/interface/rx_queue_funcs.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>

/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value controls how many packets,
 * at most, are discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers falls below this threshold,
 * the RX node starts discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drops in the VM.
 * This value controls the number of copy operations that are stacked up
 * before they are all executed and the descriptors are given back to
 * the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64

extern vlib_node_registration_t vhost_user_input_node;

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error")  \
  _(NO_BUFFER, "no available buffer")  \
  _(MMAP_FAIL, "mmap failure")  \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table")  \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(NOT_READY, "vhost interface not ready or down") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;

static __clib_unused char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};

static_always_inline void
vhost_user_rx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * txvq,
		     u16 last_avail_idx)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->desc[desc_current];
  if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
    }
  if (txvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(txvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
    }
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
    }
}

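/*
 * Execute the copy orders accumulated in cpy[] (guest memory to vlib
 * buffers). Sources are mapped and prefetched two entries ahead of the
 * memcpy in a small software pipeline. Returns 0 on success, 1 if any
 * guest address fails to map.
 */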
static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
	return 1;

      while (PREDICT_TRUE (copy_len >= 4))
	{
	  src0 = src2;
	  src1 = src3;

	  if (PREDICT_FALSE
	      (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
	    return 1;

	  clib_prefetch_load (src2);
	  clib_prefetch_load (src3);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	return 1;
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

/**
 * Try to discard packets from the tx ring (VPP RX path).
 * Returns the number of discarded packets.
 */
static_always_inline u32
vhost_user_rx_discard_packet (vlib_main_t * vm,
			      vhost_user_intf_t * vui,
			      vhost_user_vring_t * txvq, u32 discard_max)
{
  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or indirect).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  u32 discarded_packets = 0;
  u32 avail_idx = txvq->avail->idx;
  u16 mask = txvq->qsz_mask;
  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;
  while (discarded_packets != discard_max)
    {
      if (avail_idx == last_avail_idx)
	goto out;

      u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
      last_avail_idx++;
      txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
      last_used_idx++;
      discarded_packets++;
    }

out:
  txvq->last_avail_idx = last_avail_idx;
  txvq->last_used_idx = last_used_idx;
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);
  return discarded_packets;
}

/*
 * In case of overflow, we need to rewind the array of allocated buffers.
 */
static_always_inline void
vhost_user_input_rewind_buffers (vlib_main_t * vm,
				 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}

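/*
 * Parse the guest-supplied virtio_net_hdr together with the packet
 * headers: record the l2/l3/l4 header offsets in the buffer, request
 * the relevant checksum offloads, and propagate GSO size/type when the
 * header advertises it.
 */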
static_always_inline void
vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
			      virtio_net_hdr_t * hdr)
{
  u8 l4_hdr_sz = 0;
  u8 l4_proto = 0;
  ethernet_header_t *eh = (ethernet_header_t *) b0_data;
  u16 ethertype = clib_net_to_host_u16 (eh->type);
  u16 l2hdr_sz = sizeof (ethernet_header_t);
  vnet_buffer_oflags_t oflags = 0;

  if (ethernet_frame_is_tagged (ethertype))
    {
      ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

      ethertype = clib_net_to_host_u16 (vlan->type);
      l2hdr_sz += sizeof (*vlan);
      if (ethertype == ETHERNET_TYPE_VLAN)
	{
	  vlan++;
	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	}
    }
  vnet_buffer (b0)->l2_hdr_offset = 0;
  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
  vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
  b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L4_HDR_OFFSET_VALID);

  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
    {
      ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip4->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP4;
      oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
    }
  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip6->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP6;
    }

  if (l4_proto == IP_PROTOCOL_TCP)
    {
      tcp_header_t *tcp = (tcp_header_t *)
	(b0_data + vnet_buffer (b0)->l4_hdr_offset);
      l4_hdr_sz = tcp_header_bytes (tcp);
      oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
    }
  else if (l4_proto == IP_PROTOCOL_UDP)
    {
      l4_hdr_sz = sizeof (udp_header_t);
      oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
    }

  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO;
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
    }

  if (oflags)
    vnet_buffer_offload_flags_set (b0, oflags);
}

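/*
 * Send any pending interrupt (call) to the guest for either vring once
 * its interrupt coalescing deadline has expired.
 */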
static_always_inline void
vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_intf_t * vui,
			       vhost_user_vring_t * txvq,
			       vhost_user_vring_t * rxvq)
{
  f64 now = vlib_time_now (vm);

  if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
    vhost_user_send_call (vm, vui, txvq);

  if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
    vhost_user_send_call (vm, vui, rxvq);
}

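/*
 * Prepare the next frame for this input node: resolve the device-input
 * feature arc (if any features are enabled on the interface) and, when
 * going straight to ethernet-input, mark the frame as carrying a single
 * sw_if_index so ethernet-input can take its fast path.
 */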
static_always_inline void
vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vhost_user_intf_t * vui,
			      u32 * current_config_index, u32 * next_index,
			      u32 ** to_next, u32 * n_left_to_next)
{
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;

  if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
    {
      vnet_feature_config_main_t *cm;
      cm = &fm->feature_config_mains[feature_arc_idx];
      *current_config_index = vec_elt (cm->config_index_by_sw_if_index,
				       vui->sw_if_index);
      vnet_get_config_data (&cm->config_main, current_config_index,
			    next_index, 0);
    }

  vlib_get_new_next_frame (vm, node, *next_index, *to_next, *n_left_to_next);

  if (*next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
    {
      /* give some hints to ethernet-input */
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, *next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = vui->sw_if_index;
      ef->hw_if_index = vui->hw_if_index;
      vlib_frame_no_append (f);
    }
}

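/*
 * Split-ring RX path for one queue. In short: send any pending
 * interrupts to the guest, replenish the per-thread buffer cache, then
 * walk the available ring, chaining vlib buffers per descriptor chain
 * and queueing memory copies that are executed in batches
 * (VHOST_USER_RX_COPY_THRESHOLD) before the descriptors are handed
 * back to the guest. Returns the number of packets received.
 */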
static_always_inline u32
vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
		     vhost_user_intf_t *vui, u16 qid,
		     vlib_node_runtime_t *node, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u32 current_config_index = ~(u32) 0;
  u16 mask = txvq->qsz_mask;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->avail == 0))
    goto done;

  {
    /* do we have pending interrupts ? */
    vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
    vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);
  }

  /*
   * For adaptive mode, it is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupt.
   */
  if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used->flags = VRING_USED_F_NO_NOTIFY;
    }

  if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
    goto done;

  n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);

  /* nothing to do */
  if (PREDICT_FALSE (n_left == 0))
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      vhost_user_rx_discard_packet (vm, vui, txvq,
				    VHOST_USER_DOWN_DISCARD_COUNT);
      goto done;
    }

  if (PREDICT_FALSE (n_left == (mask + 1)))
    {
      /*
       * Informational error logging when VPP is not
       * receiving packets fast enough.
       */
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
    }

  if (n_left > VLIB_FRAME_SIZE)
    n_left = VLIB_FRAME_SIZE;

  /*
   * For small packets (<2kB), we will not need more than one vlib buffer
   * per packet. In case packets are bigger, we will just yield at some point
   * in the loop and come back later. This is not an issue because for big
   * packets, the processing cost really comes from the memory copy.
   * The assumption is that big packets will fit in 40 buffers.
   */
  if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
		     cpu->rx_buffers_len < 40))
    {
      u32 curr_len = cpu->rx_buffers_len;
      cpu->rx_buffers_len +=
	vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
			   VHOST_USER_RX_BUFFERS_N - curr_len);

      if (PREDICT_FALSE
	  (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
	{
	  /* In case of buffer starvation, discard some packets from the queue
	   * and log the event.
	   * We keep doing best effort for the remaining packets. */
	  u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
	    n_left + 1 - cpu->rx_buffers_len : 1;
	  flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);

	  n_left -= flush;
	  vlib_increment_simple_counter (vnet_main.
					 interface_main.sw_if_counters +
					 VNET_INTERFACE_COUNTER_DROP,
					 vm->thread_index, vui->sw_if_index,
					 flush);

	  vlib_error_count (vm, vhost_user_input_node.index,
			    VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
	}
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);

  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;

  while (n_left > 0)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u16 desc_current;
      u32 desc_data_offset;
      vring_desc_t *desc_table = txvq->desc;

      if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
	{
	  /* Not enough rx_buffers.
	   * Note: We yield on 1 so we don't need to do an additional
	   * check for the next buffer prefetch.
	   */
	  n_left = 0;
	  break;
	}

      desc_current = txvq->avail->ring[last_avail_idx & mask];
      cpu->rx_buffers_len--;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_head = b_current = vlib_get_buffer (vm, bi_current);
      to_next[0] = bi_current;	// We do that now so we can forget about bi_current
      to_next++;
      n_left_to_next--;

      vlib_prefetch_buffer_with_index
	(vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);

      /* Just preset the used descriptor id and length for later */
      txvq->used->ring[last_used_idx & mask].id = desc_current;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_FALSE
	  (n_trace > 0 && vlib_trace_buffer (vm, node, next_index, b_head,
					     /* follow_chain */ 0)))
	{
	  vhost_trace_t *t0 =
	    vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
	  vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
	  n_trace--;
	  vlib_set_trace_count (vm, node, n_trace);
	}

      /* This depends on the setup but is very consistent,
       * so the CPU branch predictor should do a pretty good job
       * at optimizing the decision. */
      if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
	{
	  desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
				      &map_hint);
	  desc_current = 0;
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	}

      desc_data_offset = vui->virtio_net_hdr_sz;

      if (enable_csum)
	{
	  virtio_net_hdr_mrg_rxbuf_t *hdr;
	  u8 *b_data;
	  u16 current;

	  hdr = map_guest_mem (vui, desc_table[desc_current].addr, &map_hint);
	  if (PREDICT_FALSE (hdr == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	  if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
	    {
	      if ((desc_data_offset == desc_table[desc_current].len) &&
		  (desc_table[desc_current].flags & VRING_DESC_F_NEXT))
		{
		  current = desc_table[desc_current].next;
		  b_data = map_guest_mem (vui, desc_table[current].addr,
					  &map_hint);
		  if (PREDICT_FALSE (b_data == 0))
		    {
		      vlib_error_count (vm, node->node_index,
					VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL,
					1);
		      goto out;
		    }
		}
	      else
		b_data = (u8 *) hdr + desc_data_offset;

	      vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	    }
	}

      while (1)
	{
	  /* Get more input if necessary. Or end of packet. */
	  if (desc_data_offset == desc_table[desc_current].len)
	    {
	      if (PREDICT_FALSE (desc_table[desc_current].flags &
				 VRING_DESC_F_NEXT))
		{
		  desc_current = desc_table[desc_current].next;
		  desc_data_offset = 0;
		}
	      else
		{
		  goto out;
		}
	    }

	  /* Get more output if necessary. Or end of packet. */
	  if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
	    {
	      if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
		{
		  /* Cancel speculation */
		  to_next--;
		  n_left_to_next++;

		  /*
		   * Check whether there are buffers left.
		   * If not, just rewind the used buffers and stop.
		   * Note: Scheduled copies are not cancelled. This is
		   * not an issue as they would still be valid. Useless,
		   * but valid.
		   */
		  vhost_user_input_rewind_buffers (vm, cpu, b_head);
		  n_left = 0;
		  goto stop;
		}

	      /* Get next output */
	      cpu->rx_buffers_len--;
	      u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
	      b_current->next_buffer = bi_next;
	      b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
	      bi_current = bi_next;
	      b_current = vlib_get_buffer (vm, bi_current);
	    }

	  /* Prepare a copy order executed later for the data */
	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
	  cpy->len = buffer_data_size - b_current->current_length;
	  cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
	  cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
			      b_current->current_length);
	  cpy->src = desc_table[desc_current].addr + desc_data_offset;

	  desc_data_offset += cpy->len;

	  b_current->current_length += cpy->len;
	  b_head->total_length_not_including_first_buffer += cpy->len;
	}

    out:

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      /* consume the descriptor and return it as used */
      last_avail_idx++;
      last_used_idx++;

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      b_head->error = 0;

      if (current_config_index != ~(u32) 0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

      n_left--;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we still perform the copies from time to time
       * in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
						    copy_len, &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_STORE_BARRIER ();
	  txvq->used->idx = last_used_idx;
	  vhost_user_log_dirty_ring (vui, txvq, idx);
	}
    }
stop:
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  txvq->last_used_idx = last_used_idx;
  txvq->last_avail_idx = last_avail_idx;

  /* Do the memory copies */
  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
					    &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }

  /* give buffers back to driver */
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      txvq->n_since_last_int += n_rx_packets;

      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

done:
  return n_rx_packets;
}

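/*
 * Return n_descs_processed packed descriptors (starting at desc_head) to
 * the guest by toggling their AVAIL/USED flag bits according to the
 * current used wrap counter, advancing the last used index as we go.
 */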
static_always_inline void
vhost_user_mark_desc_consumed (vhost_user_intf_t * vui,
			       vhost_user_vring_t * txvq, u16 desc_head,
			       u16 n_descs_processed)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u16 desc_idx;
  u16 mask = txvq->qsz_mask;

  for (desc_idx = 0; desc_idx < n_descs_processed; desc_idx++)
    {
      if (txvq->used_wrap_counter)
	desc_table[(desc_head + desc_idx) & mask].flags |=
	  (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      else
	desc_table[(desc_head + desc_idx) & mask].flags &=
	  ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      vhost_user_advance_last_used_idx (txvq);
    }
}

static_always_inline void
vhost_user_rx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vhost_user_vring_t * txvq,
			    u16 desc_current)
{
  vhost_user_main_t *vum = &vhost_user_main;
  vring_packed_desc_t *hdr_desc;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->packed_desc[desc_current];
  if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;

  if (!(txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      clib_memcpy_fast (&t->hdr, hdr,
			len > hdr_desc->len ? hdr_desc->len : len);
    }
}

static_always_inline u32
vhost_user_rx_discard_packet_packed (vlib_main_t * vm,
				     vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 discard_max)
{
  u32 discarded_packets = 0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head;

  desc_head = desc_current = txvq->last_used_idx & mask;

  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or indirect).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  while ((discarded_packets != discard_max) &&
	 vhost_user_packed_desc_available (txvq, desc_current))
    {
      vhost_user_advance_last_avail_idx (txvq);
      discarded_packets++;
      desc_current = (desc_current + 1) & mask;
    }

  if (PREDICT_TRUE (discarded_packets))
    vhost_user_mark_desc_consumed (vui, txvq, desc_head, discarded_packets);
  return (discarded_packets);
}

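/*
 * Packed-ring variant of the deferred copy executor: maps and prefetches
 * four sources ahead of the memcpy pipeline and falls back to a
 * one-by-one loop when a mapping fails or fewer than 8 orders remain.
 * Returns VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR or MMAP_FAIL.
 */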
static_always_inline u32
vhost_user_input_copy_packed (vhost_user_intf_t * vui, vhost_copy_t * cpy,
			      u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3, *src4, *src5, *src6, *src7;
  u8 bad;
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;

  if (PREDICT_TRUE (copy_len >= 8))
    {
      src4 = map_guest_mem (vui, cpy[0].src, map_hint);
      src5 = map_guest_mem (vui, cpy[1].src, map_hint);
      src6 = map_guest_mem (vui, cpy[2].src, map_hint);
      src7 = map_guest_mem (vui, cpy[3].src, map_hint);
      bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
      if (PREDICT_FALSE (bad))
	goto one_by_one;
      clib_prefetch_load (src4);
      clib_prefetch_load (src5);
      clib_prefetch_load (src6);
      clib_prefetch_load (src7);

      while (PREDICT_TRUE (copy_len >= 8))
	{
	  src0 = src4;
	  src1 = src5;
	  src2 = src6;
	  src3 = src7;

	  src4 = map_guest_mem (vui, cpy[4].src, map_hint);
	  src5 = map_guest_mem (vui, cpy[5].src, map_hint);
	  src6 = map_guest_mem (vui, cpy[6].src, map_hint);
	  src7 = map_guest_mem (vui, cpy[7].src, map_hint);
	  bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
	  if (PREDICT_FALSE (bad))
	    break;

	  clib_prefetch_load (src4);
	  clib_prefetch_load (src5);
	  clib_prefetch_load (src6);
	  clib_prefetch_load (src7);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  clib_memcpy_fast ((void *) cpy[2].dst, src2, cpy[2].len);
	  clib_memcpy_fast ((void *) cpy[3].dst, src3, cpy[3].len);
	  copy_len -= 4;
	  cpy += 4;
	}
    }

one_by_one:
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	{
	  rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  break;
	}
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return rc;
}

static_always_inline u32
vhost_user_do_offload (vhost_user_intf_t * vui,
		       vring_packed_desc_t * desc_table, u16 desc_current,
		       u16 mask, vlib_buffer_t * b_head, u32 * map_hint)
{
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u8 *b_data;
  u32 desc_data_offset = vui->virtio_net_hdr_sz;

  hdr = map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
  if (PREDICT_FALSE (hdr == 0))
    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
  else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      if (desc_data_offset == desc_table[desc_current].len)
	{
	  desc_current = (desc_current + 1) & mask;
	  b_data =
	    map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
	  if (PREDICT_FALSE (b_data == 0))
	    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  else
	    vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
      else
	{
	  b_data = (u8 *) hdr + desc_data_offset;
	  vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
    }

  return rc;
}

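/*
 * Number of vlib buffers needed to hold desc_len bytes of payload,
 * i.e. a ceiling division by buffer_data_size with a shift/mask fast
 * path for the common 2048-byte buffer. For example, a 5000-byte
 * descriptor chain with 2048-byte buffers needs 3 buffers
 * (5000 >> 11 = 2, remainder 904, so round up to 3).
 */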
static_always_inline u32
vhost_user_compute_buffers_required (u32 desc_len, u32 buffer_data_size)
{
  div_t result;
  u32 buffers_required;

  if (PREDICT_TRUE (buffer_data_size == 2048))
    {
      buffers_required = desc_len >> 11;
      if ((desc_len & 2047) != 0)
	buffers_required++;
      return (buffers_required);
    }

  if (desc_len < buffer_data_size)
    return 1;

  result = div (desc_len, buffer_data_size);
  if (result.rem)
    buffers_required = result.quot + 1;
  else
    buffers_required = result.quot;

  return (buffers_required);
}

static_always_inline u32
vhost_user_compute_indirect_desc_len (vhost_user_intf_t * vui,
				      vhost_user_vring_t * txvq,
				      u32 buffer_data_size, u16 desc_current,
				      u32 * map_hint)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 desc_data_offset = vui->virtio_net_hdr_sz;
  u16 desc_idx = desc_current;
  u32 n_descs;

  n_descs = desc_table[desc_idx].len >> 4;
  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr, map_hint);
  if (PREDICT_FALSE (desc_table == 0))
    return 0;

  for (desc_idx = 0; desc_idx < n_descs; desc_idx++)
    desc_len += desc_table[desc_idx].len;

  if (PREDICT_TRUE (desc_len > desc_data_offset))
    desc_len -= desc_data_offset;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}

static_always_inline u32
vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 buffer_data_size, u16 * current,
				     u16 * n_left)
{
  vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 mask = txvq->qsz_mask;

  while (desc_table[*current].flags & VRING_DESC_F_NEXT)
    {
      desc_len += desc_table[*current].len;
      (*n_left)++;
      *current = (*current + 1) & mask;
      vhost_user_advance_last_avail_idx (txvq);
    }
  desc_len += desc_table[*current].len;
  (*n_left)++;
  *current = (*current + 1) & mask;
  vhost_user_advance_last_avail_idx (txvq);

  if (PREDICT_TRUE (desc_len > vui->virtio_net_hdr_sz))
    desc_len -= vui->virtio_net_hdr_sz;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}

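/*
 * Copy one descriptor's payload into the vlib buffer chain being built
 * for the current packet: pull the next pre-allocated buffer whenever
 * the current one is full, and queue copy orders in cpu->copy[] rather
 * than copying immediately. Advances the descriptor index and resets
 * the data offset for the next descriptor.
 */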
static_always_inline void
vhost_user_assemble_packet (vring_packed_desc_t * desc_table,
			    u16 * desc_idx, vlib_buffer_t * b_head,
			    vlib_buffer_t ** b_current, u32 ** next,
			    vlib_buffer_t *** b, u32 * bi_current,
			    vhost_cpu_t * cpu, u16 * copy_len,
			    u32 * buffers_used, u32 buffers_required,
			    u32 * desc_data_offset, u32 buffer_data_size,
			    u16 mask)
{
  u32 desc_data_l;

  while (*desc_data_offset < desc_table[*desc_idx].len)
    {
      /* Get more output if necessary. Or end of packet. */
      if (PREDICT_FALSE ((*b_current)->current_length == buffer_data_size))
	{
	  /* Get next output */
	  u32 bi_next = **next;
	  (*next)++;
	  (*b_current)->next_buffer = bi_next;
	  (*b_current)->flags |= VLIB_BUFFER_NEXT_PRESENT;
	  *bi_current = bi_next;
	  *b_current = **b;
	  (*b)++;
	  (*buffers_used)++;
	  ASSERT (*buffers_used <= buffers_required);
	}

      /* Prepare a copy order executed later for the data */
      ASSERT (*copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[*copy_len];
      (*copy_len)++;
      desc_data_l = desc_table[*desc_idx].len - *desc_data_offset;
      cpy->len = buffer_data_size - (*b_current)->current_length;
      cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
      cpy->dst = (uword) (vlib_buffer_get_current (*b_current) +
			  (*b_current)->current_length);
      cpy->src = desc_table[*desc_idx].addr + *desc_data_offset;

      *desc_data_offset += cpy->len;

      (*b_current)->current_length += cpy->len;
      b_head->total_length_not_including_first_buffer += cpy->len;
    }
  *desc_idx = (*desc_idx + 1) & mask;
  *desc_data_offset = 0;
}

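/*
 * Packed-ring RX path for one queue. It first walks the available
 * descriptors to compute how many packets and vlib buffers are needed,
 * bulk-allocates those buffers, then assembles the packets while
 * queueing deferred memory copies, traces them if requested, and
 * finally marks the descriptors consumed so the guest can reuse them.
 * Returns the number of packets received.
 */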
static_always_inline u32
vhost_user_if_input_packed (vlib_main_t *vm, vhost_user_main_t *vum,
			    vhost_user_intf_t *vui, u16 qid,
			    vlib_node_runtime_t *node, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left = 0;
  u32 buffers_required = 0;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u32 current_config_index = ~0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head, last_used_idx;
  vring_packed_desc_t *desc_table = 0;
  u32 n_descs_processed = 0;
  u32 rv;
  vlib_buffer_t **b;
  u32 *next;
  u32 buffers_used = 0;
  u16 current, n_descs_to_process;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->packed_desc == 0))
    goto done;

  /* do we have pending interrupts ? */
  vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
  vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);

  /*
   * For adaptive mode, it is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupt.
   */
  if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used_event->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used_event->flags = VRING_EVENT_F_DISABLE;
    }

  last_used_idx = txvq->last_used_idx & mask;
  desc_head = desc_current = last_used_idx;

  if (vhost_user_packed_desc_available (txvq, desc_current) == 0)
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !vui->is_ready || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq,
						VHOST_USER_DOWN_DISCARD_COUNT);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NOT_READY, rv);
      goto done;
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);

  /*
   * Compute n_left and total buffers needed
   */
  desc_table = txvq->packed_desc;
  current = desc_current;
  while (vhost_user_packed_desc_available (txvq, current) &&
	 (n_left < VLIB_FRAME_SIZE))
    {
      if (desc_table[current].flags & VRING_DESC_F_INDIRECT)
	{
	  buffers_required +=
	    vhost_user_compute_indirect_desc_len (vui, txvq, buffer_data_size,
						  current, &map_hint);
	  n_left++;
	  current = (current + 1) & mask;
	  vhost_user_advance_last_avail_idx (txvq);
	}
      else
	{
	  buffers_required +=
	    vhost_user_compute_chained_desc_len (vui, txvq, buffer_data_size,
						 &current, &n_left);
	}
    }

  /* Something is broken if we need more than 10000 buffers */
  if (PREDICT_FALSE ((buffers_required == 0) || (buffers_required > 10000)))
    {
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  vec_validate (cpu->to_next_list, buffers_required);
  rv = vlib_buffer_alloc (vm, cpu->to_next_list, buffers_required);
  if (PREDICT_FALSE (rv != buffers_required))
    {
      vlib_buffer_free (vm, cpu->to_next_list, rv);
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  next = cpu->to_next_list;
  vec_validate (cpu->rx_buffers_pdesc, buffers_required);
  vlib_get_buffers (vm, next, cpu->rx_buffers_pdesc, buffers_required);
  b = cpu->rx_buffers_pdesc;
  n_descs_processed = n_left;

  while (n_left)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u32 desc_data_offset;
      u16 desc_idx = desc_current;
      u32 n_descs;

      desc_table = txvq->packed_desc;
      to_next[0] = bi_current = next[0];
      b_head = b_current = b[0];
      b++;
      buffers_used++;
      ASSERT (buffers_used <= buffers_required);
      to_next++;
      next++;
      n_left_to_next--;

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      desc_data_offset = vui->virtio_net_hdr_sz;
      n_descs_to_process = 1;

      if (desc_table[desc_idx].flags & VRING_DESC_F_INDIRECT)
	{
	  n_descs = desc_table[desc_idx].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr,
				      &map_hint);
	  desc_idx = 0;
	  if (PREDICT_FALSE (desc_table == 0) ||
	      (enable_csum &&
	       (PREDICT_FALSE
		(vhost_user_do_offload
		 (vui, desc_table, desc_idx, mask, b_head,
		  &map_hint) != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      to_next--;
	      next--;
	      n_left_to_next++;
	      buffers_used--;
	      b--;
	      goto out;
	    }
	  while (n_descs)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs--;
	    }
	}
      else
	{
	  if (enable_csum)
	    {
	      rv = vhost_user_do_offload (vui, desc_table, desc_idx, mask,
					  b_head, &map_hint);
	      if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
		{
		  vlib_error_count (vm, node->node_index, rv, 1);
		  to_next--;
		  next--;
		  n_left_to_next++;
		  buffers_used--;
		  b--;
		  goto out;
		}
	    }
	  /*
	   * For chained descriptors, we process the whole chain in a single
	   * while loop. So count how many descriptors are in the chain.
	   */
	  n_descs_to_process = 1;
	  while (desc_table[desc_idx].flags & VRING_DESC_F_NEXT)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs_to_process++;
	    }
	  vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
				      &b_current, &next, &b, &bi_current,
				      cpu, &copy_len, &buffers_used,
				      buffers_required, &desc_data_offset,
				      buffer_data_size, mask);
	}

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = ~0;
      b_head->error = 0;

      if (current_config_index != ~0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

    out:
      ASSERT (n_left >= n_descs_to_process);
      n_left -= n_descs_to_process;

      /* advance to next descriptor */
      desc_current = (desc_current + n_descs_to_process) & mask;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we still perform the copies from time to time
       * in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len,
					     &map_hint);
	  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
	    vlib_error_count (vm, node->node_index, rv, 1);
	  copy_len = 0;
	}
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* Do the memory copies */
  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len, &map_hint);
  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
    vlib_error_count (vm, node->node_index, rv, 1);

  /* Must do the tracing before giving buffers back to driver */
  if (PREDICT_FALSE (n_trace))
    {
      u32 left = n_rx_packets;

      b = cpu->rx_buffers_pdesc;
      while (n_trace && left)
	{
	  if (PREDICT_TRUE
	      (vlib_trace_buffer
	       (vm, node, next_index, b[0], /* follow_chain */ 0)))
	    {
	      vhost_trace_t *t0;
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	      vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
	      last_used_idx = (last_used_idx + 1) & mask;
	      n_trace--;
	      vlib_set_trace_count (vm, node, n_trace);
	    }
	  left--;
	  b++;
	}
    }

  /*
   * Give buffers back to driver.
   */
  vhost_user_mark_desc_consumed (vui, txvq, desc_head, n_descs_processed);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      (txvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      txvq->n_since_last_int += n_rx_packets;
      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

  if (PREDICT_FALSE (buffers_used < buffers_required))
    vlib_buffer_free (vm, next, buffers_required - buffers_used);

done:
  return n_rx_packets;
}

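/*
 * Input node dispatch function: walk the RX queue poll vector assigned
 * to this node and invoke the packed- or split-ring path for each
 * queue, specialized on whether the interface negotiated
 * VIRTIO_NET_F_CSUM.
 */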
VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  vhost_user_main_t *vum = &vhost_user_main;
  uword n_rx_packets = 0;
  vhost_user_intf_t *vui;
  vnet_hw_if_rxq_poll_vector_t *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  vnet_hw_if_rxq_poll_vector_t *pve;

  vec_foreach (pve, pv)
    {
      vui = pool_elt_at_index (vum->vhost_user_interfaces, pve->dev_instance);
      if (vhost_user_is_packed_ring_supported (vui))
	{
	  if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
	    n_rx_packets += vhost_user_if_input_packed (
	      vm, vum, vui, pve->queue_id, node, 1);
	  else
	    n_rx_packets += vhost_user_if_input_packed (
	      vm, vum, vui, pve->queue_id, node, 0);
	}
      else
	{
	  if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
	    n_rx_packets +=
	      vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 1);
	  else
	    n_rx_packets +=
	      vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 0);
	}
    }

  return n_rx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_vhost_trace,

  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */