/*
 *------------------------------------------------------------------
 * vhost-user-input
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value caps how many packets
 * are discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers falls below this threshold,
 * the RX node starts discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drops in the VM.
 * This value controls how many copy operations are batched
 * before they are all executed and the descriptors are given back to
 * the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64

extern vlib_node_registration_t vhost_user_input_node;

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error")  \
  _(NO_BUFFER, "no available buffer") \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;
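
/*
 * The X-macro above expands once per error counter: the enum entries become
 * VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR, VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER,
 * and so on, while the string table below collects the matching descriptions
 * in the same order, so the two stay in sync by construction.
 */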

static __clib_unused char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};

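/*
 * Fill a vhost_trace_t for one packet taken from the avail ring: record the
 * interface and queue, classify the head descriptor (indirect, chained or
 * single), and, when the virtio-net header can be mapped, copy it into the
 * trace so format_vhost_trace can pretty-print it.
 */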
static_always_inline void
vhost_user_rx_trace (vhost_trace_t * t,
                     vhost_user_intf_t * vui, u16 qid,
                     vlib_buffer_t * b, vhost_user_vring_t * txvq,
                     u16 last_avail_idx)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->desc[desc_current];
  if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* The header is the first descriptor of the indirect table */
      hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
    }
  if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
    }
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
    }
}

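/*
 * Execute the copy orders accumulated in cpy[0..copy_len-1]. The main loop
 * is software-pipelined: the guest sources of the next two entries are
 * mapped and prefetched while the current two are being copied, hiding part
 * of the map_guest_mem () and cache-miss latency.
 * Returns 0 on success, 1 if a guest address could not be mapped.
 */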
static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
                       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
        return 1;
      if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
        return 1;

      while (PREDICT_TRUE (copy_len >= 4))
        {
          src0 = src2;
          src1 = src3;

          if (PREDICT_FALSE
              (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
            return 1;
          if (PREDICT_FALSE
              (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
            return 1;

          CLIB_PREFETCH (src2, 64, LOAD);
          CLIB_PREFETCH (src3, 64, LOAD);

          clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
          clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
          copy_len -= 2;
          cpy += 2;
        }
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
        return 1;
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

/**
 * Try to discard packets from the tx ring (VPP RX path).
 * Returns the number of discarded packets.
 */
static_always_inline u32
vhost_user_rx_discard_packet (vlib_main_t * vm,
                              vhost_user_intf_t * vui,
                              vhost_user_vring_t * txvq, u32 discard_max)
{
  /*
   * On the RX side, each packet corresponds to one descriptor chain head
   * (this holds whether the descriptor is single, chained, or indirect).
   * Therefore, discarding a packet amounts to discarding a descriptor.
   */
  u32 discarded_packets = 0;
  u32 avail_idx = txvq->avail->idx;
  u16 mask = txvq->qsz_mask;
  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;
  while (discarded_packets != discard_max)
    {
      if (avail_idx == last_avail_idx)
        goto out;

      u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
      last_avail_idx++;
      txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
      last_used_idx++;
      discarded_packets++;
    }

out:
  txvq->last_avail_idx = last_avail_idx;
  txvq->last_used_idx = last_used_idx;
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);
  return discarded_packets;
}
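
/*
 * A note on the ring arithmetic used above and in the RX loop below:
 * last_avail_idx and last_used_idx are free-running u16 counters, and only
 * (idx & mask) is used to address the rings. Since virtio ring sizes are
 * powers of two (qsz_mask is size - 1), the u16 wrap-around is harmless.
 */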

/*
 * In case of overflow, we need to rewind the array of allocated buffers.
 */
static_always_inline void
vhost_user_input_rewind_buffers (vlib_main_t * vm,
                                 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}

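/*
 * Parse the virtio-net header of a received packet and translate it into
 * vlib buffer metadata: when VIRTIO_NET_HDR_F_NEEDS_CSUM is set, walk the
 * L2 header (including up to two VLAN tags) to find the L3/L4 offsets and
 * request the matching checksum offloads; when a GSO type is present,
 * record the segment size and L4 header size so the rest of the stack can
 * handle the oversized frame.
 */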
static_always_inline void
vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
                              virtio_net_hdr_t * hdr)
{
  u8 l4_hdr_sz = 0;

  if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      u8 l4_proto = 0;
      ethernet_header_t *eh = (ethernet_header_t *) b0_data;
      u16 ethertype = clib_net_to_host_u16 (eh->type);
      u16 l2hdr_sz = sizeof (ethernet_header_t);

      if (ethernet_frame_is_tagged (ethertype))
        {
          ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

          ethertype = clib_net_to_host_u16 (vlan->type);
          l2hdr_sz += sizeof (*vlan);
          if (ethertype == ETHERNET_TYPE_VLAN)
            {
              vlan++;
              ethertype = clib_net_to_host_u16 (vlan->type);
              l2hdr_sz += sizeof (*vlan);
            }
        }
      vnet_buffer (b0)->l2_hdr_offset = 0;
      vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
      vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
      b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
                    VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
                    VNET_BUFFER_F_L4_HDR_OFFSET_VALID |
                    VNET_BUFFER_F_OFFLOAD_IP_CKSUM);

      if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
        {
          ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
          l4_proto = ip4->protocol;
          b0->flags |= VNET_BUFFER_F_IS_IP4;
        }
      else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
        {
          ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
          l4_proto = ip6->protocol;
          b0->flags |= VNET_BUFFER_F_IS_IP6;
        }

      if (l4_proto == IP_PROTOCOL_TCP)
        {
          tcp_header_t *tcp = (tcp_header_t *)
            (b0_data + vnet_buffer (b0)->l4_hdr_offset);
          l4_hdr_sz = tcp_header_bytes (tcp);
          tcp->checksum = 0;
          b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
        }
      else if (l4_proto == IP_PROTOCOL_UDP)
        {
          udp_header_t *udp =
            (udp_header_t *) (b0_data + vnet_buffer (b0)->l4_hdr_offset);
          l4_hdr_sz = sizeof (*udp);
          udp->checksum = 0;
          b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
        }
    }

  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO;
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
    }
}

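/*
 * Device RX function: drain up to one frame of packets from the guest TX
 * virtqueue 'qid' of interface 'vui'. Ring parsing and memory copies are
 * deliberately decoupled: the loop below only schedules copy orders and
 * pre-fills the used ring, and the copies are executed in batches (see
 * VHOST_USER_RX_COPY_THRESHOLD) before the used index is published to the
 * guest. 'enable_csum' is passed as a literal by both call sites so that,
 * after inlining, the compiler can drop the offload-parsing code when the
 * guest did not negotiate VIRTIO_NET_F_CSUM.
 */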
static_always_inline u32
vhost_user_if_input (vlib_main_t * vm,
                     vhost_user_main_t * vum,
                     vhost_user_intf_t * vui,
                     u16 qid, vlib_node_runtime_t * node,
                     vnet_hw_interface_rx_mode mode, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u32 current_config_index = ~(u32) 0;
  u16 mask = txvq->qsz_mask;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->avail == 0))
    goto done;

  {
    /* do we have pending interrupts? */
    vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
    f64 now = vlib_time_now (vm);

    if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
      vhost_user_send_call (vm, txvq);

    if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
      vhost_user_send_call (vm, rxvq);
  }

  /*
   * Adaptive mode is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver we do not want interrupts.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode, and we must tell the driver we want interrupts again.
   */
  if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
           VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
          !(node->flags &
            VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
        /* Tell driver we want notification */
        txvq->used->flags = 0;
      else
        /* Tell driver we don't want notification */
        txvq->used->flags = VRING_USED_F_NO_NOTIFY;
    }

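  /*
   * Bit 0 of avail->flags is VRING_AVAIL_F_NO_INTERRUPT, which the guest may
   * toggle at any time; any other bit set is not something this node expects,
   * so processing is skipped (hence the 0xFFFE mask, which simply ignores the
   * interrupt-suppression bit).
   */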
  if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
    goto done;

  n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);

  /* nothing to do */
  if (PREDICT_FALSE (n_left == 0))
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      vhost_user_rx_discard_packet (vm, vui, txvq,
                                    VHOST_USER_DOWN_DISCARD_COUNT);
      goto done;
    }

  if (PREDICT_FALSE (n_left == (mask + 1)))
    {
      /*
       * Informational error logging when VPP is not
       * receiving packets fast enough.
       */
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
    }

  if (n_left > VLIB_FRAME_SIZE)
    n_left = VLIB_FRAME_SIZE;

  /*
   * For small packets (<2kB), we will not need more than one vlib buffer
   * per packet. In case packets are bigger, we will just yield at some point
   * in the loop and come back later. This is not an issue as for big packets,
   * the processing cost really comes from the memory copy.
   * The assumption is that big packets will fit in 40 buffers.
   */
  if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
                     cpu->rx_buffers_len < 40))
    {
      u32 curr_len = cpu->rx_buffers_len;
      cpu->rx_buffers_len +=
        vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
                           VHOST_USER_RX_BUFFERS_N - curr_len);

      if (PREDICT_FALSE
          (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
        {
          /* In case of buffer starvation, discard some packets from the queue
           * and log the event.
           * We keep doing best effort for the remaining packets. */
          u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
            n_left + 1 - cpu->rx_buffers_len : 1;
          flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);

          n_left -= flush;
          vlib_increment_simple_counter (vnet_main.
                                         interface_main.sw_if_counters +
                                         VNET_INTERFACE_COUNTER_DROP,
                                         vm->thread_index, vui->sw_if_index,
                                         flush);

          vlib_error_count (vm, vhost_user_input_node.index,
                            VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
        }
    }

  if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
    {
      vnet_feature_config_main_t *cm;
      cm = &fm->feature_config_mains[feature_arc_idx];
      current_config_index = vec_elt (cm->config_index_by_sw_if_index,
                                      vui->sw_if_index);
      vnet_get_config_data (&cm->config_main, &current_config_index,
                            &next_index, 0);
    }

  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
    {
      /* give some hints to ethernet-input */
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = vui->sw_if_index;
      ef->hw_if_index = vui->hw_if_index;
      vlib_frame_no_append (f);
    }

  while (n_left > 0)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u16 desc_current;
      u32 desc_data_offset;
      vring_desc_t *desc_table = txvq->desc;

      if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
        {
          /* Not enough rx_buffers.
           * Note: We yield on 1 so we don't need to do an additional
           * check for the next buffer prefetch.
           */
          n_left = 0;
          break;
        }

      desc_current = txvq->avail->ring[last_avail_idx & mask];
      cpu->rx_buffers_len--;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_head = b_current = vlib_get_buffer (vm, bi_current);
      to_next[0] = bi_current;	// We do this now so we can forget about bi_current
      to_next++;
      n_left_to_next--;

      vlib_prefetch_buffer_with_index
        (vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);

      /* Just preset the used descriptor id and length for later */
      txvq->used->ring[last_used_idx & mask].id = desc_current;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_FALSE (n_trace))
        {
          vlib_trace_buffer (vm, node, next_index, b_head,
                             /* follow_chain */ 0);
          vhost_trace_t *t0 =
            vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
          vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
          n_trace--;
          vlib_set_trace_count (vm, node, n_trace);
        }

      /* This depends on the setup but is very consistent,
       * so the CPU branch predictor should do a pretty good job
       * of optimizing the decision. */
      u8 indirect = 0;
      if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
        {
          desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
                                      &map_hint);
          desc_current = 0;
          indirect = 1;
          if (PREDICT_FALSE (desc_table == 0))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
              goto out;
            }
        }

      if (PREDICT_TRUE (vui->is_any_layout) ||
          (!(desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT)))
        {
          /* ANYLAYOUT or single buffer */
          desc_data_offset = vui->virtio_net_hdr_sz;
        }
      else
        {
          /* CSR case without ANYLAYOUT, skip 1st buffer */
          desc_data_offset = desc_table[desc_current].len;
        }

      if (enable_csum)
        {
          virtio_net_hdr_mrg_rxbuf_t *hdr;
          u8 *b_data;
          u16 current = desc_current;
          u32 data_offset = desc_data_offset;

          if ((data_offset == desc_table[current].len) &&
              (desc_table[current].flags & VIRTQ_DESC_F_NEXT))
            {
              current = desc_table[current].next;
              data_offset = 0;
            }
          hdr = map_guest_mem (vui, desc_table[current].addr, &map_hint);
          if (PREDICT_FALSE (hdr == 0))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
              goto out;
            }
          b_data = (u8 *) hdr + data_offset;
          if (indirect)
            {
              hdr = map_guest_mem (vui, desc_table[desc_current].addr,
                                   &map_hint);
              if (PREDICT_FALSE (hdr == 0))
                {
                  vlib_error_count (vm, node->node_index,
                                    VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
                  goto out;
                }
            }
          vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
        }

      while (1)
        {
          /* Get more input if necessary. Or end of packet. */
          if (desc_data_offset == desc_table[desc_current].len)
            {
              if (PREDICT_FALSE (desc_table[desc_current].flags &
                                 VIRTQ_DESC_F_NEXT))
                {
                  desc_current = desc_table[desc_current].next;
                  desc_data_offset = 0;
                }
              else
                {
                  goto out;
                }
            }

          /* Get more output if necessary. Or end of packet. */
          if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
            {
              if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
                {
                  /* Cancel speculation */
                  to_next--;
                  n_left_to_next++;

                  /*
                   * Check whether any buffers are left.
                   * If not, just rewind the used buffers and stop.
                   * Note: Scheduled copies are not cancelled. This is
                   * not an issue as they would still be valid. Useless,
                   * but valid.
                   */
                  vhost_user_input_rewind_buffers (vm, cpu, b_head);
                  n_left = 0;
                  goto stop;
                }

              /* Get next output */
              cpu->rx_buffers_len--;
              u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
              b_current->next_buffer = bi_next;
              b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
              bi_current = bi_next;
              b_current = vlib_get_buffer (vm, bi_current);
            }

          /* Queue a copy order for the data; it is executed later */
          vhost_copy_t *cpy = &cpu->copy[copy_len];
          copy_len++;
          u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
          cpy->len = buffer_data_size - b_current->current_length;
          cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
          cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
                              b_current->current_length);
          cpy->src = desc_table[desc_current].addr + desc_data_offset;

          desc_data_offset += cpy->len;

          b_current->current_length += cpy->len;
          b_head->total_length_not_including_first_buffer += cpy->len;
        }

    out:

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
        b_head->current_length;

      /* consume the descriptor and return it as used */
      last_avail_idx++;
      last_used_idx++;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      b_head->error = 0;

      if (current_config_index != ~(u32) 0)
        {
          b_head->current_config_index = current_config_index;
          vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
        }

      n_left--;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, it helps to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
        {
          if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
                                                    copy_len, &map_hint)))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
            }
          copy_len = 0;

          /* give buffers back to driver */
          CLIB_MEMORY_STORE_BARRIER ();
          txvq->used->idx = last_used_idx;
          vhost_user_log_dirty_ring (vui, txvq, idx);
        }
    }
stop:
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  txvq->last_used_idx = last_used_idx;
  txvq->last_avail_idx = last_avail_idx;

  /* Do the memory copies */
  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
                                            &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }

  /* give buffers back to driver */
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      txvq->n_since_last_int += n_rx_packets;

      if (txvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, txvq);
    }

  /* increment rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

done:
  return n_rx_packets;
}

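/*
 * Input node dispatch: for every (device, queue) assigned to this worker,
 * run the RX function when polling, or when an interrupt is pending in
 * interrupt/adaptive mode. The csum variant is selected here, outside the
 * packet loop, based on the negotiated VIRTIO_NET_F_CSUM feature.
 */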
VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * frame)
{
  vhost_user_main_t *vum = &vhost_user_main;
  uword n_rx_packets = 0;
  vhost_user_intf_t *vui;
  vnet_device_input_runtime_t *rt =
    (vnet_device_input_runtime_t *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  vec_foreach (dq, rt->devices_and_queues)
    {
      if ((node->state == VLIB_NODE_STATE_POLLING) ||
          clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
        {
          vui =
            pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
          if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_CSUM))
            n_rx_packets +=
              vhost_user_if_input (vm, vum, vui, dq->queue_id, node, dq->mode,
                                   1);
          else
            n_rx_packets +=
              vhost_user_if_input (vm, vum, vui, dq->queue_id, node, dq->mode,
                                   0);
        }
    }

  return n_rx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_vhost_trace,

  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */