/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stddef.h>
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

#include <vnet/gso/hdr_offset_parser.h>
/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order not to corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold, leaving some headroom in case the
 * code goes into the inner loop, which for a maximum-size 64k frame may
 * require more array entries. We subtract 200 because our default buffer
 * size is 2048 and the default desc len is likely 1536: while a jumbo frame
 * takes fewer than 40 vlib buffers, it may take twice as many descriptors
 * for the same frame. Use 200 for the extra headroom.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)

extern vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error                              \
  _(NONE, "no error")                                                 \
  _(NOT_READY, "vhost vring not ready")                               \
  _(DOWN, "vhost interface is down")                                  \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")     \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")     \
  _(MMAP_FAIL, "mmap failure")                                        \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
                                              hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
                           hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
                hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return clib_atomic_test_and_set (vui->vring_locks[qid]);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  clib_atomic_release (vui->vring_locks[qid]);
}

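/*
 * Usage note: in the TX functions below the lock is taken only when
 * vui->use_tx_spinlock is set (i.e. when more than one thread may transmit
 * on the same vring) and released before returning:
 *
 *   if (PREDICT_FALSE (vui->use_tx_spinlock))
 *     vhost_user_vring_lock (vui, qid);
 *   ...
 *   vhost_user_vring_unlock (vui, qid);
 */
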
static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
                     vhost_user_intf_t * vui, u16 qid,
                     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

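/*
 * Execute the copy orders queued in cpy[]. Guest destination addresses are
 * mapped on the fly (map_hint caches the last matching region). The main
 * loop processes two entries at a time while mapping the next two
 * destinations and prefetching their sources; the tail loop finishes any
 * remainder. Returns non-zero if a guest address fails to map.
 */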
static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
                    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
        return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
        return 1;
      while (PREDICT_TRUE (copy_len >= 4))
        {
          dst0 = dst2;
          dst1 = dst3;

          if (PREDICT_FALSE
              (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
            return 1;
          if (PREDICT_FALSE
              (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
            return 1;

          CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
          CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

          clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
          clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);

          vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
          vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
          copy_len -= 2;
          cpy += 2;
        }
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
        return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

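/*
 * Translate the vlib buffer offload flags into a virtio_net_hdr for the
 * guest: recompute the IPv4 header checksum when IP checksum offload is
 * requested, set csum_start/csum_offset (and zero the L4 checksum field)
 * for TCP/UDP checksum offload, and fill gso_type/gso_size when the buffer
 * is marked for GSO and the guest negotiated the matching
 * GUEST_TSO4/GUEST_TSO6/GUEST_UFO feature.
 */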
static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
                              virtio_net_hdr_t * hdr)
{
  generic_header_offset_t gho = { 0 };
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  int is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;

  ASSERT (!(is_ip4 && is_ip6));
  vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4, is_ip6);
  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
    {
      ip4_header_t *ip4;

      ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }

  /* checksum offload */
  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
      udp_header_t *udp =
        (udp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
      udp->checksum = 0;
    }
  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
      tcp_header_t *tcp =
        (tcp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
      tcp->checksum = 0;
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          if (is_ip4 &&
              (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
            {
              hdr->gso_size = vnet_buffer2 (b)->gso_size;
              hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
            }
          else if (is_ip6 &&
                   (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
            {
              hdr->gso_size = vnet_buffer2 (b)->gso_size;
              hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
            }
        }
      else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
               (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
        {
          hdr->gso_size = vnet_buffer2 (b)->gso_size;
          hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
        }
    }
}

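/*
 * Return the descriptors used for the last burst to the guest (packed ring).
 * All consumed entries get their AVAIL/USED flag bits toggled according to
 * the current used wrap counter; the head descriptor's flags are written
 * last so the guest never observes a partially completed chain. When the
 * last packet used a chained table, the remaining chain entries are skipped,
 * and an interrupt (call) is sent if the guest has not disabled
 * notifications and enough packets have accumulated since the last one.
 */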
static_always_inline void
vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_vring_t * rxvq,
                                u16 * n_descs_processed, u8 chained,
                                vlib_frame_t * frame, u32 n_left)
{
  u16 desc_idx, flags;
  vring_packed_desc_t *desc_table = rxvq->packed_desc;
  u16 last_used_idx = rxvq->last_used_idx;

  if (PREDICT_FALSE (*n_descs_processed == 0))
    return;

  if (rxvq->used_wrap_counter)
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags |
      (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
  else
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags &
      ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);

  vhost_user_advance_last_used_idx (rxvq);

  for (desc_idx = 1; desc_idx < *n_descs_processed; desc_idx++)
    {
      if (rxvq->used_wrap_counter)
        desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags |=
          (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
      else
        desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &=
          ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
      vhost_user_advance_last_used_idx (rxvq);
    }

  desc_table[last_used_idx & rxvq->qsz_mask].flags = flags;

  *n_descs_processed = 0;

  if (chained)
    {
      vring_packed_desc_t *desc_table = rxvq->packed_desc;

      while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
             VIRTQ_DESC_F_NEXT)
        vhost_user_advance_last_used_idx (rxvq);

      /* Advance past the current chained table entries */
      vhost_user_advance_last_used_idx (rxvq);
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      (rxvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      vhost_user_main_t *vum = &vhost_user_main;

      rxvq->n_since_last_int += frame->n_vectors - n_left;
      if (rxvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, rxvq);
    }
}

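/* Packed-ring variant of vhost_user_tx_trace() above. */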
static_always_inline void
vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
                            u16 qid, vlib_buffer_t * b,
                            vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = last_avail_idx & rxvq->qsz_mask;
  vring_packed_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->packed_desc[desc_current];
  if (rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->packed_desc[desc_current].addr,
                                &hint);
    }
  if (rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

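/*
 * TX path for packed-ring vrings. Called from the device-class TX function
 * below after the vring spinlock (when required) has been taken; this
 * function performs the unlock before returning. Packets are copied from
 * vlib buffers into guest descriptors through the batched copy array, and
 * the consumed descriptors are handed back to the guest in
 * vhost_user_mark_desc_available().
 */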
static_always_inline uword
vhost_user_device_class_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  vring_packed_desc_t *desc_table;
  u32 or_flags;
  u16 desc_head, desc_index, desc_len;
  u16 n_descs_processed;
  u8 indirect, chained;

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
                                               thread_index));
  rxvq = &vui->vrings[qid];

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  n_descs_processed = 0;

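  /*
   * Outer loop: one vlib buffer chain (i.e. one packet) per iteration.
   * Each packet gets a virtio-net header from cpu->tx_headers and a copy
   * order for that header, followed by copy orders for its payload.
   */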
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;
      u32 total_desc_len = 0;
      u16 n_entries = 0;

      indirect = 0;
      chained = 0;
      if (PREDICT_TRUE (n_left > 1))
        vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace = vlib_add_trace (vm, node, b0,
                                               sizeof (*cpu->current_trace));
          vhost_user_tx_trace_packed (cpu->current_trace, vui, qid / 2, b0,
                                      rxvq);
        }

      desc_table = rxvq->packed_desc;
      desc_head = desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
      if (PREDICT_FALSE (!vhost_user_packed_desc_available (rxvq, desc_head)))
        {
          error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
          goto done;
        }
      /*
       * Go deeper in case of indirect descriptor.
       * To test it, turn off mrg_rxbuf.
       */
      if (desc_table[desc_head].flags & VIRTQ_DESC_F_INDIRECT)
        {
          indirect = 1;
          if (PREDICT_FALSE (desc_table[desc_head].len <
                             sizeof (vring_packed_desc_t)))
            {
              error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
              goto done;
            }
          n_entries = desc_table[desc_head].len >> 4;
          desc_table = map_guest_mem (vui, desc_table[desc_index].addr,
                                      &map_hint);
          if (PREDICT_FALSE (desc_table == 0))
            {
              error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
              goto done;
            }
          desc_index = 0;
        }
      else if (rxvq->packed_desc[desc_head].flags & VIRTQ_DESC_F_NEXT)
        chained = 1;

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      /* Get a header from the header array */
      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
      tx_headers_len++;
      hdr->hdr.flags = 0;
      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
      hdr->num_buffers = 1;

      or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
        (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
        (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);

      /* Guest supports csum offload and buffer requires checksum offload? */
      if (or_flags &&
          (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
        vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

      /* Prepare a copy order executed later for the header */
      ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[copy_len];
      copy_len++;
      cpy->len = vui->virtio_net_hdr_sz;
      cpy->dst = buffer_map_addr;
      cpy->src = (uword) hdr;

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
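      /*
       * Inner loop: copy the packet into guest memory. When the current
       * guest descriptor fills up, move to the next one (chained entry,
       * indirect-table entry, or a fresh descriptor when mergeable rx
       * buffers are negotiated); when the current vlib buffer is exhausted,
       * follow the buffer chain; stop at end of packet.
       */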
      while (1)
        {
          if (buffer_len == 0)
            {
              /* Get new output */
              if (chained)
                {
                  /*
                   * Next one is chained
                   * Test it with both indirect and mrg_rxbuf off
                   */
                  if (PREDICT_FALSE (!(desc_table[desc_index].flags &
                                       VIRTQ_DESC_F_NEXT)))
                    {
                      /*
                       * Last descriptor in chain.
                       * Dequeue queued descriptors for this packet
                       */
                      vhost_user_dequeue_chained_descs (rxvq,
                                                        &n_descs_processed);
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }
                  vhost_user_advance_last_avail_idx (rxvq);
                  desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
                  n_descs_processed++;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                  total_desc_len += desc_len;
                  desc_len = 0;
                }
              else if (indirect)
                {
                  /*
                   * Indirect table
                   * Test it with mrg_rxbuf off
                   */
                  if (PREDICT_TRUE (n_entries > 0))
                    n_entries--;
                  else
                    {
                      /* Dequeue queued descriptors for this packet */
                      vhost_user_dequeue_chained_descs (rxvq,
                                                        &n_descs_processed);
                      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
                      goto done;
                    }
                  total_desc_len += desc_len;
                  desc_index = (desc_index + 1) & rxvq->qsz_mask;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                  desc_len = 0;
                }
              else if (vui->virtio_net_hdr_sz == 12)
                {
                  /*
                   * MRG is available
                   * This is the default setting for the guest VM
                   */
                  virtio_net_hdr_mrg_rxbuf_t *hdr =
                    &cpu->tx_headers[tx_headers_len - 1];

                  desc_table[desc_index].len = desc_len;
                  vhost_user_advance_last_avail_idx (rxvq);
                  desc_head = desc_index =
                    rxvq->last_avail_idx & rxvq->qsz_mask;
                  hdr->num_buffers++;
                  n_descs_processed++;
                  desc_len = 0;

                  if (PREDICT_FALSE (!vhost_user_packed_desc_available
                                     (rxvq, desc_index)))
                    {
                      /* Dequeue queued descriptors for this packet */
                      vhost_user_dequeue_descs (rxvq, hdr,
                                                &n_descs_processed);
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }

                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else
                {
                  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
                  goto done;
                }
            }

          ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
          vhost_copy_t *cpy = &cpu->copy[copy_len];
          copy_len++;
          cpy->len = bytes_left;
          cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
          cpy->dst = buffer_map_addr;
          cpy->src = (uword) vlib_buffer_get_current (current_b0) +
            current_b0->current_length - bytes_left;

          bytes_left -= cpy->len;
          buffer_len -= cpy->len;
          buffer_map_addr += cpy->len;
          desc_len += cpy->len;

          CLIB_PREFETCH (&rxvq->packed_desc, CLIB_CACHE_LINE_BYTES, LOAD);

          /* Check if vlib buffer has more data. If not, get more or break */
          if (PREDICT_TRUE (!bytes_left))
            {
              if (PREDICT_FALSE
                  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
                {
                  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
                  bytes_left = current_b0->current_length;
                }
              else
                {
                  /* End of packet */
                  break;
                }
            }
        }

      /* Move from available to used ring */
      total_desc_len += desc_len;
      rxvq->packed_desc[desc_head].len = total_desc_len;

      vhost_user_advance_last_avail_table_idx (vui, rxvq, chained);
      n_descs_processed++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];

      n_left--;

      /*
       * Do the copy periodically to prevent overflowing the cpu->copy
       * array and corrupting memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD) || chained)
        {
          if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                                 &map_hint)))
            vlib_error_count (vm, node->node_index,
                              VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
          copy_len = 0;

          /* give buffers back to driver */
          vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed,
                                          chained, frame, n_left);
        }

      buffers++;
    }

done:
  if (PREDICT_TRUE (copy_len))
    {
      if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                             &map_hint)))
        vlib_error_count (vm, node->node_index,
                          VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

      vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed, chained,
                                      frame, n_left);
    }

  /*
   * When n_left is set, error is always set to something too.
   * In case error is due to lack of remaining buffers, we go back up and
   * retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch fresh packets with a
   * good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  vhost_user_vring_unlock (vui, qid);

  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
        (vnet_main.interface_main.sw_if_counters +
         VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

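/*
 * Device-class TX function for split (legacy) vrings. Packed-ring
 * interfaces are dispatched to vhost_user_device_class_packed() above once
 * the admin/readiness checks have passed and the vring lock, if required,
 * has been taken.
 */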
VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
                                                   vlib_node_runtime_t *
                                                   node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  u32 or_flags;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_ready))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
                                               thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (rxvq->avail == 0))
    {
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      goto done3;
    }

  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

  if (vhost_user_is_packed_ring_supported (vui))
    return (vhost_user_device_class_packed (vm, node, frame));

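  /*
   * Split-ring TX: one packet per outer-loop iteration. Copy orders
   * accumulate in cpu->copy and are flushed whenever copy_len reaches
   * VHOST_USER_TX_COPY_THRESHOLD, at which point the used ring index is
   * published to the guest.
   */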
retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
        vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace = vlib_add_trace (vm, node, b0,
                                               sizeof (*cpu->current_trace));
          vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
        }

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
        {
          error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
          goto done;
        }

      desc_table = rxvq->desc;
      desc_head = desc_index =
        rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor.
       * I don't know of any driver providing indirect for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
        {
          if (PREDICT_FALSE
              (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
            {
              error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
              goto done;
            }
          if (PREDICT_FALSE
              (!(desc_table =
                 map_guest_mem (vui, rxvq->desc[desc_index].addr,
                                &map_hint))))
            {
              error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
              goto done;
            }
          desc_index = 0;
        }

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
        // Get a header from the header array
        virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
        tx_headers_len++;
        hdr->hdr.flags = 0;
        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
        hdr->num_buffers = 1;	//This is local, no need to check

        or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
          (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
          (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);

        /* Guest supports csum offload and buffer requires checksum offload? */
        if (or_flags
            && (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
          vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

        // Prepare a copy order executed later for the header
        ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
        vhost_copy_t *cpy = &cpu->copy[copy_len];
        copy_len++;
        cpy->len = vui->virtio_net_hdr_sz;
        cpy->dst = buffer_map_addr;
        cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
        {
          if (buffer_len == 0)
            {
              //Get new output
              if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
                {
                  //Next one is chained
                  desc_index = desc_table[desc_index].next;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
                {
                  virtio_net_hdr_mrg_rxbuf_t *hdr =
                    &cpu->tx_headers[tx_headers_len - 1];

                  //Move from available to used buffer
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
                    desc_head;
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
                    desc_len;
                  vhost_user_log_dirty_ring (vui, rxvq,
                                             ring[rxvq->last_used_idx &
                                                  rxvq->qsz_mask]);

                  rxvq->last_avail_idx++;
                  rxvq->last_used_idx++;
                  hdr->num_buffers++;
                  desc_len = 0;

                  if (PREDICT_FALSE
                      (rxvq->last_avail_idx == rxvq->avail->idx))
                    {
                      //Dequeue queued descriptors for this packet
                      rxvq->last_used_idx -= hdr->num_buffers - 1;
                      rxvq->last_avail_idx -= hdr->num_buffers - 1;
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }

                  desc_table = rxvq->desc;
                  desc_head = desc_index =
                    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
                  if (PREDICT_FALSE
                      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
                    {
                      //It is seriously unlikely that a driver will put an
                      //indirect descriptor after a non-indirect descriptor.
                      if (PREDICT_FALSE
                          (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
                          goto done;
                        }
                      if (PREDICT_FALSE
                          (!(desc_table =
                             map_guest_mem (vui,
                                            rxvq->desc[desc_index].addr,
                                            &map_hint))))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
                          goto done;
                        }
                      desc_index = 0;
                    }
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else
                {
                  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
                  goto done;
                }
            }

          {
            ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
            vhost_copy_t *cpy = &cpu->copy[copy_len];
            copy_len++;
            cpy->len = bytes_left;
            cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
            cpy->dst = buffer_map_addr;
            cpy->src = (uword) vlib_buffer_get_current (current_b0) +
              current_b0->current_length - bytes_left;

            bytes_left -= cpy->len;
            buffer_len -= cpy->len;
            buffer_map_addr += cpy->len;
            desc_len += cpy->len;

            CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
          }

          // Check if vlib buffer has more data. If not, get more or break.
          if (PREDICT_TRUE (!bytes_left))
            {
              if (PREDICT_FALSE
                  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
                {
                  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
                  bytes_left = current_b0->current_length;
                }
              else
                {
                  //End of packet
                  break;
                }
            }
        }

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
                                 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
        }

      n_left--;			//At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent overflowing the cpu->copy
       * array and corrupting memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
        {
          if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                                 &map_hint)))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
            }
          copy_len = 0;

          /* give buffers back to driver */
          CLIB_MEMORY_BARRIER ();
          rxvq->used->idx = rxvq->last_used_idx;
          vhost_user_log_dirty_ring (vui, rxvq, idx);
        }
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                         &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set to something too.
   * In case error is due to lack of remaining buffers, we go back up and
   * retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch fresh packets with a
   * good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, rxvq);
    }

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
        (vnet_main.interface_main.sw_if_counters
         + VNET_INTERFACE_COUNTER_DROP,
         thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

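/*
 * Switch a queue between polling and interrupt/adaptive mode. The first
 * queue that enters interrupt or adaptive mode starts the interrupt
 * coalescing timer process, and the last one that leaves it stops the
 * timer. In polling mode the guest is told not to send notifications
 * (VRING_USED_F_NO_NOTIFY).
 */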
static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
                                     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
        {
          // We cannot support interrupt mode if the driver opts out
          return clib_error_return (0, "Driver does not support interrupt");
        }
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
        {
          vum->ifq_count++;
          // Start the timer if this is the first encounter on interrupt
          // interface/queue
          if ((vum->ifq_count == 1) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_START_TIMER, 0);
        }
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
           (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
          vum->ifq_count)
        {
          vum->ifq_count--;
          // Stop the timer if there is no more interrupt interface/queue
          if ((vum->ifq_count == 0) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_STOP_TIMER, 0);
        }
    }

  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
           (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
                  hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}

static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
                                    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
                                 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */