virtio: support virtio 1.1 packed ring in vhost
virtio 1.1 defines a number of new features. Packed ring is among the
most notable and important ones. It combines the used, available, and
descriptor rings into one.
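As a rough sketch (layout per the virtio 1.1 spec; the exact VPP
definition of vring_packed_desc_t lives in the virtio headers), each
slot of the packed ring is a single 16-byte descriptor, and
availability is signalled by flag bits instead of separate index
fields:

  typedef struct
  {
    u64 addr;   /* guest physical address of the buffer */
    u32 len;    /* length of the buffer */
    u16 id;     /* buffer id, echoed back when the slot is consumed */
    u16 flags;  /* VIRTQ_DESC_F_AVAIL, VIRTQ_DESC_F_USED,
                   VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE, ... */
  } packed_desc_sketch_t;

The driver and the device each keep a wrap counter; a slot is available
to the backend when its AVAIL flag bit matches the driver's wrap
counter.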
This patch provides experimental support for packed ring. To avoid
regression, when packed ring is configured for the interface, it is
branched to a separate RX and TX driver. The non-packed ring should
continue to perform as before.
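Conceptually the split looks like the sketch below; the helper
vhost_user_is_packed_ring_supported() is added by this patch, while the
two per-ring handler names are only illustrative:

  if (vhost_user_is_packed_ring_supported (vui))
    /* hypothetical packed-ring TX path */
    n = vhost_user_tx_packed (vm, node, frame, vui);
  else
    /* hypothetical split-ring TX path */
    n = vhost_user_tx_split (vm, node, frame, vui);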
Packed ring is tested using qemu 4.2 and Ubuntu Focal Fossa (kernel
5.4.0-12) on a guest VM which supports packed ring.
To configure VPP with packed ring, just add the optional keyword
"packed" when creating the vhost interface. To bring up the guest VM
with packed ring, add "packed=on" to the qemu launch command.
To facilitate troubleshooting, a "verbose" option is also added to the
"show vhost desc" CLI to display the indirect descriptors.
Known qemu reconnect issue -
If VPP is restarted, guest VMs also need to be restarted. The problem
is that the kernel virtio-net-pci driver keeps track of the previous
available and used indices. For virtio 1.0, these indices are in shared
memory and qemu can easily copy them to pass to the backend on
reconnect. For virtio 1.1, these indices are no longer in shared
memory. Qemu needs a new mechanism to retrieve them, and it is not
currently implemented. So when the protocol reconnects, qemu does not
have the correct available and used indices to pass to the backend. As
a result, after the reconnect, virtio-net-pci reads the TX ring from
the wrong position in the ring, not the position which the backend is
writing to. A similar problem also exists on the RX side.
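For reference, with split rings (virtio 1.0) the indices are fields of
the shared rings themselves, whereas with packed rings the equivalent
state is private to each side; in this patch it is kept in
vhost_user_vring_t. A sketch, with the split-ring structs per the
virtio spec:

  /* split ring: idx fields live in guest-shared memory */
  struct vring_avail { u16 flags; u16 idx; u16 ring[]; };
  struct vring_used  { u16 flags; u16 idx; struct vring_used_elem ring[]; };

  /* packed ring: backend-private state in vhost_user_vring_t,
     invisible to qemu */
  last_avail_idx, avail_wrap_counter, last_used_idx, used_wrap_counter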
Type: feature
Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: I5afc50b0bafab5a1de7a6dd10f399db3fafd144c
diff --git a/src/vnet/devices/virtio/vhost_user_inline.h b/src/vnet/devices/virtio/vhost_user_inline.h
index e4a1d59..ceaf78c 100644
--- a/src/vnet/devices/virtio/vhost_user_inline.h
+++ b/src/vnet/devices/virtio/vhost_user_inline.h
@@ -292,6 +292,90 @@
}
}
}
+
+static_always_inline u8
+vhost_user_packed_desc_available (vhost_user_vring_t * vring, u16 idx)
+{
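+ /* a desc is available when its AVAIL flag matches the wrap counter */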
+ return (((vring->packed_desc[idx].flags & VIRTQ_DESC_F_AVAIL) ==
+ vring->avail_wrap_counter));
+}
+
+static_always_inline void
+vhost_user_advance_last_avail_idx (vhost_user_vring_t * vring)
+{
+ vring->last_avail_idx++;
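+ /* toggle the wrap counter each time the avail index wraps around */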
+ if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
+ vring->avail_wrap_counter ^= VIRTQ_DESC_F_AVAIL;
+}
+
+static_always_inline void
+vhost_user_advance_last_avail_table_idx (vhost_user_intf_t * vui,
+ vhost_user_vring_t * vring,
+ u8 chained)
+{
+ if (chained)
+ {
+ vring_packed_desc_t *desc_table = vring->packed_desc;
+
+ /* pick up the slot of the next avail idx */
+ while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
+ VIRTQ_DESC_F_NEXT)
+ vhost_user_advance_last_avail_idx (vring);
+ }
+
+ vhost_user_advance_last_avail_idx (vring);
+}
+
+static_always_inline void
+vhost_user_undo_advanced_last_avail_idx (vhost_user_vring_t * vring)
+{
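+ /* undo the wrap counter toggle if the last advance wrapped the ring */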
+ if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
+ vring->avail_wrap_counter ^= VIRTQ_DESC_F_AVAIL;
+ vring->last_avail_idx--;
+}
+
+static_always_inline void
+vhost_user_dequeue_descs (vhost_user_vring_t * rxvq,
+ virtio_net_hdr_mrg_rxbuf_t * hdr,
+ u16 * n_descs_processed)
+{
+ u16 i;
+
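+ /* back out the descriptors claimed for the packet's extra buffers */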
+ *n_descs_processed -= (hdr->num_buffers - 1);
+ for (i = 0; i < hdr->num_buffers - 1; i++)
+ vhost_user_undo_advanced_last_avail_idx (rxvq);
+}
+
+static_always_inline void
+vhost_user_dequeue_chained_descs (vhost_user_vring_t * rxvq,
+ u16 * n_descs_processed)
+{
+ while (*n_descs_processed)
+ {
+ vhost_user_undo_advanced_last_avail_idx (rxvq);
+ (*n_descs_processed)--;
+ }
+}
+
+static_always_inline void
+vhost_user_advance_last_used_idx (vhost_user_vring_t * vring)
+{
+ vring->last_used_idx++;
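+ /* toggle the wrap counter each time the used index wraps around */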
+ if (PREDICT_FALSE ((vring->last_used_idx & vring->qsz_mask) == 0))
+ vring->used_wrap_counter ^= 1;
+}
+
+static_always_inline u64
+vhost_user_is_packed_ring_supported (vhost_user_intf_t * vui)
+{
+ return (vui->features & (1ULL << FEAT_VIRTIO_F_RING_PACKED));
+}
+
#endif
/*