devices: af-packet gso mtu
Type: fix
Set the GSO flag when the buffer length exceeds the
Linux MTU. For now, do not listen for MTU changes on
the Linux side.
This also fixes a TX issue: as we only search for
valid tx frames within the first n_left ring slots,
we might stay stuck when those slots are still busy.
Change-Id: Idf0bdd88990254a614962c2f7bc3e0292ccfd61a
Signed-off-by: Nathan Skrzypczak <nathan.skrzypczak@gmail.com>
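
The GSO hunk itself is not part of the diff excerpt below. As a rough,
hypothetical sketch of the intent only (host_if_mtu(), mark_gso() and the
use of a virtio_net_hdr-style per-frame header are assumptions, not code
from this patch): read the Linux MTU once at interface creation instead of
tracking later changes, then mark a tx frame for GSO when its length
exceeds that MTU.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/virtio_net.h>

/* Read the MTU of the underlying Linux interface once at interface
 * creation; later MTU changes are deliberately not tracked. */
static int
host_if_mtu (int fd, const char *ifname)
{
  struct ifreq ifr;
  memset (&ifr, 0, sizeof (ifr));
  strncpy (ifr.ifr_name, ifname, IFNAMSIZ - 1);
  if (ioctl (fd, SIOCGIFMTU, &ifr) < 0)
    return -1;
  return ifr.ifr_mtu;
}

/* Request GSO only when the frame exceeds the host MTU; a complete
 * implementation would also fill csum_start/csum_offset. */
static void
mark_gso (struct virtio_net_hdr *vnh, uint32_t frame_len,
          uint32_t l234_hdr_len, uint32_t gso_size, int host_mtu)
{
  memset (vnh, 0, sizeof (*vnh));
  if (host_mtu > 0 && frame_len > (uint32_t) host_mtu)
    {
      vnh->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
      vnh->gso_size = gso_size;
      vnh->hdr_len = l234_hdr_len;
      vnh->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
  else
    vnh->gso_type = VIRTIO_NET_HDR_GSO_NONE;
}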
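
For the tx change in the hunks below, the scan for a writable frame is now
bounded by the ring size rather than by n_left, so a run of kernel-owned
frames can no longer wedge the sender. A simplified standalone illustration
of that idea follows; ring[], RING_SIZE, FRAME_FREE and next_free_frame()
are stand-ins for the tpacket2_hdr ring, not code from this patch.

#include <stdint.h>

#define RING_SIZE 256
#define FRAME_FREE 0   /* tp_status == 0: frame released by the kernel */

static uint8_t ring[RING_SIZE]; /* nonzero = TP_STATUS_SEND_REQUEST/SENDING */

/* Return the index of the next free frame starting at *cursor, advancing
 * the cursor past every frame inspected, or -1 once the whole ring has
 * been visited.  Bounding the scan by the ring size, not by the number of
 * buffers left to send, is what keeps the sender from spinning forever. */
static int
next_free_frame (uint32_t * cursor, uint32_t * n_not_ready)
{
  uint32_t visited;
  for (visited = 0; visited < RING_SIZE; visited++)
    {
      uint32_t idx = *cursor;
      *cursor = (*cursor + 1) % RING_SIZE;
      if (ring[idx] == FRAME_FREE)
        return (int) idx;
      (*n_not_ready)++;
    }
  return -1;   /* ring exhausted: give up instead of looping */
}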
diff --git a/src/vnet/devices/af_packet/device.c b/src/vnet/devices/af_packet/device.c
index 2886fb5..b6b99a0 100644
--- a/src/vnet/devices/af_packet/device.c
+++ b/src/vnet/devices/af_packet/device.c
@@ -67,7 +67,48 @@
static u8 *
format_af_packet_device (u8 * s, va_list * args)
{
- s = format (s, "Linux PACKET socket interface");
+ u32 dev_instance = va_arg (*args, u32);
+ u32 indent = format_get_indent (s);
+ int __clib_unused verbose = va_arg (*args, int);
+
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, dev_instance);
+ clib_spinlock_lock_if_init (&apif->lockp);
+ u32 block_size = apif->tx_req->tp_block_size;
+ u32 frame_size = apif->tx_req->tp_frame_size;
+ u32 frame_num = apif->tx_req->tp_frame_nr;
+ int block = 0;
+ u8 *block_start = apif->tx_ring + block * block_size;
+ u32 tx_frame = apif->next_tx_frame;
+ struct tpacket2_hdr *tph;
+
+ s = format (s, "Linux PACKET socket interface\n");
+ s = format (s, "%Ublock:%d frame:%d\n", format_white_space, indent,
+ block_size, frame_size);
+ s = format (s, "%Unext frame:%d\n", format_white_space, indent,
+ apif->next_tx_frame);
+
+ int n_send_req = 0, n_avail = 0, n_sending = 0, n_tot = 0, n_wrong = 0;
+ do
+ {
+ tph = (struct tpacket2_hdr *) (block_start + tx_frame * frame_size);
+ tx_frame = (tx_frame + 1) % frame_num;
+ if (tph->tp_status == 0)
+ n_avail++;
+ else if (tph->tp_status & TP_STATUS_SEND_REQUEST)
+ n_send_req++;
+ else if (tph->tp_status & TP_STATUS_SENDING)
+ n_sending++;
+ else
+ n_wrong++;
+ n_tot++;
+ }
+ while (tx_frame != apif->next_tx_frame);
+ s = format (s, "%Uavailable:%d request:%d sending:%d wrong:%d total:%d\n",
+ format_white_space, indent, n_avail, n_send_req, n_sending,
+ n_wrong, n_tot);
+
+ clib_spinlock_unlock_if_init (&apif->lockp);
return s;
}
@@ -99,7 +140,7 @@
struct tpacket2_hdr *tph;
u32 frame_not_ready = 0;
- while (n_left > 0)
+ while (n_left)
{
u32 len;
u32 offset = 0;
@@ -108,13 +149,17 @@
u32 bi = buffers[0];
buffers++;
+ nextframe:
tph = (struct tpacket2_hdr *) (block_start + tx_frame * frame_size);
-
- if (PREDICT_FALSE
- (tph->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
+ if (PREDICT_FALSE (tph->tp_status &
+ (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
{
+ tx_frame = (tx_frame + 1) % frame_num;
frame_not_ready++;
- goto next;
+ /* check if we've exhausted the ring */
+ if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
+ break;
+ goto nextframe;
}
do
@@ -132,7 +177,7 @@
tph->tp_len = tph->tp_snaplen = offset;
tph->tp_status = TP_STATUS_SEND_REQUEST;
n_sent++;
- next:
+
tx_frame = (tx_frame + 1) % frame_num;
/* check if we've exhausted the ring */
@@ -142,23 +187,22 @@
CLIB_MEMORY_BARRIER ();
- if (PREDICT_TRUE (n_sent))
- {
- apif->next_tx_frame = tx_frame;
+ apif->next_tx_frame = tx_frame;
- if (PREDICT_FALSE (sendto (apif->fd, NULL, 0,
- MSG_DONTWAIT, NULL, 0) == -1))
- {
- /* Uh-oh, drop & move on, but count whether it was fatal or not.
- * Note that we have no reliable way to properly determine the
- * disposition of the packets we just enqueued for delivery.
- */
- vlib_error_count (vm, node->node_index,
- unix_error_is_fatal (errno) ?
+ if (PREDICT_TRUE (n_sent))
+ if (PREDICT_FALSE (sendto (apif->fd, NULL, 0, MSG_DONTWAIT, NULL, 0) ==
+ -1))
+ {
+ /* Uh-oh, drop & move on, but count whether it was fatal or not.
+ * Note that we have no reliable way to properly determine the
+ * disposition of the packets we just enqueued for delivery.
+ */
+ vlib_error_count (vm, node->node_index,
+ unix_error_is_fatal (errno) ?
AF_PACKET_TX_ERROR_TXRING_FATAL :
- AF_PACKET_TX_ERROR_TXRING_EAGAIN, n_sent);
- }
- }
+ AF_PACKET_TX_ERROR_TXRING_EAGAIN,
+ n_sent);
+ }
clib_spinlock_unlock_if_init (&apif->lockp);