dpdk-hqos: don't hold up packets indefinitely under low load

Previously, packets buffered for HQoS enqueue were only pushed into the
scheduler once the buffer filled, so under low traffic a partially
filled buffer could hold packets indefinitely. Track how many polling
iterations the buffer has remained non-empty (flush_count) and force a
flush into rte_sched_port_enqueue() once HQOS_FLUSH_COUNT_THRESHOLD
iterations elapse without the buffer filling. The counter resets
whenever the buffer is drained.
Change-Id: If884637a6db0cb813a40920194795da2e98c8b23
Signed-off-by: David Hotham <david.hotham@metaswitch.com>
diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h
index e34d4b9..dfbfce5 100644
--- a/vnet/vnet/devices/dpdk/dpdk.h
+++ b/vnet/vnet/devices/dpdk/dpdk.h
@@ -184,6 +184,7 @@
u32 hqos_burst_deq;
u32 pkts_enq_len;
u32 swq_pos;
+ u32 flush_count;
} dpdk_device_hqos_per_hqos_thread_t;
typedef struct
@@ -304,6 +305,10 @@
#define DPDK_HQOS_DBG_BYPASS 0
#endif
+#ifndef HQOS_FLUSH_COUNT_THRESHOLD
+#define HQOS_FLUSH_COUNT_THRESHOLD 100000
+#endif
+
typedef struct dpdk_device_config_hqos_t
{
u32 hqos_thread;
diff --git a/vnet/vnet/devices/dpdk/hqos.c b/vnet/vnet/devices/dpdk/hqos.c
index d05ae09..12bf3fa 100644
--- a/vnet/vnet/devices/dpdk/hqos.c
+++ b/vnet/vnet/devices/dpdk/hqos.c
@@ -351,6 +351,7 @@
vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
xd->hqos_ht->pkts_enq_len = 0;
xd->hqos_ht->swq_pos = 0;
+ xd->hqos_ht->flush_count = 0;
/* Set up per-thread device data for each worker thread */
for (i = 0; i < worker_thread_count; i++)
@@ -416,6 +417,7 @@
u32 pkts_enq_len = hqos->pkts_enq_len;
u32 swq_pos = hqos->swq_pos;
u32 n_swq = vec_len (hqos->swq), i;
+ u32 flush_count = hqos->flush_count;
for (i = 0; i < n_swq; i++)
{
@@ -446,10 +448,23 @@
rte_pktmbuf_free (pkts_enq[n_pkts]);
pkts_enq_len = 0;
+ flush_count = 0;
break;
}
}
+ if (pkts_enq_len)
+ {
+ flush_count++;
+ if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+ {
+ rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ }
+ }
hqos->pkts_enq_len = pkts_enq_len;
+ hqos->flush_count = flush_count;
/* Advance to next device */
dev_pos++;
@@ -490,6 +505,7 @@
u32 pkts_enq_len = hqos->pkts_enq_len;
u32 swq_pos = hqos->swq_pos;
u32 n_swq = vec_len (hqos->swq), i;
+ u32 flush_count = hqos->flush_count;
/*
* SWQ dequeue and HQoS enqueue for current device
@@ -517,10 +533,23 @@
rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
pkts_enq_len = 0;
+ flush_count = 0;
break;
}
}
+ if (pkts_enq_len)
+ {
+ flush_count++;
+ if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+ {
+ rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ }
+ }
hqos->pkts_enq_len = pkts_enq_len;
+ hqos->flush_count = flush_count;
/*
* HQoS dequeue and HWQ TX enqueue for current device