blob: 2ce5004a4218052acbd48f6d29c380a5e3cb2bdf [file] [log] [blame]
From 3609c4fb4d07d4285e96187598f54cb21e9e9b08 Mon Sep 17 00:00:00 2001
From: Shesha Sreenivasamurthy <shesha@cisco.com>
Date: Wed, 2 Sep 2015 08:57:24 -0700
Subject: [PATCH 2/9] mbuf: rearrange rte_mbuf metadata to suit vpp
Move the offload fields to the second cache line and the next
pointer to the first cache line. Issue reported to Intel.
---
.../linuxapp/eal/include/exec-env/rte_kni_common.h | 10 +++++++--
lib/librte_mbuf/rte_mbuf.h | 25 ++++++++++++++--------
2 files changed, 24 insertions(+), 11 deletions(-)
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index e9f38bd..d327f71 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -111,6 +111,10 @@ struct rte_kni_fifo {
* The kernel image of the rte_mbuf struct, with only the relevant fields.
* Padding is necessary to assure the offsets of these fields
*/
+/*
+ * offload in the second cache line, next in the first. Better for vpp
+ * at least as of right now.
+ */
struct rte_kni_mbuf {
void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
char pad0[10];
@@ -121,16 +125,18 @@ struct rte_kni_mbuf {
char pad2[4];
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
uint16_t data_len; /**< Amount of data in segment buffer. */
+ char pad3[2];
#else
char pad2[2];
uint16_t data_len; /**< Amount of data in segment buffer. */
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+ char pad3[4];
#endif
+ void *next;
/* fields on second cache line */
- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
+ char pad4[12] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
void *pool;
- void *next;
};
/*
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 8c2db1b..61cbbd7 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -743,6 +743,12 @@ typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
+/*
+ * offload in the second cache line, next in the first. Better for vpp
+ * at least as of right now.
+ * If you change this structure, you must change the kernel-mode
+ * version (struct rte_kni_mbuf) in rte_kni_common.h
+ */
struct rte_mbuf {
MARKER cacheline0;
@@ -809,6 +815,16 @@ struct rte_mbuf {
uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
#endif /* RTE_NEXT_ABI */
+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
+
+ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
+#ifdef RTE_NEXT_ABI
+ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
+#endif /* RTE_NEXT_ABI */
+
+ /* second cache line - fields only used in slow path or on TX */
+ MARKER cacheline1 __rte_cache_aligned;
+
union {
uint32_t rss; /**< RSS hash result if RSS enabled */
struct {
@@ -828,21 +844,12 @@ struct rte_mbuf {
uint32_t usr; /**< User defined tags. See rte_distributor_process() */
} hash; /**< hash information */
- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
-#ifdef RTE_NEXT_ABI
- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
-#endif /* RTE_NEXT_ABI */
-
- /* second cache line - fields only used in slow path or on TX */
- MARKER cacheline1 __rte_cache_aligned;
-
union {
void *userdata; /**< Can be used for external metadata */
uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
};
struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
- struct rte_mbuf *next; /**< Next segment of scattered packet. */
/* fields to support TX offloads */
union {
--
2.5.0