avf: use write combining store for queue tail updates

Performance improvement: on supported platforms (currently only Intel
Tremont), use a write combining store to update the queue tail pointers.
Also, a Tremont march variant is added for all nodes.

Type: improvement

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Change-Id: Ie9606e403b7d9655184f778e3ffee3027c8c9edd
diff --git a/src/vppinfra/cpu.h b/src/vppinfra/cpu.h
index 750b393..6a81218 100644
--- a/src/vppinfra/cpu.h
+++ b/src/vppinfra/cpu.h
@@ -114,29 +114,30 @@
 clib_march_fn_registration *fn##_march_fn_registrations = 0; \
 _CLIB_MARCH_FN_REGISTRATION(fn)
 #endif
-#define foreach_x86_64_flags \
-_ (sse3,     1, ecx, 0)   \
-_ (pclmulqdq, 1, ecx, 1)  \
-_ (ssse3,    1, ecx, 9)   \
-_ (sse41,    1, ecx, 19)  \
-_ (sse42,    1, ecx, 20)  \
-_ (avx,      1, ecx, 28)  \
-_ (rdrand,   1, ecx, 30)  \
-_ (avx2,     7, ebx, 5)   \
-_ (rtm,      7, ebx, 11)  \
-_ (pqm,      7, ebx, 12)  \
-_ (pqe,      7, ebx, 15)  \
-_ (avx512f,  7, ebx, 16)  \
-_ (rdseed,   7, ebx, 18)  \
-_ (x86_aes,  1, ecx, 25)  \
-_ (sha,      7, ebx, 29)  \
-_ (vaes,     7, ecx, 9)   \
-_ (vpclmulqdq, 7, ecx, 10)   \
-_ (avx512_vnni, 7, ecx, 11)   \
-_ (avx512_bitalg, 7, ecx, 12)   \
-_ (avx512_vpopcntdq, 7, ecx, 14)   \
-_ (invariant_tsc, 0x80000007, edx, 8)
-
+#define foreach_x86_64_flags                                                  \
+  _ (sse3, 1, ecx, 0)                                                         \
+  _ (pclmulqdq, 1, ecx, 1)                                                    \
+  _ (ssse3, 1, ecx, 9)                                                        \
+  _ (sse41, 1, ecx, 19)                                                       \
+  _ (sse42, 1, ecx, 20)                                                       \
+  _ (avx, 1, ecx, 28)                                                         \
+  _ (rdrand, 1, ecx, 30)                                                      \
+  _ (avx2, 7, ebx, 5)                                                         \
+  _ (rtm, 7, ebx, 11)                                                         \
+  _ (pqm, 7, ebx, 12)                                                         \
+  _ (pqe, 7, ebx, 15)                                                         \
+  _ (avx512f, 7, ebx, 16)                                                     \
+  _ (rdseed, 7, ebx, 18)                                                      \
+  _ (x86_aes, 1, ecx, 25)                                                     \
+  _ (sha, 7, ebx, 29)                                                         \
+  _ (vaes, 7, ecx, 9)                                                         \
+  _ (vpclmulqdq, 7, ecx, 10)                                                  \
+  _ (avx512_vnni, 7, ecx, 11)                                                 \
+  _ (avx512_bitalg, 7, ecx, 12)                                               \
+  _ (avx512_vpopcntdq, 7, ecx, 14)                                            \
+  _ (movdiri, 7, ecx, 27)                                                     \
+  _ (movdir64b, 7, ecx, 28)                                                   \
+  _ (invariant_tsc, 0x80000007, edx, 8)
 
 #define foreach_aarch64_flags \
 _ (fp,          0) \
@@ -264,6 +265,14 @@
 }
 
 static inline int
+clib_cpu_march_priority_trm ()
+{
+  if (clib_cpu_supports_movdiri ())
+    return 60;
+  return -1;
+}
+
+static inline int
 clib_cpu_march_priority_hsw ()
 {
   if (clib_cpu_supports_avx2 ())