vppinfra: more vector inlines

Type: improvement
Change-Id: Ie0de374b89ec3a17befecf3f08e94951597609ec
Signed-off-by: Damjan Marion <damarion@cisco.com>
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index 8cc1d77..0511ec7 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -162,6 +162,23 @@
   return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
 }
 
+static_always_inline u8x32
+u8x32_shuffle (u8x32 v, u8x32 m) /* per-byte shuffle of v by indices in m; like pshufb, operates within each 128-bit lane */
+{
+  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
+}
+
+#define u8x32_align_right(a, b, imm) \
+  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm) /* concat a:b, shift right imm bytes — per 128-bit lane */
+
+static_always_inline u32
+u32x8_sum_elts (u32x8 sum8) /* horizontal sum of all 8 u32 elements */
+{
+  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8); /* fold: add elements 8 bytes apart within each lane */
+  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4); /* fold: every element now holds its 128-bit lane's sum */
+  return sum8[0] + sum8[4]; /* low-lane sum + high-lane sum */
+}
+
 static_always_inline u32x8
 u32x8_hadd (u32x8 v1, u32x8 v2)
 {
@@ -196,6 +213,14 @@
   return v & masks[16 - n_last];
 }
 
+#if defined (__AVX512F__) && defined (__AVX512BW__) && defined (__AVX512VL__)
+static_always_inline u8x32
+u8x32_mask_load (u8x32 a, void *p, u32 mask)
+{
+  return (u8x32) _mm256_mask_loadu_epi8 ((__m256i) a, mask, p); /* bytes with mask bit set loaded from p, rest from a */
+}
+#endif
+
 static_always_inline f32x8
 f32x8_from_u32x8 (u32x8 v)
 {
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index 8a82650..6eb7c5e 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -192,6 +192,25 @@
 }
 
 static_always_inline u8x64
+u8x64_shuffle (u8x64 v, u8x64 m) /* per-byte shuffle of v by indices in m; operates within each 128-bit lane */
+{
+  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
+}
+
+#define u8x64_align_right(a, b, imm) \
+  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm) /* concat a:b, shift right imm bytes — per 128-bit lane */
+
+static_always_inline u32
+u32x16_sum_elts (u32x16 sum16) /* horizontal sum of all 16 u32 elements */
+{
+  u32x8 sum8;
+  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8); /* fold within each 128-bit lane */
+  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4); /* every element now holds its lane's sum */
+  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16); /* combine upper and lower 256-bit halves */
+  return sum8[0] + sum8[4]; /* add the two remaining per-lane sums */
+}
+
+static_always_inline u8x64
 u8x64_mask_load (u8x64 a, void *p, u64 mask)
 {
   return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index 918ded3..8c28dd7 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -650,6 +650,14 @@
   return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
 }
 
+static_always_inline u32 __clib_unused
+u32x4_sum_elts (u32x4 sum4) /* horizontal sum of all 4 u32 elements */
+{
+  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 8); /* fold: elts 0,1 += elts 2,3 */
+  sum4 += (u32x4) u8x16_align_right (sum4, sum4, 4); /* fold: elt 0 now holds the total */
+  return sum4[0];
+}
+
 static_always_inline u8x16
 u8x16_shuffle (u8x16 v, u8x16 m)
 {
@@ -756,6 +764,14 @@
   return a ^ b ^ c;
 }
 
+#if defined (__AVX512F__) && defined (__AVX512BW__) && defined (__AVX512VL__)
+static_always_inline u8x16
+u8x16_mask_load (u8x16 a, void *p, u16 mask)
+{
+  return (u8x16) _mm_mask_loadu_epi8 ((__m128i) a, mask, p); /* bytes with mask bit set loaded from p, rest from a */
+}
+#endif
+
 #endif /* included_vector_sse2_h */
 
 /*