vppinfra: add 128-bit and 512-bit a ^ b ^ c shortcut
This allows us to combine 2 XOR operations into a single instruction,
which makes a difference in crypto ops:
- on x86, by using the ternary logic instruction
- on ARM, by using the EOR3 instruction (available with the sha3 feature)
Type: refactor
Change-Id: Ibdf9001840399d2f838d491ca81b57cbd8430433
Signed-off-by: Damjan Marion <damjan.marion@gmail.com>
diff --git a/src/plugins/crypto_native/ghash.h b/src/plugins/crypto_native/ghash.h
index 1ee1a99..a2886a4 100644
--- a/src/plugins/crypto_native/ghash.h
+++ b/src/plugins/crypto_native/ghash.h
@@ -105,18 +105,6 @@
#ifndef __ghash_h__
#define __ghash_h__
-/* on AVX-512 systems we can save a clock cycle by using ternary logic
- instruction to calculate a XOR b XOR c */
-static_always_inline u8x16
-ghash_xor3 (u8x16 a, u8x16 b, u8x16 c)
-{
-#if defined (__AVX512F__)
- return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
- (__m128i) c, 0x96);
-#endif
- return a ^ b ^ c;
-}
-
static_always_inline u8x16
gmul_lo_lo (u8x16 a, u8x16 b)
{
@@ -204,8 +192,8 @@
if (gd->pending)
{
/* there is peding data from previous invocation so we can XOR */
- gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, hi);
- gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, lo);
+ gd->hi = u8x16_xor3 (gd->hi, gd->tmp_hi, hi);
+ gd->lo = u8x16_xor3 (gd->lo, gd->tmp_lo, lo);
gd->pending = 0;
}
else
@@ -217,7 +205,7 @@
}
/* gd->mid ^= a0 * b1 ^ a1 * b0 */
- gd->mid = ghash_xor3 (gd->mid, gmul_hi_lo (a, b), gmul_lo_hi (a, b));
+ gd->mid = u8x16_xor3 (gd->mid, gmul_hi_lo (a, b), gmul_lo_hi (a, b));
}
static_always_inline void
@@ -233,8 +221,8 @@
if (gd->pending)
{
- gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, midl);
- gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, midr);
+ gd->lo = u8x16_xor3 (gd->lo, gd->tmp_lo, midl);
+ gd->hi = u8x16_xor3 (gd->hi, gd->tmp_hi, midr);
}
else
{
@@ -255,7 +243,7 @@
static_always_inline u8x16
ghash_final (ghash_data_t * gd)
{
- return ghash_xor3 (gd->hi, u8x16_word_shift_right (gd->tmp_lo, 4),
+ return u8x16_xor3 (gd->hi, u8x16_word_shift_right (gd->tmp_lo, 4),
u8x16_word_shift_left (gd->tmp_hi, 4));
}
diff --git a/src/vppinfra/vector_avx512.h b/src/vppinfra/vector_avx512.h
index c54d8cd..29d96f8 100644
--- a/src/vppinfra/vector_avx512.h
+++ b/src/vppinfra/vector_avx512.h
@@ -143,6 +143,13 @@
#define u32x16_ternary_logic(a, b, c, d) \
(u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
+static_always_inline u8x64
+u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
+{
+ return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
+ (__m512i) c, 0x96);
+}
+
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 81d99a6..3855f55 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -203,6 +203,17 @@
return (u8x16) vqtbl1q_u8 (v, mask);
}
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
+ u8x16 r;
+__asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b": "=w" (r): "0" (a), "w" (b), "w" (c):);
+ return r;
+#endif
+ return a ^ b ^ c;
+}
+
#define CLIB_HAVE_VEC128_MSB_MASK
#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index c22e86e..e75580e 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -746,6 +746,15 @@
return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __AVX512F__
+ return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
+ (__m128i) c, 0x96);
+#endif
+ return a ^ b ^ c;
+}
#endif /* included_vector_sse2_h */