/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
  static_always_inline t##s##x##c t##s##x##c##_splat (t##s x) \
  { \
    return (t##s##x##c) _mm512_set1_##i (x); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_load_aligned (void *p) \
  { \
    return (t##s##x##c) _mm512_load_si512 (p); \
  } \
\
  static_always_inline void t##s##x##c##_store_aligned (t##s##x##c v, \
                                                        void *p) \
  { \
    _mm512_store_si512 ((__m512i *) p, (__m512i) v); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p) \
  { \
    return (t##s##x##c) _mm512_loadu_si512 (p); \
  } \
\
  static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v, \
                                                          void *p) \
  { \
    _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); \
  } \
\
  static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c v) \
  { \
    return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); \
  } \
\
  static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
  { \
    return (_mm512_cmpneq_epi64_mask ((__m512i) a, (__m512i) b) == 0); \
  } \
\
  static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
  { \
    return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); \
  } \
\
  static_always_inline u##c t##s##x##c##_is_zero_mask (t##s##x##c v) \
  { \
    return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_interleave_lo (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_interleave_hi (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); \
  }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */
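
/* Usage sketch (illustrative only; `src` is an assumed pointer to at least
 * 64 bytes of data):
 *
 *   u32x16 v = u32x16_load_unaligned (src);
 *   u32x16 zero = u32x16_splat (0);
 *   if (u32x16_is_equal (v, zero))	// same as u32x16_is_all_zero (v)
 *     return;
 */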

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
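
/* Note: _mm512_movepi16_mask collects the most significant bit of each
 * 16-bit element, so bit i of the result is the MSB/sign bit of element i.
 * Illustrative use: `if (u16x32_msb_mask (v)) ...` is true when at least one
 * element has its top bit set. */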

/* 512-bit packs */
#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m512i) lo, (__m512i) hi); \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _
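
/* Note: as with their SSE/AVX2 counterparts, these saturating pack
 * instructions operate independently on each 128-bit lane, so the result is
 * lane-interleaved rather than a plain lo/hi concatenation. */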

static_always_inline u64x8
u64x8_byte_swap (u64x8 v)
{
  u8x64 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x8) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
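
/* Usage sketch (illustrative): converting a vector of 32 network-order
 * (big-endian) u16 values to host order on a little-endian CPU, assuming
 * `ports` points to at least 64 bytes of data:
 *
 *   u16x32 p = u16x32_byte_swap (u16x32_load_unaligned (ports));
 */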

#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
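
/* The last argument of _mm512_ternarylogic_epi32 is an 8-bit truth table:
 * bit n of the immediate is the result for the input combination whose bits
 * (a, b, c) form the binary number n. For example 0x96 encodes a ^ b ^ c
 * (used by the xor3 helpers below) and 0xfe encodes a | b | c.
 * Illustrative use:
 *
 *   u32x16 parity = u32x16_ternary_logic (x, y, z, 0x96);
 */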

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

static_always_inline u64x8
u64x8_xor3 (u64x8 a, u64x8 b, u64x8 c)
{
  return (u64x8) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
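
/* Usage sketch (illustrative): horizontally adding 16 per-lane counters,
 * e.g. after accumulating packet or byte counts in a u32x16 accumulator:
 *
 *   u32x16 acc = u32x16_splat (0);
 *   // ... acc += ... inside a loop ...
 *   u32 total = u32x16_sum_elts (acc);
 */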

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
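
/* Usage sketch (illustrative; pow2_mask() is assumed from vppinfra/clib.h):
 * loading the last `n_left` (< 16) u32 elements of a buffer without reading
 * past its end, with the unused lanes zeroed:
 *
 *   u16 mask = pow2_mask (n_left);		// n_left low bits set
 *   u32x16 v = u32x16_mask_load_zero (ptr, mask);
 */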

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_and (t a, t b, m mask) \
  { \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_andnot (t a, t b, m mask) \
  { \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_xor (t a, t b, m mask) \
  { \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_or (t a, t b, m mask) \
  { \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
  }
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
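
/* Note: in the helpers above the first operand also serves as the merge
 * source, i.e. result lane i is (a op b) when mask bit i is set and is left
 * as a's lane i otherwise. */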

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_permute (u8x64 idx, u8x64 a)
{
  return (u8x64) _mm512_permutexvar_epi8 ((__m512i) idx, (__m512i) a);
}

static_always_inline u8x64
u8x64_permute2 (u8x64 idx, u8x64 a, u8x64 b)
{
  return (u8x64) _mm512_permutex2var_epi8 ((__m512i) a, (__m512i) idx,
					   (__m512i) b);
}
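
/* Note: _mm512_permutexvar_epi8 / _mm512_permutex2var_epi8 are AVX512-VBMI
 * instructions, so u8x64_permute and u8x64_permute2 require a target built
 * with that extension. Illustrative use, gathering an arbitrary byte order
 * from one vector (index_vector holds the desired source byte indices):
 *
 *   u8x64 shuffled = u8x64_permute (index_vector, data);
 */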

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_not_equal_mask (t a, t b) \
  { \
    return p##_cmpneq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _

#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
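
/* Usage sketch (illustrative; count_set_bits() is assumed from
 * vppinfra/clib.h): dropping selected entries from a vector of 16 u32
 * indices while keeping the survivors densely packed at the front:
 *
 *   u16 keep = u32x16_is_not_equal_mask (indices, u32x16_splat (~0));
 *   u32x16 kept = u32x16_compress (indices, keep);
 *   u32 n_kept = count_set_bits (keep);
 */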

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif
#endif

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif
#endif

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

static_always_inline u64
u64x8_hxor (u64x8 v)
{
  v ^= u64x8_align_right (v, v, 4);
  v ^= u64x8_align_right (v, v, 2);
  return v[0] ^ v[1];
}

static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
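
/* Usage sketch (illustrative): u32x16_transpose treats m[] as a 16x16 matrix
 * of u32 (one row per vector) and transposes it in place, so afterwards
 * m[r][c] holds what was previously m[c][r]. A typical pattern, assuming
 * `base` points to 16 rows of at least 16 u32 each and `stride` is the row
 * stride in elements:
 *
 *   u32x16 rows[16];
 *   for (int i = 0; i < 16; i++)
 *     rows[i] = u32x16_load_unaligned (base + i * stride);
 *   u32x16_transpose (rows);	// rows[j] now holds column j
 */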

static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */