/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, is_zero_mask */
#define _(t, s, c, i) \
  static_always_inline t##s##x##c t##s##x##c##_splat (t##s x) \
  { \
    return (t##s##x##c) _mm512_set1_##i (x); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_load_aligned (void *p) \
  { \
    return (t##s##x##c) _mm512_load_si512 (p); \
  } \
\
  static_always_inline void t##s##x##c##_store_aligned (t##s##x##c v, \
                                                        void *p) \
  { \
    _mm512_store_si512 ((__m512i *) p, (__m512i) v); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p) \
  { \
    return (t##s##x##c) _mm512_loadu_si512 (p); \
  } \
\
  static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v, \
                                                          void *p) \
  { \
    _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); \
  } \
\
  static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c v) \
  { \
    return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); \
  } \
\
  static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
  { \
    return (_mm512_cmpneq_epi64_mask ((__m512i) a, (__m512i) b) == 0); \
  } \
\
  static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
  { \
    return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); \
  } \
\
  static_always_inline u##c t##s##x##c##_is_zero_mask (t##s##x##c v) \
  { \
    return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_interleave_lo (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_interleave_hi (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); \
  }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */

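/* Illustrative usage sketch, not part of the original API: the macro
   expansion above generates splat/load/store/compare helpers for every
   512-bit integer vector type.  Function and buffer names below are
   hypothetical; this is equivalent to u32x16_is_all_equal (v, val). */
static_always_inline int
u32x16_example_all_equal_to (const u32 *buf, u32 val)
{
  u32x16 v = u32x16_load_unaligned ((void *) buf);
  return u32x16_is_equal (v, u32x16_splat (val));
}
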
static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}

#define u64x8_i64gather(index, base, scale) \
  (u64x8) _mm512_i64gather_epi64 ((__m512i) index, base, scale)

/* 512-bit packs */
#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m512i) lo, (__m512i) hi); \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _

static_always_inline u64x8
u64x8_byte_swap (u64x8 v)
{
  u8x64 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x8) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

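/* Illustrative usage sketch, not part of the original API: convert a block
   of sixteen big-endian u32 values to host byte order on a little-endian
   machine using the byte-swap helper above.  Names are hypothetical. */
static_always_inline void
u32x16_example_ntohl_block (u32 *dst, const u32 *src)
{
  u32x16 v = u32x16_load_unaligned ((void *) src);
  u32x16_store_unaligned (u32x16_byte_swap (v), dst);
}
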
#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
                                      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
                                            (__m512i) b);
}


#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
                                            (__m512i) c, 0x96);
}

static_always_inline u64x8
u64x8_xor3 (u64x8 a, u64x8 b, u64x8 c)
{
  return (u64x8) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
                                            (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}

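/* Illustrative usage sketch, not part of the original API: horizontally sum
   sixteen u32 lanes, e.g. to fold per-lane counters into a single total.
   The function name is hypothetical. */
static_always_inline u32
u32x16_example_total (const u32 *counters)
{
  return u32x16_sum_elts (u32x16_load_unaligned ((void *) counters));
}
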
#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

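/* Illustrative usage sketch, not part of the original API: sum n u32 values
   (n <= 16) by loading only the valid lanes with a masked load and leaving
   the remaining lanes zero.  Names are hypothetical. */
static_always_inline u32
u32x16_example_sum_n (const u32 *p, uword n)
{
  return u32x16_sum_elts (u32x16_mask_load_zero ((void *) p, pow2_mask (n)));
}
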
#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_and (t a, t b, m mask) \
  { \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_andnot (t a, t b, m mask) \
  { \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_xor (t a, t b, m mask) \
  { \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_or (t a, t b, m mask) \
  { \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
  }
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u64x8
u64x8_splat_u64x2 (u64x2 a)
{
  return (u64x8) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_permute (u8x64 idx, u8x64 a)
{
  return (u8x64) _mm512_permutexvar_epi8 ((__m512i) idx, (__m512i) a);
}

static_always_inline u8x64
u8x64_permute2 (u8x64 idx, u8x64 a, u8x64 b)
{
  return (u8x64) _mm512_permutex2var_epi8 ((__m512i) a, (__m512i) idx,
                                           (__m512i) b);
}

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_not_equal_mask (t a, t b) \
  { \
    return p##_cmpneq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _

#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif

#endif
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif

#endif

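/* Illustrative usage sketch, not part of the original API: given sixteen
   candidate indices and a selection mask, pack the selected indices to the
   front of a result vector and return how many were selected.  Names are
   hypothetical. */
static_always_inline int
u32x16_example_select (u32x16 indices, u16 mask, u32x16 *out)
{
  *out = u32x16_compress (indices, mask);
  return __builtin_popcount (mask);
}
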
#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

static_always_inline u64
u64x8_hxor (u64x8 v)
{
  v ^= u64x8_align_right (v, v, 4);
  v ^= u64x8_align_right (v, v, 2);
  return v[0] ^ v[1];
}

static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}



static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

static_always_inline u8x64
u8x64_load_partial (u8 *data, uword n)
{
  return u8x64_mask_load_zero (data, pow2_mask (n));
}

static_always_inline void
u8x64_store_partial (u8x64 r, u8 *data, uword n)
{
  u8x64_mask_store (r, data, pow2_mask (n));
}

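/* Illustrative usage sketch, not part of the original API: XOR an
   up-to-64-byte region in place with a key block, using the partial
   load/store helpers above.  Names are hypothetical. */
static_always_inline void
u8x64_example_xor_partial (u8 *data, u8x64 key, uword n)
{
  u8x64_store_partial (u8x64_load_partial (data, n) ^ key, data, n);
}
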
#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */