/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
  static_always_inline t##s##x##c t##s##x##c##_splat (t##s x) \
  { \
    return (t##s##x##c) _mm512_set1_##i (x); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_load_aligned (void *p) \
  { \
    return (t##s##x##c) _mm512_load_si512 (p); \
  } \
\
  static_always_inline void t##s##x##c##_store_aligned (t##s##x##c v, \
                                                        void *p) \
  { \
    _mm512_store_si512 ((__m512i *) p, (__m512i) v); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p) \
  { \
    return (t##s##x##c) _mm512_loadu_si512 (p); \
  } \
\
  static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v, \
                                                          void *p) \
  { \
    _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); \
  } \
\
  static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c v) \
  { \
    return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); \
  } \
\
  static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
  { \
    return (_mm512_cmpneq_epi64_mask ((__m512i) a, (__m512i) b) == 0); \
  } \
\
  static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
  { \
    return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); \
  } \
\
  static_always_inline u##c t##s##x##c##_is_zero_mask (t##s##x##c v) \
  { \
    return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_interleave_lo (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_interleave_hi (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); \
  }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
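
/* Example (illustrative only) of the helpers generated above, here for
   u32x16 (dst is a hypothetical destination pointer):

     u32x16 v = u32x16_splat (7);
     u32x16_store_unaligned (v, dst);
     if (u32x16_is_all_equal (u32x16_load_unaligned (dst), 7))
       ;
*/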

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}

#define u64x8_i64gather(index, base, scale) \
  (u64x8) _mm512_i64gather_epi64 ((__m512i) index, base, scale)
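/* Gathers eight u64 values from base + index[i] * scale; scale must be a
   compile-time constant (1, 2, 4 or 8). */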

/* 512-bit packs */
#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m512i) lo, (__m512i) hi); \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _
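
/* Note: the AVX-512 pack instructions saturate and interleave their results
   within each 128-bit lane, so element order is not a plain concatenation of
   lo and hi. */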

static_always_inline u64x8
u64x8_byte_swap (u64x8 v)
{
  u8x64 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x8) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
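
/* _mm512_shuffle_epi8 permutes bytes within each 128-bit lane, which is why
   the 16-byte swap patterns above are repeated four times. */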

#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
                                      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
                                            (__m512i) b);
}

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
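/* The last argument is an 8-bit truth table applied bitwise to (a, b, c);
   e.g. 0x96 yields a ^ b ^ c, as used by the xor3 helpers below. */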

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
                                            (__m512i) c, 0x96);
}

static_always_inline u64x8
u64x8_xor3 (u64x8 a, u64x8 b, u64x8 c)
{
  return (u64x8) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
                                            (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
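
/* Example (illustrative only): load the first n bytes of a buffer without
   reading past its end (src is a hypothetical source pointer):

     u8x64 v = u8x64_mask_load_zero (src, pow2_mask (n));
*/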

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_and (t a, t b, m mask) \
  { \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_andnot (t a, t b, m mask) \
  { \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_xor (t a, t b, m mask) \
  { \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_or (t a, t b, m mask) \
  { \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
  }
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u64x8
u64x8_splat_u64x2 (u64x2 a)
{
  return (u64x8) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_permute (u8x64 idx, u8x64 a)
{
  return (u8x64) _mm512_permutexvar_epi8 ((__m512i) idx, (__m512i) a);
}

static_always_inline u8x64
u8x64_permute2 (u8x64 idx, u8x64 a, u8x64 b)
{
  return (u8x64) _mm512_permutex2var_epi8 ((__m512i) a, (__m512i) idx,
                                           (__m512i) b);
}
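
/* u8x64_permute selects bytes of a by idx; u8x64_permute2 selects from the
   128-byte concatenation of a and b (bit 6 of each index picks the table).
   Both rely on the AVX512_VBMI byte-permute instructions. */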

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_not_equal_mask (t a, t b) \
  { \
    return p##_cmpneq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _
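
/* The u32 -> u16 narrowing conversions above use unsigned saturation
   (cvtusepi32); values larger than 0xffff become 0xffff. */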

#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
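
/* Example (illustrative only): pack the elements selected by mask to the
   front of the vector, zeroing the remainder:

     u32x16 packed = u32x16_compress (v, mask);
*/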

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif
#endif
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif
#endif

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

static_always_inline u64
u64x8_hxor (u64x8 v)
{
  v ^= u64x8_align_right (v, v, 4);
  v ^= u64x8_align_right (v, v, 2);
  return v[0] ^ v[1];
}

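/* In-place transpose of a 16x16 matrix of u32, one row per vector. */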
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}

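/* In-place transpose of an 8x8 matrix of u64, one row per vector. */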
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

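/* Partial load/store of up to 64 bytes; the mask operations guarantee that
   no memory beyond the n requested bytes is touched. */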
static_always_inline u8x64
u8x64_load_partial (u8 *data, uword n)
{
  return u8x64_mask_load_zero (data, pow2_mask (n));
}

static_always_inline void
u8x64_store_partial (u8x64 r, u8 *data, uword n)
{
  u8x64_mask_store (r, data, pow2_mask (n));
}

#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */