/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, is_zero_mask */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); } \


foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */
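
/* For example, the _(u,32,16,epi32) entry above expands into
   u32x16_splat (), u32x16_load_aligned (), u32x16_store_aligned (),
   u32x16_load_unaligned (), u32x16_store_unaligned (), u32x16_is_all_zero (),
   u32x16_is_equal (), u32x16_is_all_equal (), u32x16_is_zero_mask (),
   u32x16_interleave_lo () and u32x16_interleave_hi (); e.g.
   `u32x16 v = u32x16_splat (7);` broadcasts 7 into all 16 lanes. */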

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}

/* 512-bit packs */
#define _(f, t, fn)                                                           \
  always_inline t t##_pack (f lo, f hi)                                       \
  {                                                                           \
    return (t) fn ((__m512i) lo, (__m512i) hi);                               \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _
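
/* Note: as with the underlying VPACK* instructions, these packs operate
   per 128-bit lane: each lane of the result packs the corresponding lanes
   of lo and hi (with saturation), rather than concatenating lo and hi as
   whole vectors. */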

static_always_inline u64x8
u64x8_byte_swap (u64x8 v)
{
  u8x64 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x8) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

#define _(f, t)                                                               \
  static_always_inline t f##_extract_lo (f v)                                 \
  {                                                                           \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0);                    \
  }                                                                           \
  static_always_inline t f##_extract_hi (f v)                                 \
  {                                                                           \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1);                    \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}
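
/* Note: u64x8_permute () selects 64-bit lanes from the concatenation of a
   and b: mask values 0-7 pick lanes of a, values 8-15 pick lanes of b. */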

#define u32x16_ternary_logic(a, b, c, d)                                      \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n)                                           \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n)                                             \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

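/* 3-way XOR in a single vpternlog instruction: immediate 0x96 is the truth
   table for a ^ b ^ c. */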
static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

static_always_inline u64x8
u64x8_xor3 (u64x8 a, u64x8 b, u64x8 c)
{
  return (u64x8) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)

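/* Horizontal sum of all 16 u32 lanes. vpalignr rotates within each 128-bit
   lane, so the two shifted adds reduce every lane to its per-lane total;
   adding the upper and lower 256-bit halves and then elements 0 and 4 of
   that result yields the grand total. */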
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}

#define _(t, m, p, i, e)                                                      \
  static_always_inline t t##_mask_load (t a, void *p, m mask)                 \
  {                                                                           \
    return (t) p##_mask_loadu_##e ((i) a, mask, p);                           \
  }                                                                           \
  static_always_inline t t##_mask_load_zero (void *p, m mask)                 \
  {                                                                           \
    return (t) p##_maskz_loadu_##e (mask, p);                                 \
  }                                                                           \
  static_always_inline void t##_mask_store (t a, void *p, m mask)             \
  {                                                                           \
    p##_mask_storeu_##e (p, mask, (i) a);                                     \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
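
/* Illustrative use of the masked loads/stores above, e.g. copying only the
   first n (n < 64) bytes of a buffer without touching memory past the end:

     u8x64 v = u8x64_mask_load_zero (src, pow2_mask (n));
     u8x64_mask_store (v, dst, pow2_mask (n));

   (src, dst and n are placeholder names; pow2_mask () is from
   vppinfra/clib.h.) */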

#define _(t, m, p, i, e)                                                      \
  static_always_inline t t##_mask_and (t a, t b, m mask)                      \
  {                                                                           \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b);                  \
  }                                                                           \
  static_always_inline t t##_mask_andnot (t a, t b, m mask)                   \
  {                                                                           \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b);               \
  }                                                                           \
  static_always_inline t t##_mask_xor (t a, t b, m mask)                      \
  {                                                                           \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b);                  \
  }                                                                           \
  static_always_inline t t##_mask_or (t a, t b, m mask)                       \
  {                                                                           \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b);                   \
  }
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_permute (u8x64 idx, u8x64 a)
{
  return (u8x64) _mm512_permutexvar_epi8 ((__m512i) idx, (__m512i) a);
}

static_always_inline u8x64
u8x64_permute2 (u8x64 idx, u8x64 a, u8x64 b)
{
  return (u8x64) _mm512_permutex2var_epi8 ((__m512i) a, (__m512i) idx,
					   (__m512i) b);
}
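
/* Note: u8x64_permute () and u8x64_permute2 () map to vpermb/vpermt2b, so
   they require a target with AVX512-VBMI support. */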

#define _(t, m, e, p, it)                                                     \
  static_always_inline m t##_is_equal_mask (t a, t b)                         \
  {                                                                           \
    return p##_cmpeq_##e##_mask ((it) a, (it) b);                             \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _
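
/* Each *_is_equal_mask (a, b) returns a scalar bitmask with bit i set when
   lane i of a equals lane i of b; e.g. u32x16_is_equal_mask () yields a u16
   with one bit per 32-bit lane. */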

#define _(t, m, e, p, it)                                                     \
  static_always_inline m t##_is_not_equal_mask (t a, t b)                     \
  {                                                                           \
    return p##_cmpneq_##e##_mask ((it) a, (it) b);                            \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(f, t, fn, it)                                                       \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _
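
/* Note: the narrowing conversions above use cvtusepi32_epi16, which
   saturates to the unsigned 16-bit range rather than truncating. */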

#define _(vt, mt, p, it, epi)                                                 \
  static_always_inline vt vt##_compress (vt a, mt mask)                       \
  {                                                                           \
    return (vt) p##_maskz_compress_##epi (mask, (it) a);                      \
  }                                                                           \
  static_always_inline vt vt##_expand (vt a, mt mask)                         \
  {                                                                           \
    return (vt) p##_maskz_expand_##epi (mask, (it) a);                        \
  }                                                                           \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p)      \
  {                                                                           \
    p##_mask_compressstoreu_##epi (p, mask, (it) v);                          \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
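
/* *_compress (v, mask) gathers the lanes of v selected by mask into the low
   lanes of the result and zeroes the rest; *_expand () is the inverse,
   scattering the low lanes of v into the positions selected by mask. */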

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif

#endif
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif

#endif

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

static_always_inline u64
u64x8_hxor (u64x8 v)
{
  v ^= u64x8_align_right (v, v, 4);
  v ^= u64x8_align_right (v, v, 2);
  return v[0] ^ v[1];
}

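/* In-place transpose of a 16x16 matrix of u32 elements held in 16 vectors
   (m[i][j] becomes m[j][i]): 32-bit and 64-bit unpacks form 2x2 and 4x4
   blocks, then 64-bit cross-lane permutes assemble the final rows. */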
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}

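/* In-place transpose of an 8x8 matrix of u64, using the same
   unpack-then-permute pattern as u32x16_transpose () above. */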
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */