/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); } \


foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */
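
/* Example usage of the generated helpers (illustrative sketch only; the
   u32x16_* names below come from the macro expansion above):

     u32 buf[16];
     u32x16 s = u32x16_splat (7);          // all 16 lanes = 7
     u32x16_store_unaligned (s, buf);      // no alignment requirement
     u32x16 v = u32x16_load_unaligned (buf);
     if (u32x16_is_all_equal (v, 7))       // every lane equals the scalar?
       ;

   The *_aligned variants require 64-byte aligned pointers. */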

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
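
/* Sketch: combined with a lane-wise compare (all-ones in true lanes), the
   MSB mask yields one bit per 16-bit lane, suitable for a bit scan:

     u32 m = u16x32_msb_mask (a == b);   // bit i set when lane i compared equal
     if (m)
       first_match = __builtin_ctz (m);  // hypothetical result variable
*/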

/* 512-bit packs */
#define _(f, t, fn)                                                           \
  always_inline t t##_pack (f lo, f hi)                                       \
  {                                                                           \
    return (t) fn ((__m512i) lo, (__m512i) hi);                               \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _
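
/* Note: as with their SSE/AVX2 ancestors, these pack within each 128-bit
   lane, so the result interleaves 16-byte chunks of lo and hi rather than
   concatenating them.  Sketch for i8x64_pack:

     i8x64 p = i8x64_pack (lo, hi);
     // p = { sat(lo[0..7]), sat(hi[0..7]), sat(lo[8..15]), sat(hi[8..15]), ... }
*/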

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
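
/* Sketch: one shuffle converts all lanes between little- and big-endian at
   once, e.g. when byte swapping a batch of network-order words (net_words
   is a hypothetical source buffer):

     u32x16 host = u32x16_byte_swap (u32x16_load_unaligned (net_words));
*/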

#define _(f, t)                                                               \
  static_always_inline t f##_extract_lo (f v)                                 \
  {                                                                           \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0);                    \
  }                                                                           \
  static_always_inline t f##_extract_hi (f v)                                 \
  {                                                                           \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1);                    \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}
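
/* The reduction halves the width once (512 -> 256 bits) and then defers to
   the AVX2 u32x8_min_scalar.  Scalar equivalent, for reference:

     u32 min = v[0];
     for (int i = 1; i < 16; i++)
       min = clib_min (min, v[i]);
*/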

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}
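
/* Sketch: each destination lane is selected from the 16-lane concatenation
   of a (indices 0-7) and b (indices 8-15), so e.g. interleaving the low
   halves of a and b:

     u64x8 mask = { 0, 8, 1, 9, 2, 10, 3, 11 };
     u64x8 r = u64x8_permute (a, b, mask);
*/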


#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
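
/* The immediate d is the 8-bit truth table of the three inputs: bit n of d
   is the result for (a_bit << 2 | b_bit << 1 | c_bit) == n.  E.g. 0x96 is
   a ^ b ^ c and 0xca is a ? b : c.  Sketch:

     u32x16 r = u32x16_ternary_logic (a, b, c, 0x96);  // three-way XOR
*/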

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}
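
/* Sketch: reverses byte order independently inside each of the four
   128-bit lanes, e.g. as a building block for bit-reflected algorithms
   such as carry-less multiply based hashing:

     u8x64 r = u8x64_reflect_u8x16 (x);
     // r[0..15] = x[15..0], r[16..31] = x[31..16], ...
*/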

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)

static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
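
/* Horizontal add in log steps: the two in-lane rotations leave each
   128-bit block's total in its first u32, and folding the 256-bit halves
   splits the grand total across sum8[0] and sum8[4].  Scalar equivalent:

     u32 sum = 0;
     for (int i = 0; i < 16; i++)
       sum += sum16[i];
*/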

#define _(t, m, p, i, e)                                                      \
  static_always_inline t t##_mask_load (t a, void *p, m mask)                 \
  {                                                                           \
    return (t) p##_mask_loadu_##e ((i) a, mask, p);                           \
  }                                                                           \
  static_always_inline t t##_mask_load_zero (void *p, m mask)                 \
  {                                                                           \
    return (t) p##_maskz_loadu_##e (mask, p);                                 \
  }                                                                           \
  static_always_inline void t##_mask_store (t a, void *p, m mask)             \
  {                                                                           \
    p##_mask_storeu_##e (p, mask, (i) a);                                     \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
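
/* Sketch: masked loads/stores make tail handling branch-free.  A
   hypothetical helper copying n <= 16 u32 elements (pow2_mask () from
   vppinfra yields the low n bits set):

     static_always_inline void
     copy_u32_partial (u32 *dst, u32 *src, u32 n)
     {
       u16 mask = pow2_mask (n);
       u32x16_mask_store (u32x16_mask_load_zero (src, mask), dst, mask);
     }
*/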

#define _(t, m, p, i, e)                                                      \
  static_always_inline t t##_mask_and (t a, t b, m mask)                      \
  {                                                                           \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b);                  \
  }                                                                           \
  static_always_inline t t##_mask_andnot (t a, t b, m mask)                   \
  {                                                                           \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b);               \
  }                                                                           \
  static_always_inline t t##_mask_xor (t a, t b, m mask)                      \
  {                                                                           \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b);                  \
  }                                                                           \
  static_always_inline t t##_mask_or (t a, t b, m mask)                       \
  {                                                                           \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b);                   \
  }
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}
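
/* Sketch: blend takes lanes from b where the mask bit is set and from a
   where it is clear:

     u32x16 r = u32x16_mask_blend (a, b, 0x00ff);  // lanes 0-7 from b, 8-15 from a
*/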

#define _(t, m, e, p, it)                                                     \
  static_always_inline m t##_is_equal_mask (t a, t b)                         \
  {                                                                           \
    return p##_cmpeq_##e##_mask ((it) a, (it) b);                             \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _
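
/* Sketch: compare-to-mask turns a 16-way key search into one compare plus
   a bit scan (keys/want are hypothetical):

     u16 hits = u32x16_is_equal_mask (keys, u32x16_splat (want));
     if (hits)
       index = __builtin_ctz (hits);  // lowest matching lane
*/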

#define _(t, m, e, p, it)                                                     \
  static_always_inline m t##_is_not_equal_mask (t a, t b)                     \
  {                                                                           \
    return p##_cmpneq_##e##_mask ((it) a, (it) b);                            \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(f, t, fn, it)                                                       \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _

#define _(vt, mt, p, it, epi)                                                 \
  static_always_inline vt vt##_compress (vt a, mt mask)                       \
  {                                                                           \
    return (vt) p##_maskz_compress_##epi (mask, (it) a);                      \
  }                                                                           \
  static_always_inline vt vt##_expand (vt a, mt mask)                         \
  {                                                                           \
    return (vt) p##_maskz_expand_##epi (mask, (it) a);                        \
  }                                                                           \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p)      \
  {                                                                           \
    p##_mask_compressstoreu_##epi (p, mask, (it) v);                          \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
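
/* Sketch: compress packs the lanes selected by the mask into the low lanes
   (zeroing the rest); expand is the inverse scatter.  E.g. collecting the
   indices of non-zero lanes of v:

     u32x16 idx = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
     u16 nz = u32x16_is_not_equal_mask (v, u32x16_splat (0));
     u32x16 packed = u32x16_compress (idx, nz);  // first popcount(nz) lanes valid
*/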

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif
#endif
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif
#endif

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
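
/* Sketch of intended use (rows is a hypothetical array of 16 * 16 u32s):
   after the in-place transpose, m[j][i] holds what m[i][j] held before,
   i.e. row j collects element j of every original row:

     u32x16 m[16];
     for (int i = 0; i < 16; i++)
       m[i] = u32x16_load_unaligned (rows + 16 * i);
     u32x16_transpose (m);
*/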

static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */