/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); } \


foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */
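/* For illustration, the u32 instantiation above expands to, among others:
     u32x16 u32x16_splat (u32 x);
     u32x16 u32x16_load_unaligned (void *p);
     void u32x16_store_unaligned (u32x16 v, void *p);
     int u32x16_is_all_zero (u32x16 v);
     u16 u32x16_is_zero_mask (u32x16 v);
   with matching variants for each type listed in foreach_avx512_vec512i/u. */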

static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}

/* 512-bit packs */
#define _(f, t, fn) \
  always_inline t t##_pack (f lo, f hi) \
  { \
    return (t) fn ((__m512i) lo, (__m512i) hi); \
  }

_ (i16x32, i8x64, _mm512_packs_epi16)
_ (i16x32, u8x64, _mm512_packus_epi16)
_ (i32x16, i16x32, _mm512_packs_epi32)
_ (i32x16, u16x32, _mm512_packus_epi32)
#undef _

static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
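/* In u32x16_ternary_logic above, d is an 8-bit truth-table immediate that
   selects the bitwise function of (a, b, c); e.g. 0x96 selects three-way XOR
   (see u8x64_xor3 below). */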

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

#define u64x8_align_right(a, b, imm) \
  (u64x8) _mm512_alignr_epi64 ((__m512i) a, (__m512i) b, imm)

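/* Horizontal sum of all 16 u32 lanes: the two alignr/add steps reduce each
   128-bit lane into its element 0, folding the upper and lower 256-bit halves
   leaves partial sums in elements 0 and 4, which are added as scalars. */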
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
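/* Illustrative use (hypothetical caller code, n <= 16 active u32 lanes):
     u16 m = (u16) ((1u << n) - 1);
     u32x16 v = u32x16_mask_load_zero (src, m);  inactive lanes read as 0
     u32x16_mask_store (v, dst, m);              only active lanes are written
   The 256- and 128-bit variants instantiated above follow the same pattern. */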

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_and (t a, t b, m mask) \
  { \
    return (t) p##_mask_and_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_andnot (t a, t b, m mask) \
  { \
    return (t) p##_mask_andnot_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_xor (t a, t b, m mask) \
  { \
    return (t) p##_mask_xor_##e ((i) a, mask, (i) a, (i) b); \
  } \
  static_always_inline t t##_mask_or (t a, t b, m mask) \
  { \
    return (t) p##_mask_or_##e ((i) a, mask, (i) a, (i) b); \
  }
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#define CLIB_HAVE_VEC512_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#define CLIB_HAVE_VEC256_MASK_BITWISE_OPS
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#define CLIB_HAVE_VEC128_MASK_BITWISE_OPS
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_not_equal_mask (t a, t b) \
  { \
    return p##_cmpneq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _

#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
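/* Illustrative semantics: the generated _compress (v, mask) packs the lanes
   whose mask bit is set into the low lanes of the result (zeroing the rest),
   _expand (v, mask) scatters the low lanes of v into the positions selected
   by mask, and _compress_store (v, mask, p) writes only the selected lanes
   contiguously to p. */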

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC256_COMPRESS_U8_U16
#endif
#endif
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#ifdef __AVX512VBMI2__
#define CLIB_HAVE_VEC512_COMPRESS_U8_U16
#endif
#endif

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

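/* In-place transpose of a 16x16 matrix of u32 held as 16 row vectors:
   32-bit unpacks interleave adjacent rows, 64-bit unpacks combine pairs of
   those results, and two rounds of permutex2var assemble the final rows. */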
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}

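/* In-place transpose of an 8x8 matrix of u64 held as 8 row vectors, using
   the same unpack + permutex2var pattern as u32x16_transpose above. */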
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */