/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); }

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */

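/* Usage sketch for the wrappers generated above (illustration only; dst is
   any sufficiently large buffer):

     u32x16 v = u32x16_splat (7);
     u32x16_store_unaligned (v, dst);
     if (u32x16_is_all_equal (u32x16_load_unaligned (dst), 7))
       ...
*/
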
static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}

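/* Byte-reverse each element. The shuffle control repeats the same 16-byte
   pattern in every 128-bit lane because _mm512_shuffle_epi8 permutes bytes
   within each lane independently. */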
static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

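/* Two-source permute (VPERMT2Q): result element n is picked from the
   16-entry table formed by a followed by b, indexed by the low 4 bits of
   mask[n]. */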
static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

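/* Three-way XOR (a ^ b ^ c) in a single instruction; 0x96 is the truth
   table of a ^ b ^ c for VPTERNLOGD. */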
static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

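/* Reverse the byte order of each 128-bit lane (the shuffle control is the
   sequence 15..0 repeated once per lane). */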
static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

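/* Horizontal sum of the 16 u32 elements: two in-lane shifted adds leave each
   128-bit lane's sum in its element 0, the extract/add folds the upper half
   onto the lower, and the two remaining lane sums are added as scalars. */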
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}

#define _(t, m, p, i, e) \
  static_always_inline t t##_mask_load (t a, void *p, m mask) \
  { \
    return (t) p##_mask_loadu_##e ((i) a, mask, p); \
  } \
  static_always_inline t t##_mask_load_zero (void *p, m mask) \
  { \
    return (t) p##_maskz_loadu_##e (mask, p); \
  } \
  static_always_inline void t##_mask_store (t a, void *p, m mask) \
  { \
    p##_mask_storeu_##e (p, mask, (i) a); \
  }

_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u8x16, u16, _mm, __m128i, epi8)
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u32x4, u8, _mm, __m128i, epi32)
_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u64x2, u8, _mm, __m128i, epi64)
#undef _
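
/* Usage sketch (illustration only): load and store just the first n
   elements of a u32x16, n <= 16, without touching memory past them:

     u16 mask = (1 << n) - 1;
     u32x16 v = u32x16_mask_load_zero (src, mask);
     u32x16_mask_store (v, dst, mask);
*/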

#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_MASK_LOAD_STORE
#endif
#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_MASK_LOAD_STORE
#endif
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC128_MASK_LOAD_STORE
#endif

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _

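/* Element-width conversions: the epi16->epi32 and epu32->epi64 forms widen,
   the usepi32->epi16 forms narrow with unsigned saturation. */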
#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _

#define _(vt, mt, p, it, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) p##_maskz_compress_##epi (mask, (it) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) p##_maskz_expand_##epi (mask, (it) a); \
  } \
  static_always_inline void vt##_compress_store (vt v, mt mask, void *p) \
  { \
    p##_mask_compressstoreu_##epi (p, mask, (it) v); \
  }

_ (u64x8, u8, _mm512, __m512i, epi64)
_ (u32x16, u16, _mm512, __m512i, epi32)
_ (u64x4, u8, _mm256, __m256i, epi64)
_ (u32x8, u8, _mm256, __m256i, epi32)
_ (u64x2, u8, _mm, __m128i, epi64)
_ (u32x4, u8, _mm, __m128i, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, _mm512, __m512i, epi16)
_ (u8x64, u64, _mm512, __m512i, epi8)
_ (u16x16, u16, _mm256, __m256i, epi16)
_ (u8x32, u32, _mm256, __m256i, epi8)
_ (u16x8, u8, _mm, __m128i, epi16)
_ (u8x16, u16, _mm, __m128i, epi8)
#endif
#undef _
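
/* Usage sketch (illustration only): front-pack the elements of v that are
   equal to x, then store only those elements to out:

     u16 hits = u32x16_is_equal_mask (v, u32x16_splat (x));
     u32x16 packed = u32x16_compress (v, hits);
     u32x16_compress_store (v, hits, out);
*/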

#ifdef CLIB_HAVE_VEC256
#define CLIB_HAVE_VEC256_COMPRESS
#endif
#ifdef CLIB_HAVE_VEC512
#define CLIB_HAVE_VEC512_COMPRESS
#endif

#ifndef __AVX512VBMI2__
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

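/* Transpose a 16x16 matrix of u32 held in 16 vectors (m[r][c] -> m[c][r]).
   Built from 32-bit unpacks followed by 64-bit two-source permutes; the pm*
   index vectors select which 64-bit halves land in each output row. */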
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}

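/* Transpose an 8x8 matrix of u64 held in 8 vectors, using the same
   unpack + two-source-permute scheme as u32x16_transpose above. */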
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */