/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); } \

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */
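
/* Usage sketch (illustrative, not part of the original file): the macro
   above instantiates one family of helpers per 512-bit vector type, e.g.
   u8x64_splat (), u8x64_is_zero_mask ().  A typical scan for a byte: */
static_always_inline int
example_u8x64_contains_byte (void *p, u8 byte)
{
  u8x64 v = u8x64_load_unaligned (p);
  /* lanes equal to 'byte' xor to zero; is_zero_mask sets a bit for every
     non-zero lane, so any clear bit means at least one match */
  return u8x64_is_zero_mask (v ^ u8x64_splat (byte)) != ~(u64) 0;
}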

/* one bit per lane, set when the lane's most significant bit is set */
static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
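
/* Illustrative sketch (example name is ours): GNU C vector comparisons
   set every bit of a matching lane, so the MSB mask turns a lane-wise
   compare into a 32-bit result, one bit per u16 lane. */
static_always_inline u32
example_u16x32_gt_mask (u16x32 a, u16x32 b)
{
  return u16x32_msb_mask ((u16x32) (a > b));
}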

/* per-lane u32 byte swap (big <-> little endian) */
static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

/* per-lane u16 byte swap (big <-> little endian) */
static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}
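
/* Illustrative sketch (example name is ours): byte-swap sixteen
   big-endian u32 values, e.g. fields gathered from network headers,
   in a single operation. */
static_always_inline void
example_ntohl_x16 (u32 *dst, u32 *src)
{
  u32x16_store_unaligned (u32x16_byte_swap (u32x16_load_unaligned (src)),
			  dst);
}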

#define _(f, t) \
  static_always_inline t f##_extract_lo (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 0); \
  } \
  static_always_inline t f##_extract_hi (f v) \
  { \
    return (t) _mm512_extracti64x4_epi64 ((__m512i) v, 1); \
  }

_ (u64x8, u64x4)
_ (u32x16, u32x8)
_ (u16x32, u16x16)
_ (u8x64, u8x32)
#undef _

/* horizontal minimum across all 16 u32 lanes */
static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
				      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}

/* two-source permute: mask lane values 0-7 pick from a, 8-15 pick from b */
static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
					    (__m512i) b);
}
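
/* Illustrative sketch (example name is ours): with both sources set to
   the same vector, the two-source permute acts as a single-vector
   permute, here reversing the eight 64-bit lanes. */
static_always_inline u64x8
example_u64x8_reverse (u64x8 v)
{
  return u64x8_permute (v, v, (u64x8) { 7, 6, 5, 4, 3, 2, 1, 0 });
}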

#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)
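
/* Illustrative sketch (example name is ours): immediate 0xca is the
   classic bitwise select, (a & b) | (~a & c), handy for branch-free
   merges under a bit mask. */
static_always_inline u32x16
example_u32x16_bitselect (u32x16 a, u32x16 b, u32x16 c)
{
  return u32x16_ternary_logic (a, b, c, 0xca);
}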

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)

/* a ^ b ^ c in one instruction; 0x96 is the truth table of 3-input XOR */
static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
					    (__m512i) c, 0x96);
}

/* reverse the order of bytes within each 128-bit lane */
static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}

/* vpshufb: shuffles bytes within each 128-bit lane, not across lanes */
static_always_inline u8x64
u8x64_shuffle (u8x64 v, u8x64 m)
{
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
}

#define u8x64_align_right(a, b, imm) \
  (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)

/* horizontal sum of all 16 u32 lanes */
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}

/* masked load/store: only bytes whose mask bit is set are read or
   written; unselected lanes of the load are taken from 'a' */
static_always_inline u8x64
u8x64_mask_load (u8x64 a, void *p, u64 mask)
{
  return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
}

static_always_inline void
u8x64_mask_store (u8x64 a, void *p, u64 mask)
{
  _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
}
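
/* Illustrative sketch (example name is ours): copy n (0..63) bytes
   without reading or writing past the end of either buffer, avoiding a
   scalar tail loop. */
static_always_inline void
example_u8x64_copy_tail (void *dst, void *src, uword n)
{
  u64 mask = ((u64) 1 << n) - 1;
  u8x64_mask_store (u8x64_mask_load (u8x64_splat (0), src, mask), dst, mask);
}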

/* broadcast a 128-bit vector into all four 128-bit lanes */
static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}

/* per-lane blend: lanes with the mask bit set come from b, the rest from a */
static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

#define _(t, m, e, p, it) \
  static_always_inline m t##_is_equal_mask (t a, t b) \
  { \
    return p##_cmpeq_##e##_mask ((it) a, (it) b); \
  }
_ (u8x16, u16, epu8, _mm, __m128i)
_ (u16x8, u8, epu16, _mm, __m128i)
_ (u32x4, u8, epu32, _mm, __m128i)
_ (u64x2, u8, epu64, _mm, __m128i)

_ (u8x32, u32, epu8, _mm256, __m256i)
_ (u16x16, u16, epu16, _mm256, __m256i)
_ (u32x8, u8, epu32, _mm256, __m256i)
_ (u64x4, u8, epu64, _mm256, __m256i)

_ (u8x64, u64, epu8, _mm512, __m512i)
_ (u16x32, u32, epu16, _mm512, __m512i)
_ (u32x16, u16, epu32, _mm512, __m512i)
_ (u64x8, u8, epu64, _mm512, __m512i)
#undef _
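
/* Illustrative sketch (example name is ours): combining the
   compare-to-mask helpers with the mask blends above gives branch-free
   lane replacement. */
static_always_inline u32x16
example_u32x16_replace (u32x16 v, u32 old_val, u32 new_val)
{
  u16 m = u32x16_is_equal_mask (v, u32x16_splat (old_val));
  return u32x16_mask_blend (v, u32x16_splat (new_val), m);
}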

#define _(f, t, fn, it) \
  static_always_inline t t##_from_##f (f x) { return (t) fn ((it) x); }
_ (u16x16, u32x16, _mm512_cvtepi16_epi32, __m256i)
_ (u32x16, u16x16, _mm512_cvtusepi32_epi16, __m512i)
_ (u32x8, u16x8, _mm256_cvtusepi32_epi16, __m256i)
_ (u32x8, u64x8, _mm512_cvtepu32_epi64, __m256i)
#undef _

#define _(vt, mt, bits, epi) \
  static_always_inline vt vt##_compress (vt a, mt mask) \
  { \
    return (vt) _mm##bits##_maskz_compress_##epi (mask, (__m##bits##i) a); \
  } \
  static_always_inline vt vt##_expand (vt a, mt mask) \
  { \
    return (vt) _mm##bits##_maskz_expand_##epi (mask, (__m##bits##i) a); \
  }

_ (u64x8, u8, 512, epi64)
_ (u32x16, u16, 512, epi32)
_ (u64x4, u8, 256, epi64)
_ (u32x8, u8, 256, epi32)
#ifdef __AVX512VBMI2__
_ (u16x32, u32, 512, epi16)
_ (u8x64, u64, 512, epi8)
_ (u16x16, u16, 256, epi16)
_ (u8x32, u32, 256, epi8)
#endif
#undef _

#define CLIB_HAVE_VEC256_COMPRESS
#define CLIB_HAVE_VEC512_COMPRESS
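
/* Illustrative sketch (example name is ours): compress packs the lanes
   selected by 'mask' into the low lanes, e.g. to collect indices of
   matching packets before a scalar loop over only the hits.  dst must
   have room for all 16 lanes, since the full vector is stored. */
static_always_inline u32
example_u32x16_pack_matches (u32 *dst, u32x16 v, u16 mask)
{
  u32x16_store_unaligned (u32x16_compress (v, mask), dst);
  return (u32) __builtin_popcount (mask);
}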

#ifndef __AVX512VBMI2__
/* no native 16-bit compress without AVX512_VBMI2: widen to 32 bits,
   compress, then narrow back */
static_always_inline u16x16
u16x16_compress (u16x16 v, u16 mask)
{
  return u16x16_from_u32x16 (u32x16_compress (u32x16_from_u16x16 (v), mask));
}

static_always_inline u16x8
u16x8_compress (u16x8 v, u8 mask)
{
  return u16x8_from_u32x8 (u32x8_compress (u32x8_from_u16x8 (v), mask));
}
#endif

/* in-place transpose of a 16x16 matrix of u32 elements */
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
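
/* Illustrative sketch (example name is ours): load sixteen rows of
   sixteen u32s and transpose them, so that m[i] afterwards holds what
   was column i of the original matrix. */
static_always_inline void
example_u32x16_transpose_rows (u32x16 m[16], u32 *rows[16])
{
  for (int i = 0; i < 16; i++)
    m[i] = u32x16_load_unaligned (rows[i]);
  u32x16_transpose (m);
}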

/* in-place transpose of an 8x8 matrix of u64 elements */
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */