/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx512_h
#define included_vector_avx512_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx512_vec512i \
  _(i,8,64,epi8) _(i,16,32,epi16) _(i,32,16,epi32) _(i,64,8,epi64)
#define foreach_avx512_vec512u \
  _(u,8,64,epi8) _(u,16,32,epi16) _(u,32,16,epi32) _(u,64,8,epi64)
#define foreach_avx512_vec512f \
  _(f,32,8,ps) _(f,64,4,pd)

/* splat, load_aligned, store_aligned, load_unaligned, store_unaligned,
   is_all_zero, is_equal, is_all_equal, is_zero_mask, interleave_lo,
   interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm512_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_aligned (void *p) \
{ return (t##s##x##c) _mm512_load_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_aligned (t##s##x##c v, void *p) \
{ _mm512_store_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm512_loadu_si512 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm512_storeu_si512 ((__m512i *) p, (__m512i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c v) \
{ return (_mm512_test_epi64_mask ((__m512i) v, (__m512i) v) == 0); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline u##c \
t##s##x##c##_is_zero_mask (t##s##x##c v) \
{ return _mm512_test_##i##_mask ((__m512i) v, (__m512i) v); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpacklo_##i ((__m512i) a, (__m512i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm512_unpackhi_##i ((__m512i) a, (__m512i) b); } \

foreach_avx512_vec512i foreach_avx512_vec512u
#undef _
/* *INDENT-ON* */
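
/* The macro above expands once per vector type, generating e.g.
   u32x16_splat, u32x16_load_aligned, u32x16_is_all_zero and so on.
   A minimal usage sketch (`p` is a hypothetical pointer to at least
   64 readable bytes):

     u32x16 v = u32x16_load_unaligned (p);
     if (!u32x16_is_all_zero (v))
       u32x16_store_unaligned (u32x16_splat (0), p);
*/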
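
/* Gather the most-significant (sign) bit of each 16-bit element into a
   scalar mask; bit i of the result is the MSB of element i. */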
static_always_inline u32
u16x32_msb_mask (u16x32 v)
{
  return (u32) _mm512_movepi16_mask ((__m512i) v);
}
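
/* Per-element byte swaps, e.g. for converting between network (big-endian)
   and host byte order; a single byte shuffle whose pattern repeats in each
   128-bit lane. */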
static_always_inline u32x16
u32x16_byte_swap (u32x16 v)
{
  u8x64 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x16) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u16x32
u16x32_byte_swap (u16x32 v)
{
  u8x64 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x32) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) swap);
}

static_always_inline u32x8
u32x16_extract_lo (u32x16 v)
{
  return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
}

static_always_inline u32x8
u32x16_extract_hi (u32x16 v)
{
  return (u32x8) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}

static_always_inline u8x32
u8x64_extract_lo (u8x64 v)
{
  return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 0);
}

static_always_inline u8x32
u8x64_extract_hi (u8x64 v)
{
  return (u8x32) _mm512_extracti64x4_epi64 ((__m512i) v, 1);
}
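
/* Horizontal minimum: fold 512 bits to 256 with an element-wise minimum of
   the two halves, then let the AVX2 u32x8_min_scalar finish the reduction. */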
static_always_inline u32
u32x16_min_scalar (u32x16 v)
{
  return u32x8_min_scalar (u32x8_min (u32x16_extract_lo (v),
                                      u32x16_extract_hi (v)));
}

static_always_inline u32x16
u32x16_insert_lo (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 0);
}

static_always_inline u32x16
u32x16_insert_hi (u32x16 r, u32x8 v)
{
  return (u32x16) _mm512_inserti64x4 ((__m512i) r, (__m256i) v, 1);
}
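
/* Two-source permute: element i of the result is a[mask[i] & 7] when bit 3
   of mask[i] is clear, b[mask[i] & 7] when it is set.  For example,
   mask = { 0, 8, 1, 9, 2, 10, 3, 11 } interleaves the low halves of a
   and b. */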
static_always_inline u64x8
u64x8_permute (u64x8 a, u64x8 b, u64x8 mask)
{
  return (u64x8) _mm512_permutex2var_epi64 ((__m512i) a, (__m512i) mask,
                                            (__m512i) b);
}
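
/* _mm512_ternarylogic_epi32 evaluates an arbitrary three-input boolean
   function per bit; the immediate d is the 8-entry truth table indexed by
   (a << 2) | (b << 1) | c.  E.g. 0xf0 copies a, 0xca is the bitwise
   select a ? b : c. */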
#define u32x16_ternary_logic(a, b, c, d) \
  (u32x16) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b, (__m512i) c, d)

#define u8x64_insert_u8x16(a, b, n) \
  (u8x64) _mm512_inserti64x2 ((__m512i) (a), (__m128i) (b), n)

#define u8x64_extract_u8x16(a, n) \
  (u8x16) _mm512_extracti64x2_epi64 ((__m512i) (a), n)

#define u8x64_word_shift_left(a,n)  (u8x64) _mm512_bslli_epi128((__m512i) a, n)
#define u8x64_word_shift_right(a,n) (u8x64) _mm512_bsrli_epi128((__m512i) a, n)
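
/* Three-way XOR in one instruction: 0x96 is the truth table of a ^ b ^ c. */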
static_always_inline u8x64
u8x64_xor3 (u8x64 a, u8x64 b, u8x64 c)
{
  return (u8x64) _mm512_ternarylogic_epi32 ((__m512i) a, (__m512i) b,
                                            (__m512i) c, 0x96);
}
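
/* Reverse the order of the 16 bytes within each 128-bit lane. */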
static_always_inline u8x64
u8x64_reflect_u8x16 (u8x64 x)
{
  static const u8x64 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x64) _mm512_shuffle_epi8 ((__m512i) x, (__m512i) mask);
}
193
194static_always_inline u8x64
Damjan Marion94dbf952020-07-15 20:18:39 +0200195u8x64_shuffle (u8x64 v, u8x64 m)
196{
197 return (u8x64) _mm512_shuffle_epi8 ((__m512i) v, (__m512i) m);
198}
199
200#define u8x64_align_right(a, b, imm) \
201 (u8x64) _mm512_alignr_epi8 ((__m512i) a, (__m512i) b, imm)
202
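
/* Note that _mm512_alignr_epi8, like most AVX-512 byte shuffles, operates
   independently within each 128-bit lane.  u32x16_sum_elts below relies on
   that: the two rotate-and-add steps leave each lane's partial sum in its
   element 0, extract_hi + extract_lo folds four lanes into two, and the
   final scalar add (sum8[0] + sum8[4]) combines the rest. */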
static_always_inline u32
u32x16_sum_elts (u32x16 sum16)
{
  u32x8 sum8;
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 8);
  sum16 += (u32x16) u8x64_align_right (sum16, sum16, 4);
  sum8 = u32x16_extract_hi (sum16) + u32x16_extract_lo (sum16);
  return sum8[0] + sum8[4];
}
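
/* Masked loads/stores touch only the bytes whose mask bit is set, so they
   are safe on partial buffers.  A sketch for copying n < 64 bytes, assuming
   the vppinfra pow2_mask() helper to build the n-bit mask:

     u64 m = pow2_mask (n);
     u8x64_mask_store (u8x64_mask_load (u8x64_splat (0), src, m), dst, m);
*/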
static_always_inline u8x64
u8x64_mask_load (u8x64 a, void *p, u64 mask)
{
  return (u8x64) _mm512_mask_loadu_epi8 ((__m512i) a, mask, p);
}

static_always_inline void
u8x64_mask_store (u8x64 a, void *p, u64 mask)
{
  _mm512_mask_storeu_epi8 (p, mask, (__m512i) a);
}

static_always_inline u8x64
u8x64_splat_u8x16 (u8x16 a)
{
  return (u8x64) _mm512_broadcast_i64x2 ((__m128i) a);
}

static_always_inline u32x16
u32x16_splat_u32x4 (u32x4 a)
{
  return (u32x16) _mm512_broadcast_i64x2 ((__m128i) a);
}
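
/* Per-element select: result element i comes from b when bit i of mask is
   set, from a otherwise. */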
static_always_inline u32x16
u32x16_mask_blend (u32x16 a, u32x16 b, u16 mask)
{
  return (u32x16) _mm512_mask_blend_epi32 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8x64
u8x64_mask_blend (u8x64 a, u8x64 b, u64 mask)
{
  return (u8x64) _mm512_mask_blend_epi8 (mask, (__m512i) a, (__m512i) b);
}

static_always_inline u8
u64x8_mask_is_equal (u64x8 a, u64x8 b)
{
  return _mm512_cmpeq_epu64_mask ((__m512i) a, (__m512i) b);
}
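
/* In-place transpose of a 16x16 matrix of u32: one 32-bit unpack stage, one
   64-bit unpack stage, then two permute stages that gather the quarters
   into whole rows via the four index vectors below. */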
static_always_inline void
u32x16_transpose (u32x16 m[16])
{
  __m512i r[16], a, b, c, d, x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpacklo_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[5] = _mm512_unpacklo_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[6] = _mm512_unpacklo_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[7] = _mm512_unpacklo_epi32 ((__m512i) m[14], (__m512i) m[15]);

  r[8] = _mm512_unpackhi_epi32 ((__m512i) m[0], (__m512i) m[1]);
  r[9] = _mm512_unpackhi_epi32 ((__m512i) m[2], (__m512i) m[3]);
  r[10] = _mm512_unpackhi_epi32 ((__m512i) m[4], (__m512i) m[5]);
  r[11] = _mm512_unpackhi_epi32 ((__m512i) m[6], (__m512i) m[7]);
  r[12] = _mm512_unpackhi_epi32 ((__m512i) m[8], (__m512i) m[9]);
  r[13] = _mm512_unpackhi_epi32 ((__m512i) m[10], (__m512i) m[11]);
  r[14] = _mm512_unpackhi_epi32 ((__m512i) m[12], (__m512i) m[13]);
  r[15] = _mm512_unpackhi_epi32 ((__m512i) m[14], (__m512i) m[15]);

  a = _mm512_unpacklo_epi64 (r[0], r[1]);
  b = _mm512_unpacklo_epi64 (r[2], r[3]);
  c = _mm512_unpacklo_epi64 (r[4], r[5]);
  d = _mm512_unpacklo_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[0] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[8] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[4] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[12] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpacklo_epi64 (r[8], r[9]);
  b = _mm512_unpacklo_epi64 (r[10], r[11]);
  c = _mm512_unpacklo_epi64 (r[12], r[13]);
  d = _mm512_unpacklo_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[2] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[10] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[6] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[14] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[0], r[1]);
  b = _mm512_unpackhi_epi64 (r[2], r[3]);
  c = _mm512_unpackhi_epi64 (r[4], r[5]);
  d = _mm512_unpackhi_epi64 (r[6], r[7]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[1] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[9] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[5] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[13] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);

  a = _mm512_unpackhi_epi64 (r[8], r[9]);
  b = _mm512_unpackhi_epi64 (r[10], r[11]);
  c = _mm512_unpackhi_epi64 (r[12], r[13]);
  d = _mm512_unpackhi_epi64 (r[14], r[15]);
  x = _mm512_permutex2var_epi64 (a, pm1, b);
  y = _mm512_permutex2var_epi64 (c, pm1, d);
  m[3] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[11] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (a, pm2, b);
  y = _mm512_permutex2var_epi64 (c, pm2, d);
  m[7] = (u32x16) _mm512_permutex2var_epi64 (x, pm3, y);
  m[15] = (u32x16) _mm512_permutex2var_epi64 (x, pm4, y);
}
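
/* In-place transpose of an 8x8 matrix of u64; same permute network as
   above, one unpack stage shallower since the elements are already
   64 bits wide. */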
static_always_inline void
u64x8_transpose (u64x8 m[8])
{
  __m512i r[8], x, y;

  /* *INDENT-OFF* */
  __m512i pm1 = (__m512i) (u64x8) { 0, 1, 8, 9, 4, 5, 12, 13};
  __m512i pm2 = (__m512i) (u64x8) { 2, 3, 10, 11, 6, 7, 14, 15};
  __m512i pm3 = (__m512i) (u64x8) { 0, 1, 2, 3, 8, 9, 10, 11};
  __m512i pm4 = (__m512i) (u64x8) { 4, 5, 6, 7, 12, 13, 14, 15};
  /* *INDENT-ON* */

  r[0] = _mm512_unpacklo_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[1] = _mm512_unpacklo_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[2] = _mm512_unpacklo_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[3] = _mm512_unpacklo_epi64 ((__m512i) m[6], (__m512i) m[7]);
  r[4] = _mm512_unpackhi_epi64 ((__m512i) m[0], (__m512i) m[1]);
  r[5] = _mm512_unpackhi_epi64 ((__m512i) m[2], (__m512i) m[3]);
  r[6] = _mm512_unpackhi_epi64 ((__m512i) m[4], (__m512i) m[5]);
  r[7] = _mm512_unpackhi_epi64 ((__m512i) m[6], (__m512i) m[7]);

  x = _mm512_permutex2var_epi64 (r[0], pm1, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm1, r[3]);
  m[0] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[4] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[0], pm2, r[1]);
  y = _mm512_permutex2var_epi64 (r[2], pm2, r[3]);
  m[2] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[6] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);

  x = _mm512_permutex2var_epi64 (r[4], pm1, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm1, r[7]);
  m[1] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[5] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
  x = _mm512_permutex2var_epi64 (r[4], pm2, r[5]);
  y = _mm512_permutex2var_epi64 (r[6], pm2, r[7]);
  m[3] = (u64x8) _mm512_permutex2var_epi64 (x, pm3, y);
  m[7] = (u64x8) _mm512_permutex2var_epi64 (x, pm4, y);
}

#endif /* included_vector_avx512_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */