/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

/* there is no _mm256_set1_epi64 intrinsic; alias it to _mm256_set1_epi64x so
   the token-pasted names generated below also work for the 64-bit case */
#define _mm256_set1_epi64 _mm256_set1_epi64x

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal, interleave_lo, interleave_hi */
#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) _mm256_set1_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); }

foreach_avx2_vec256i foreach_avx2_vec256u
#undef _
/* *INDENT-ON* */
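
/* Illustrative usage only (assumes a valid pointer p and the helpers the
   macro above generates for the u32x8 type):

     u32x8 v = u32x8_load_unaligned (p);
     if (u32x8_is_all_zero (v))
       return;
     if (u32x8_is_all_equal (v, 42))
       v += u32x8_splat (1);
     u32x8_store_unaligned (v, p);
*/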

always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}

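/* A sketch of u32x8_permute semantics, assuming the behavior of the
   underlying vpermd instruction: result element i is v[idx[i] & 7], e.g.
   reversing lane order:

     u32x8 idx = { 7, 6, 5, 4, 3, 2, 1, 0 };
     u32x8 reversed = u32x8_permute (v, idx);
*/
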
/* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
/* *INDENT-OFF* */
#define _(t1,t2) \
always_inline t1 \
t2##_extract_lo (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
\
always_inline t1 \
t2##_extract_hi (t2 v) \
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
\
always_inline t2 \
t2##_insert_lo (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); } \
\
always_inline t2 \
t2##_insert_hi (t2 v1, t1 v2) \
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }

_(u8x16, u8x32)
_(u16x8, u16x16)
_(u32x4, u32x8)
_(u64x2, u64x4)
#undef _
/* *INDENT-ON* */

static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

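/* Illustrative usage only: vector comparisons yield all-ones lanes, so
   u8x32_msb_mask () turns a byte-wise compare into a 32-bit bitmask, one bit
   per byte lane:

     u32 hits = u8x32_msb_mask (a == b);	// bit i set if a[i] == b[i]
     int n_matching = __builtin_popcount (hits);
*/
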
/* conversions with zero/sign extension: <wide>_from_<narrow> */
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
t##_from_##f (f x) \
{ return (t) _mm256_cvt##i ((__m128i) x); }

_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
_(u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
_(i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _
/* *INDENT-ON* */

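/* Illustrative example: each generated conversion widens the lower lanes of a
   128-bit vector into a full 256-bit vector, e.g.

     u16x8 a = { 1, 2, 3, 4, 5, 6, 7, 8 };
     u32x8 b = u32x8_from_u16x8 (a);	// zero-extends each lane to 32 bits
*/
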
static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

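/* Illustrative usage: the byte swaps convert a whole vector between network
   (big-endian) and host byte order at once, e.g. eight 32-bit values:

     u32x8 net = u32x8_load_unaligned (p);
     u32x8 host = u32x8_byte_swap (net);	// per-lane equivalent of ntohl ()
*/
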
static_always_inline u8x32
u8x32_shuffle (u8x32 v, u8x32 m)
{
  return (u8x32) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) m);
}

#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)

static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  /* vpalignr works per 128-bit lane, so the two shifted adds leave each
     lane's total in every element of that lane ... */
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  /* ... and adding one element from each lane gives the full sum */
  return sum8[0] + sum8[4];
}

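/* Illustrative usage, assuming a u32 array whose length n is a multiple
   of 8:

     u32x8 acc = { 0 };
     for (int i = 0; i < n; i += 8)
       acc += u32x8_load_unaligned (data + i);
     u32 total = u32x8_sum_elts (acc);
*/
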
static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}

/* zero the last n_last elements of v */
static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}

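/* Illustrative usage: clear trailing lanes that hold no valid data, e.g. when
   only n_valid of the 16 lanes were filled (n_valid is hypothetical here):

     v = u16x16_mask_last (v, 16 - n_valid);	// keeps the first n_valid lanes
*/
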
/* _mm256_mask_loadu_epi8 requires AVX512BW and AVX512VL, not just AVX512F */
#if defined(__AVX512BW__) && defined(__AVX512VL__)
static_always_inline u8x32
u8x32_mask_load (u8x32 a, void *p, u32 mask)
{
  return (u8x32) _mm256_mask_loadu_epi8 ((__m256i) a, mask, p);
}
#endif

static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  /* note: the underlying conversion treats each lane as a signed 32-bit int */
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}

#define u32x8_blend(a,b,m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)

static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}

static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}

static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}

static_always_inline u8x32
u8x32_is_greater (u8x32 v1, u8x32 v2)
{
  /* note: this is a signed byte compare despite the unsigned element type */
  return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
}

static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}

#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)

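/* A sketch of the control bytes used with the lane permutes below, assuming
   the _mm256_permute2x128_si256 encoding: 0x20 concatenates the low 128-bit
   lanes of a and b, 0x31 concatenates their high lanes, e.g.

     u32x8 a = { 0, 1, 2, 3, 4, 5, 6, 7 };
     u32x8 b = { 8, 9, 10, 11, 12, 13, 14, 15 };
     u32x8 lo = u32x8_permute_lanes (a, b, 0x20);	// { 0, 1, 2, 3, 8, 9, 10, 11 }
     u32x8 hi = u32x8_permute_lanes (a, b, 0x31);	// { 4, 5, 6, 7, 12, 13, 14, 15 }
*/
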
static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}

static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}

/* in-place transpose of an 8x8 matrix of u32, a[i] being row i */
static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}

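/* Illustrative usage (names here are hypothetical): load one 8 x u32 row per
   packet, transpose, and get one vector per field across 8 packets:

     u32x8 m[8];
     for (int i = 0; i < 8; i++)
       m[i] = u32x8_load_unaligned (pkt_meta[i]);
     u32x8_transpose (m);	// m[j][i] now holds field j of packet i
*/
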
/* in-place transpose of a 4x4 matrix of u64, a[i] being row i */
static_always_inline void
u64x4_transpose (u64x4 a[4])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}

#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */