/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>
21
22#define foreach_avx2_vec256i \
23 _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64x)
24#define foreach_avx2_vec256u \
25 _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64x)
26#define foreach_avx2_vec256f \
27 _(f,32,8,ps) _(f,64,4,pd)
28
29/* splat, load_unaligned, store_unaligned, is_all_zero, is_all_equal */
30#define _(t, s, c, i) \
31static_always_inline t##s##x##c \
32t##s##x##c##_splat (t##s x) \
33{ return (t##s##x##c) _mm256_set1_##i (x); } \
34\
35static_always_inline t##s##x##c \
36t##s##x##c##_load_unaligned (void *p) \
37{ return (t##s##x##c) _mm256_loadu_si256 (p); } \
38\
39static_always_inline void \
40t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
41{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
42\
43static_always_inline int \
44t##s##x##c##_is_all_zero (t##s##x##c x) \
45{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
46\
47static_always_inline int \
48t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
49{ return t##s##x##c##_is_all_zero (v != t##s##x##c##_splat (x)); }; \
50\
51
52foreach_avx2_vec256i foreach_avx2_vec256u
53#undef _
54 always_inline u32x8
55u32x8_permute (u32x8 v, u32x8 idx)
56{
57 return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
58}
59
60always_inline u32x4
61u32x8_extract_lo (u32x8 v)
62{
63 return (u32x4) _mm256_extracti128_si256 ((__m256i) v, 0);
64}
65
66always_inline u32x4
67u32x8_extract_hi (u32x8 v)
68{
69 return (u32x4) _mm256_extracti128_si256 ((__m256i) v, 1);
70}
71
Damjan Marionee7f0bd2018-05-05 12:30:28 +020072always_inline u32x8
73u32x8_insert_lo (u32x8 v1, u32x4 v2)
74{
75 return (u32x8) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0);
76}
77
78always_inline u32x8
79u32x8_insert_hi (u32x8 v1, u32x4 v2)
80{
81 return (u32x8) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1);
82}
83
#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */