/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Dummy intrinsic; aids in making the macros below uniform. */
#define vreinterpretq_u8_u8(a) a
/* Implement the missing intrinsics needed to keep the macros uniform. */
#define vminvq_u64(x)                   \
  ({                                    \
    u64 x0 = vgetq_lane_u64 (x, 0);     \
    u64 x1 = vgetq_lane_u64 (x, 1);     \
    x0 < x1 ? x0 : x1;                  \
  })
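/* Illustrative note (not part of the original API): AArch64 has no
 * vminvq_u64 instruction, so the macro above emulates the "minimum across
 * vector lanes" reduction by comparing the two 64-bit lanes.  The macros
 * below rely on it so that is_all_zero / is_equal can be expressed the same
 * way for every element width.  Rough sketch of its behaviour:
 *
 *   uint64x2_t x = { 5, 3 };
 *   u64 m = vminvq_u64 (x);    // m == 3; non-zero only if both lanes are
 *                              // non-zero
 */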

/* Converts all ones/zeros compare mask to bitmap. */
always_inline u32
u8x16_compare_byte_mask (u8x16 v)
{
  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };
  /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
  uint8x16_t x = vandq_u8 (v, mask);
  /* after v & mask,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
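/* Usage sketch (illustrative only, not from the original file): turning a
 * per-byte compare result into a bitmap, e.g. to locate a given byte value
 * in a 16-byte block (p is a hypothetical pointer):
 *
 *   u8x16 block = u8x16_load_unaligned (p);
 *   u8x16 eq = (u8x16) vceqq_u8 (block, vdupq_n_u8 (0x2a));
 *   u32 bitmap = u8x16_compare_byte_mask (eq);
 *   // bit n of bitmap is set iff block[n] == 0x2a
 */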

/* *INDENT-OFF* */
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

#define _(t, s, c, i)                                                         \
  static_always_inline t##s##x##c t##s##x##c##_splat (t##s x)                 \
  {                                                                           \
    return (t##s##x##c) vdupq_n_##i (x);                                      \
  }                                                                           \
                                                                              \
  static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p)       \
  {                                                                           \
    return (t##s##x##c) vld1q_##i (p);                                        \
  }                                                                           \
                                                                              \
  static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v,       \
                                                           void *p)           \
  {                                                                           \
    vst1q_##i (p, v);                                                         \
  }                                                                           \
                                                                              \
  static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c x)            \
  {                                                                           \
    return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i (0), x)));                  \
  }                                                                           \
                                                                              \
  static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
  {                                                                           \
    return !!(vminvq_u##s (vceqq_##i (a, b)));                                \
  }                                                                           \
  static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)   \
  {                                                                           \
    return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x));                 \
  };                                                                          \
                                                                              \
  static_always_inline u32 t##s##x##c##_zero_byte_mask (t##s##x##c x)         \
  {                                                                           \
    uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i (0), x));    \
    return u8x16_compare_byte_mask (v);                                       \
  }                                                                           \
                                                                              \
  static_always_inline u##s##x##c t##s##x##c##_is_greater (t##s##x##c a,      \
                                                            t##s##x##c b)     \
  {                                                                           \
    return (u##s##x##c) vcgtq_##i (a, b);                                     \
  }                                                                           \
                                                                              \
  static_always_inline t##s##x##c t##s##x##c##_add_saturate (t##s##x##c a,    \
                                                              t##s##x##c b)   \
  {                                                                           \
    return (t##s##x##c) vqaddq_##i (a, b);                                    \
  }                                                                           \
                                                                              \
  static_always_inline t##s##x##c t##s##x##c##_sub_saturate (t##s##x##c a,    \
                                                              t##s##x##c b)   \
  {                                                                           \
    return (t##s##x##c) vqsubq_##i (a, b);                                    \
  }                                                                           \
                                                                              \
  static_always_inline t##s##x##c t##s##x##c##_blend (                        \
    t##s##x##c dst, t##s##x##c src, u##s##x##c mask)                          \
  {                                                                           \
    return (t##s##x##c) vbslq_##i (mask, src, dst);                           \
  }

foreach_neon_vec128i foreach_neon_vec128u

#undef _
/* *INDENT-ON* */
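/* Illustrative note: the expansion above generates one family of helpers per
 * 128-bit vector type listed in foreach_neon_vec128i / foreach_neon_vec128u.
 * For (u, 32, 4, u32), for instance, it produces u32x4_splat,
 * u32x4_load_unaligned, u32x4_store_unaligned, u32x4_is_all_zero,
 * u32x4_is_equal, u32x4_is_all_equal, u32x4_zero_byte_mask,
 * u32x4_is_greater, u32x4_add_saturate, u32x4_sub_saturate and u32x4_blend.
 * A rough usage sketch (buf is hypothetical):
 *
 *   u32 buf[4] = { 1, 2, 3, 4 };
 *   u32x4 a = u32x4_load_unaligned (buf);
 *   u32x4 b = u32x4_splat (3);
 *   if (!u32x4_is_all_zero (u32x4_is_greater (a, b)))
 *     ;                         // at least one lane of a exceeds 3
 */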

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}

static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  return (u32x4) vrev32q_u8 ((u8x16) v);
}
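/* Usage sketch (illustrative only): the byte swaps above reverse the byte
 * order inside each element, the usual step when converting a vector of
 * network-order (big-endian) fields to host order on little-endian Arm
 * (p is a hypothetical pointer):
 *
 *   u16x8 fields_be = u16x8_load_unaligned (p);
 *   u16x8 fields = u16x8_byte_swap (fields_be);   // now in host byte order
 */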

static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) vqtbl1q_u8 (v, m);
}

static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}

static_always_inline u64x2
u64x2_from_u32x4 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

static_always_inline u64x2
u64x2_from_u32x4_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}

/* Creates a mask made up of the MSB of each byte of the source vector */
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  int8x16_t shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
  /* after (v & 0x80) >> shift,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
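/* Usage sketch (illustrative only): u8x16_msb_mask packs the top bit of each
 * byte into a 16-bit scalar, playing the same role as x86's pmovmskb, so a
 * per-byte compare can be scanned with ordinary bit operations
 * (block is hypothetical):
 *
 *   u8x16 eq = (u8x16) vceqq_u8 (block, vdupq_n_u8 (0));
 *   u16 mask = u8x16_msb_mask (eq);
 *   // a trailing-zero count of mask (if such a helper is available) gives
 *   // the index of the first zero byte in block
 */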

static_always_inline u64x2
u64x2_gather (void *p0, void *p1)
{
  u64x2 r = vdupq_n_u64 (*(u64 *) p0);
  r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
  return r;
}

static_always_inline u32x4
u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u32x4 r = vdupq_n_u32 (*(u32 *) p0);
  r = vsetq_lane_u32 (*(u32 *) p1, r, 1);
  r = vsetq_lane_u32 (*(u32 *) p2, r, 2);
  r = vsetq_lane_u32 (*(u32 *) p3, r, 3);
  return r;
}

static_always_inline void
u64x2_scatter (u64x2 r, void *p0, void *p1)
{
  *(u64 *) p0 = vgetq_lane_u64 (r, 0);
  *(u64 *) p1 = vgetq_lane_u64 (r, 1);
}

static_always_inline void
u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u32 *) p0 = vgetq_lane_u32 (r, 0);
  *(u32 *) p1 = vgetq_lane_u32 (r, 1);
  *(u32 *) p2 = vgetq_lane_u32 (r, 2);
  *(u32 *) p3 = vgetq_lane_u32 (r, 3);
}
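/* Usage sketch (illustrative only): gather builds a vector from scattered
 * locations and scatter writes it back, e.g. to update the same field of two
 * separate structures at once (b0/b1 and their len field are hypothetical):
 *
 *   u64x2 lens = u64x2_gather (&b0->len, &b1->len);
 *   lens = u64x2_add_saturate (lens, u64x2_splat (4));
 *   u64x2_scatter (lens, &b0->len, &b1->len);
 */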

static_always_inline u32
u32x4_min_scalar (u32x4 v)
{
  return vminvq_u32 (v);
}

#define u8x16_word_shift_left(x,n)  vextq_u8(u8x16_splat (0), x, 16 - n)
#define u8x16_word_shift_right(x,n) vextq_u8(x, u8x16_splat (0), n)
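/* Illustrative note: the "word shift" macros above move whole bytes across
 * the 128-bit register (n is a byte count, not a bit count) and fill the
 * vacated positions with zeros via vextq_u8.  For example, with
 * v = { 1, 2, ..., 16 }:
 *
 *   u8x16_word_shift_right (v, 2);   // { 3, 4, ..., 16, 0, 0 }
 *   u8x16_word_shift_left (v, 2);    // { 0, 0, 1, 2, ..., 14 }
 */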

always_inline u32x4
u32x4_interleave_hi (u32x4 a, u32x4 b)
{
  return (u32x4) vzip2q_u32 (a, b);
}

always_inline u32x4
u32x4_interleave_lo (u32x4 a, u32x4 b)
{
  return (u32x4) vzip1q_u32 (a, b);
}

static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
  u8x16 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x16) vqtbl1q_u8 (v, mask);
}

static_always_inline u8x16
u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
  u8x16 r;
  __asm__ ("eor3 %0.16b, %1.16b, %2.16b, %3.16b"
           : "=w" (r)
           : "0" (a), "w" (b), "w" (c));
  return r;
#endif
  return a ^ b ^ c;
}

#define CLIB_HAVE_VEC128_MSB_MASK

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */