/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Arithmetic */
/* Use the saturating NEON subtracts so the result clamps at the type limits
 * instead of wrapping, as the _sub_saturate names imply. */
#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)

/* Converts all ones/zeros compare mask to bitmap. */
always_inline u32
u8x16_compare_byte_mask (u8x16 v)
{
  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };
  /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
  uint8x16_t x = vandq_u8 (v, mask);
  /* after v & mask,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
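
/* Illustrative sketch (not part of the original header): a typical use of
 * u8x16_compare_byte_mask is to turn the all-ones/all-zeros lanes produced
 * by a NEON compare into a 16-bit "one bit per byte" bitmap.  The helper
 * name u8x16_find_byte below is hypothetical and only shown as an example. */
always_inline u32
u8x16_find_byte (u8x16 v, u8 c)
{
  /* vceqq_u8 yields 0xFF where v[i] == c and 0x00 elsewhere; the mask helper
   * then packs those lanes into bits 0..15 of the result. */
  u8x16 match = (u8x16) vceqq_u8 ((uint8x16_t) v, vdupq_n_u8 (c));
  return u8x16_compare_byte_mask (match);
}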

always_inline u32
u16x8_zero_byte_mask (u16x8 input)
{
  u8x16 vall_zero = vdupq_n_u8 (0x0);
  u8x16 res_values = { 0x01, 0x02, 0x04, 0x08,
    0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08,
    0x10, 0x20, 0x40, 0x80
  };

  /* input --> [0x80, 0x40, 0x01, 0xf0, ... ] */
  u8x16 test_result =
    vreinterpretq_u8_u16 (vceqq_u16 (input, vreinterpretq_u16_u8 (vall_zero)));
  u8x16 before_merge = vminq_u8 (test_result, res_values);
  /* before_merge --> [0x80, 0x00, 0x00, 0x10, ... ] */
  /* u8x16 --> [a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p] */
  /* pair add until we have 2 uint64_t */
  u16x8 merge1 = vpaddlq_u8 (before_merge);
  /* u16x8 --> [a+b,c+d, e+f,g+h, i+j,k+l, m+n,o+p] */
  u32x4 merge2 = vpaddlq_u16 (merge1);
  /* u32x4 --> [a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p] */
  u64x2 merge3 = vpaddlq_u32 (merge2);
  /* u64x2 --> [a+b+c+d+e+f+g+h, i+j+k+l+m+n+o+p] */
  return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
}
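
/* Illustrative sketch (not part of the original header): each zero 16-bit
 * lane of the input sets two adjacent bits in the returned bitmap (one per
 * byte of the lane), so the index of the first zero lane can be recovered
 * with a trailing-zero count.  u16x8_first_zero_index is a hypothetical
 * helper shown only for illustration; it assumes at least one lane is zero. */
always_inline u32
u16x8_first_zero_index (u16x8 v)
{
  u32 bitmap = u16x8_zero_byte_mask (v);
  /* two mask bits per 16-bit lane, hence the division by 2 */
  return __builtin_ctz (bitmap) / 2;
}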

always_inline u32
u8x16_zero_byte_mask (u8x16 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}

always_inline u32
u32x4_zero_byte_mask (u32x4 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}

always_inline u32
u64x2_zero_byte_mask (u64x2 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}

/* *INDENT-OFF* */
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) vdupq_n_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) vld1q_##i (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ vst1q_##i (p, v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return !(vaddvq_##i (x)); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }; \

foreach_neon_vec128i foreach_neon_vec128u

#undef _
/* *INDENT-ON* */
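
/* Illustrative sketch (not part of the original header): for the
 * (u,32,4,u32) entry of foreach_neon_vec128u the macro above generates
 * u32x4_splat, u32x4_load_unaligned, u32x4_store_unaligned,
 * u32x4_is_all_zero, u32x4_is_equal and u32x4_is_all_equal.  The
 * hypothetical helper below only shows how those generated functions
 * compose. */
static_always_inline int
u32x4_unaligned_block_is_filled_with (void *p, u32 x)
{
  /* load 4 possibly unaligned 32-bit words and test them against a splat */
  return u32x4_is_all_equal (u32x4_load_unaligned (p), x);
}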

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}

static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) vqtbl1q_u8 (v, m);
}

static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}

static_always_inline u64x2
u32x4_extend_to_u64x2 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

static_always_inline u64x2
u32x4_extend_to_u64x2_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}

/* Creates a mask made up of the MSB of each byte of the source vector */
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  int8x16_t shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
  /* vshlq with a negative count shifts right, so after (v & 0x80) >> |shift|,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
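
/* Illustrative sketch (not part of the original header): u8x16_msb_mask
 * extracts one bit per input byte, in the spirit of SSE movemask, so the
 * number of bytes whose most significant bit is set can be obtained with a
 * popcount.  The helper name below is hypothetical and shown only as an
 * example. */
static_always_inline u32
u8x16_count_msb_set (u8x16 v)
{
  /* one result bit per input byte, so popcount == number of MSB-set bytes */
  return __builtin_popcount (u8x16_msb_mask (v));
}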

#define CLIB_HAVE_VEC128_MSB_MASK

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */