/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Dummy; aids in making uniform macros */
#define vreinterpretq_u8_u8(a) a
/* Implement missing intrinsics to make the macros uniform */
#define vminvq_u64(x) \
  ({ \
    u64 x0 = vgetq_lane_u64 (x, 0); \
    u64 x1 = vgetq_lane_u64 (x, 1); \
    x0 < x1 ? x0 : x1; \
  })

/* Converts an all-ones/all-zeros compare mask to a bitmap. */
always_inline u32
u8x16_compare_byte_mask (u8x16 v)
{
  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };
  /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
  uint8x16_t x = vandq_u8 (v, mask);
  /* after v & mask,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
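
/*
 * Example (illustrative): if only bytes 0 and 2 of the compare result are
 * all-ones, the returned bitmap is 0x0005, i.e. bit i is set iff byte i of
 * v is 0xFF. Here a and b are hypothetical u8x16 values:
 *
 *   u8x16 eq = (u8x16) vceqq_u8 (a, b);          // 0xFF where a[i] == b[i]
 *   u32 bitmap = u8x16_compare_byte_mask (eq);   // bit i set iff a[i] == b[i]
 */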

/* *INDENT-OFF* */
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

#define _(t, s, c, i) \
  static_always_inline t##s##x##c t##s##x##c##_splat (t##s x) \
  { \
    return (t##s##x##c) vdupq_n_##i (x); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p) \
  { \
    return (t##s##x##c) vld1q_##i (p); \
  } \
\
  static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v, \
                                                           void *p) \
  { \
    vst1q_##i (p, v); \
  } \
\
  static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c x) \
  { \
    return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i (0), x))); \
  } \
\
  static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
  { \
    return !!(vminvq_u##s (vceqq_##i (a, b))); \
  } \
  static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
  { \
    return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); \
  }; \
\
  static_always_inline u32 t##s##x##c##_zero_byte_mask (t##s##x##c x) \
  { \
    uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i (0), x)); \
    return u8x16_compare_byte_mask (v); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_add_saturate (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) vqaddq_##i (a, b); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_sub_saturate (t##s##x##c a, \
                                                              t##s##x##c b) \
  { \
    return (t##s##x##c) vqsubq_##i (a, b); \
  } \
\
  static_always_inline t##s##x##c t##s##x##c##_blend ( \
    t##s##x##c dst, t##s##x##c src, u##s##x##c mask) \
  { \
    return (t##s##x##c) vbslq_##i (mask, src, dst); \
  }

foreach_neon_vec128i foreach_neon_vec128u

#undef _
/* *INDENT-ON* */
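
/*
 * For example (illustrative), the _(u,32,4,u32) instantiation above expands
 * to the equivalent of:
 *
 *   static_always_inline u32x4 u32x4_splat (u32 x)
 *   {
 *     return (u32x4) vdupq_n_u32 (x);
 *   }
 *
 * and likewise for _load_unaligned, _store_unaligned, _is_all_zero,
 * _is_equal, _is_all_equal, _zero_byte_mask, _add_saturate, _sub_saturate
 * and _blend, for each signed and unsigned element type instantiated above.
 */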

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}

static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  return (u32x4) vrev32q_u8 ((u8x16) v);
}
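
/*
 * Example (illustrative): byte swapping reverses endianness within each
 * lane, e.g. a u16x8 lane holding 0x1234 becomes 0x3412 and a u32x4 lane
 * holding 0x12345678 becomes 0x78563412.
 */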

static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}

static_always_inline u64x2
u64x2_from_u32x4 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

static_always_inline u64x2
u64x2_from_u32x4_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}
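
/*
 * Example (illustrative): u64x2_from_u32x4 widens the two low u32 lanes and
 * u64x2_from_u32x4_high the two high ones, so {1, 2, 3, 4} yields {1, 2}
 * and {3, 4} respectively.
 */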

/* Creates a mask made up of the MSB of each byte of the source vector */
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  int8x16_t shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
  /* after (v & 0x80) >> shift,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
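
/*
 * Example (illustrative): for the v shown in the comments above, the MSBs of
 * bytes 0, 2, 3, 4 and 6 are set, so the low byte of the result is 0x5D;
 * in general bit i of the returned u16 is the MSB of byte i of v.
 */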

static_always_inline u64x2
u64x2_gather (void *p0, void *p1)
{
  u64x2 r = vdupq_n_u64 (*(u64 *) p0);
  r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
  return r;
}

static_always_inline u32x4
u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u32x4 r = vdupq_n_u32 (*(u32 *) p0);
  r = vsetq_lane_u32 (*(u32 *) p1, r, 1);
  r = vsetq_lane_u32 (*(u32 *) p2, r, 2);
  r = vsetq_lane_u32 (*(u32 *) p3, r, 3);
  return r;
}

static_always_inline void
u64x2_scatter (u64x2 r, void *p0, void *p1)
{
  *(u64 *) p0 = vgetq_lane_u64 (r, 0);
  *(u64 *) p1 = vgetq_lane_u64 (r, 1);
}

static_always_inline void
u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u32 *) p0 = vgetq_lane_u32 (r, 0);
  *(u32 *) p1 = vgetq_lane_u32 (r, 1);
  *(u32 *) p2 = vgetq_lane_u32 (r, 2);
  *(u32 *) p3 = vgetq_lane_u32 (r, 3);
}
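
/*
 * Example usage (illustrative; a and b are hypothetical u64 variables):
 *
 *   u64 a = 1, b = 2;
 *   u64x2 v = u64x2_gather (&a, &b);   // v = { 1, 2 }
 *   u64x2_scatter (v, &b, &a);         // b = 1, a = 2
 */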

static_always_inline u32
u32x4_min_scalar (u32x4 v)
{
  return vminvq_u32 (v);
}

#define u8x16_word_shift_left(x,n) vextq_u8(u8x16_splat (0), x, 16 - n)
#define u8x16_word_shift_right(x,n) vextq_u8(x, u8x16_splat (0), n)
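
/*
 * Example (illustrative): these shift the whole 128-bit vector by n bytes,
 * e.g. u8x16_word_shift_right (x, 2) on x = {0, 1, ..., 15} drops the two
 * lowest bytes and shifts in zeroes from the top, giving {2, 3, ..., 15, 0, 0}.
 */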

always_inline u32x4
u32x4_interleave_hi (u32x4 a, u32x4 b)
{
  return (u32x4) vzip2q_u32 (a, b);
}

always_inline u32x4
u32x4_interleave_lo (u32x4 a, u32x4 b)
{
  return (u32x4) vzip1q_u32 (a, b);
}
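
/*
 * Example (illustrative): with a = {a0, a1, a2, a3} and b = {b0, b1, b2, b3},
 * u32x4_interleave_lo returns {a0, b0, a1, b1} and u32x4_interleave_hi
 * returns {a2, b2, a3, b3}.
 */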

static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
  u8x16 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x16) vqtbl1q_u8 (v, mask);
}
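
/*
 * Example (illustrative): u8x16_reflect reverses the byte order of the
 * vector, so {0, 1, 2, ..., 15} becomes {15, 14, ..., 1, 0}.
 */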

static_always_inline u8x16
u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
  u8x16 r;
  __asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b": "=w" (r): "0" (a), "w" (b), "w" (c):);
  return r;
#endif
  return a ^ b ^ c;
}

#define CLIB_HAVE_VEC128_MSB_MASK

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */