/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>
/* Arithmetic */
#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)
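/* e.g. u16x8_sub_saturate ({1, ...}, {3, ...}) clamps lane 0 to 0
   instead of wrapping to 0xfffe as plain vsubq_u16 would */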

/* Converts all ones/zeros compare mask to bitmap. */
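/* Each mask byte contributes one bit: its MSB is shifted right into a
   distinct position (bit 0..7 within each 8-byte half), then three
   pairwise adds collapse the halves into bytes 0 and 1, yielding a
   16-bit bitmap. E.g. an all-ones mask returns 0xffff; a mask with
   only byte 0 set returns 0x0001. */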
always_inline u32
u8x16_compare_byte_mask (u8x16 x)
{
  int8x16_t mask_shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  uint8x16_t mask_and = vdupq_n_u8 (0x80);
  x = vandq_u8 (x, mask_and);
  x = vshlq_u8 (x, mask_shift);
  x = vpaddq_u8 (x, x);
  x = vpaddq_u8 (x, x);
  x = vpaddq_u8 (x, x);
  return vgetq_lane_u8 (x, 0) | (vgetq_lane_u8 (x, 1) << 8);
}

always_inline u32
u16x8_zero_byte_mask (u16x8 input)
{
  u8x16 vall_zero = vdupq_n_u8 (0x0);
  u8x16 res_values = { 0x01, 0x02, 0x04, 0x08,
		       0x10, 0x20, 0x40, 0x80,
		       0x01, 0x02, 0x04, 0x08,
		       0x10, 0x20, 0x40, 0x80
  };

  /* input --> [0x80, 0x40, 0x01, 0xf0, ... ] */
  u8x16 test_result =
    vreinterpretq_u8_u16 (vceqq_u16 (input, vreinterpretq_u16_u8 (vall_zero)));
  u8x16 before_merge = vminq_u8 (test_result, res_values);
  /* before_merge --> [0x80, 0x00, 0x00, 0x10, ... ] */
  /* u8x16 --> [a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p] */
  /* pair add until we have 2 uint64_t */
  u16x8 merge1 = vpaddlq_u8 (before_merge);
  /* u16x8 --> [a+b,c+d, e+f,g+h, i+j,k+l, m+n,o+p] */
  u32x4 merge2 = vpaddlq_u16 (merge1);
  /* u32x4 --> [a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p] */
  u64x2 merge3 = vpaddlq_u32 (merge2);
  /* u64x2 --> [a+b+c+d+e+f+g+h, i+j+k+l+m+n+o+p] */
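  /* each zero 16-bit lane contributed two adjacent weight bits; combine
     the two 64-bit sums into one 16-bit bitmap */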
  return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
}

always_inline u32
u8x16_zero_byte_mask (u8x16 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}

always_inline u32
u32x4_zero_byte_mask (u32x4 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}

always_inline u32
u64x2_zero_byte_mask (u64x2 input)
{
  return u16x8_zero_byte_mask ((u16x8) input);
}
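
/* Note: the three wrappers above reinterpret their argument as u16x8,
   so the mask is computed over the 16-bit lanes of the reinterpreted
   vector rather than over lanes of the original element size. */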

/* *INDENT-OFF* */
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32) _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32) _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

#define _(t, s, c, i) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ return (t##s##x##c) vdupq_n_##i (x); } \
\
static_always_inline t##s##x##c \
t##s##x##c##_load_unaligned (void *p) \
{ return (t##s##x##c) vld1q_##i (p); } \
\
static_always_inline void \
t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
{ vst1q_##i (p, v); } \
\
static_always_inline int \
t##s##x##c##_is_all_zero (t##s##x##c x) \
{ return !(vaddvq_##i (x)); } \
\
static_always_inline int \
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
{ return t##s##x##c##_is_all_zero (a ^ b); } \
\
static_always_inline int \
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }

foreach_neon_vec128i foreach_neon_vec128u

#undef _
/* *INDENT-ON* */
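
/* Illustrative expansion for _(u,32,4,u32): the block above defines
 * u32x4_splat, u32x4_load_unaligned, u32x4_store_unaligned,
 * u32x4_is_all_zero, u32x4_is_equal and u32x4_is_all_equal, e.g.
 *
 *   u32x4 v = u32x4_splat (7);   // => {7, 7, 7, 7}
 *   u32x4_is_all_equal (v, 7);   // => 1
 */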

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
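  /* swap the two bytes within each 16-bit lane, e.g. 0x1234 -> 0x3412 */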
  return (u16x8) vrev16q_u8 ((u8x16) v);
}

static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
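  /* per-byte table lookup: result[i] = v[m[i]]; vqtbl1q_u8 returns 0
     for any index >= 16 */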
  return (u8x16) vqtbl1q_u8 (v, m);
}

static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
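  /* pairwise sums: { v1[0]+v1[1], v1[2]+v1[3], v2[0]+v2[1], v2[2]+v2[3] } */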
  return (u32x4) vpaddq_u32 (v1, v2);
}

static_always_inline u64x2
u32x4_extend_to_u64x2 (u32x4 v)
{
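  /* zero-extend the two low 32-bit lanes to 64 bits */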
  return vmovl_u32 (vget_low_u32 (v));
}

static_always_inline u64x2
u32x4_extend_to_u64x2_high (u32x4 v)
{
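  /* widen the two high 32-bit lanes: vrev64q_u32 swaps the lanes within
     each doubleword before vmovl_high_u32 widens the upper half */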
  return vmovl_high_u32 (vrev64q_u32 (v));
}

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */