/* SPDX-License-Identifier: Apache-2.0
 * Copyright(c) 2021 Cisco Systems, Inc.
 */

#ifndef included_vector_mask_compare_h
#define included_vector_mask_compare_h
#include <vppinfra/clib.h>
#include <vppinfra/memcpy.h>

static_always_inline u64
clib_mask_compare_u16_x64 (u16 v, u16 *a)
{
  u64 mask = 0;
#if defined(CLIB_HAVE_VEC512)
  u16x32 v32 = u16x32_splat (v);
  u16x32u *av = (u16x32u *) a;
  mask = ((u64) u16x32_is_equal_mask (av[0], v32) |
          (u64) u16x32_is_equal_mask (av[1], v32) << 32);
#elif defined(CLIB_HAVE_VEC256)
  u16x16 v16 = u16x16_splat (v);
  u16x16u *av = (u16x16u *) a;
  i8x32 x;

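  /* i8x32_pack () packs per 128-bit lane, so the bytes come out
     lane-interleaved; u64x4_permute (x, 0, 2, 1, 3) restores sequential
     element order before i8x32_msb_mask () extracts the bitmap. */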
  x = i8x32_pack (v16 == av[0], v16 == av[1]);
  mask = i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3));
  x = i8x32_pack (v16 == av[2], v16 == av[3]);
  mask |= (u64) i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3)) << 32;
#elif defined(CLIB_HAVE_VEC128) && defined(__ARM_NEON)
  u16x8 v8 = u16x8_splat (v);
  u16x8 m = { 1, 2, 4, 8, 16, 32, 64, 128 };
  u16x8u *av = (u16x8u *) a;

  /* Compare each u16 element with v8; the result gives 0xffff in each
     element of the resulting vector if the comparison is true.
     Bitwise AND with m leaves one bit set per true result, and the offset
     of that bit represents the element index. Finally, vaddvq_u16 () sums
     all elements of the vector, yielding an 8-bit bitmap per vector. */
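  /* For example, if elements 1 and 3 of av[i] equal v, then
     (av[i] == v8) & m is { 0, 2, 0, 8, 0, 0, 0, 0 } and vaddvq_u16 ()
     returns 0b00001010, i.e. bits 1 and 3 of that vector's byte. */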

  for (int i = 0; i < 8; i++)
    mask |= (u64) vaddvq_u16 ((av[i] == v8) & m) << (i * 8);

#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u16x8 v8 = u16x8_splat (v);
  u16x8u *av = (u16x8u *) a;
  mask = ((u64) i8x16_msb_mask (i8x16_pack (v8 == av[0], v8 == av[1])) |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[2], v8 == av[3])) << 16 |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[4], v8 == av[5])) << 32 |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[6], v8 == av[7])) << 48);
#else
  for (int i = 0; i < 64; i++)
    if (a[i] == v)
      mask |= 1ULL << i;
#endif
  return mask;
}

static_always_inline u64
clib_mask_compare_u16_x64_n (u16 v, u16 *a, u32 n_elts)
{
  u64 mask = 0;
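  /* pow2_mask (n_elts) sets the n_elts least-significant bits; the masked
     loads below use it (suitably shifted) to zero lanes past the end of
     the array, so no out-of-bounds bytes are read. */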
  CLIB_UNUSED (u64 data_mask) = pow2_mask (n_elts);
#if defined(CLIB_HAVE_VEC512)
  u16x32 v32 = u16x32_splat (v);
  u16x32u *av = (u16x32u *) a;
  mask = ((u64) u16x32_is_equal_mask (
            u16x32_mask_load_zero (&av[0], data_mask), v32) |
          (u64) u16x32_is_equal_mask (
            u16x32_mask_load_zero (&av[1], data_mask >> 32), v32)
            << 32);
#elif defined(CLIB_HAVE_VEC256) && defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
  u16x16 v16 = u16x16_splat (v);
  u16x16u *av = (u16x16u *) a;
  i8x32 x;

  x = i8x32_pack (v16 == u16x16_mask_load_zero (&av[0], data_mask),
                  v16 == u16x16_mask_load_zero (&av[1], data_mask >> 16));
  mask = i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3));
  x = i8x32_pack (v16 == u16x16_mask_load_zero (&av[2], data_mask >> 32),
                  v16 == u16x16_mask_load_zero (&av[3], data_mask >> 48));
  mask |= (u64) i8x32_msb_mask ((i8x32) u64x4_permute (x, 0, 2, 1, 3)) << 32;
#else
  for (int i = 0; i < n_elts; i++)
    if (a[i] == v)
      mask |= 1ULL << i;
#endif
  return mask;
}

/** \brief Compare 16-bit elements with provided value and return bitmap

    @param v value to compare elements with
    @param a array of u16 elements
    @param mask array of u64 where resulting mask will be stored
    @param n_elts number of elements in the array
    @return none
*/

static_always_inline void
clib_mask_compare_u16 (u16 v, u16 *a, u64 *mask, u32 n_elts)
{
  while (n_elts >= 64)
    {
      mask++[0] = clib_mask_compare_u16_x64 (v, a);
      n_elts -= 64;
      a += 64;
    }

  if (PREDICT_TRUE (n_elts == 0))
    return;

  mask[0] = clib_mask_compare_u16_x64_n (v, a, n_elts) & pow2_mask (n_elts);
}
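
/* Illustrative usage sketch (not part of the API; buffer contents are
   hypothetical):

     u16 buf[100];
     u64 bitmap[2];

     clib_mask_compare_u16 (0x1234, buf, bitmap, 100);
     // bit i of bitmap[i / 64] is now set iff buf[i] == 0x1234;
     // bits past n_elts in the last word are cleared by pow2_mask ().

   n_elts need not be a multiple of 64; the tail is handled by the
   masked-load _x64_n variant. */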

static_always_inline u64
clib_mask_compare_u32_x64 (u32 v, u32 *a)
{
  u64 mask = 0;
#if defined(CLIB_HAVE_VEC512)
  u32x16 v16 = u32x16_splat (v);
  u32x16u *av = (u32x16u *) a;
  mask = ((u64) u32x16_is_equal_mask (av[0], v16) |
          (u64) u32x16_is_equal_mask (av[1], v16) << 16 |
          (u64) u32x16_is_equal_mask (av[2], v16) << 32 |
          (u64) u32x16_is_equal_mask (av[3], v16) << 48);
#elif defined(CLIB_HAVE_VEC256)
  u32x8 v8 = u32x8_splat (v);
  u32x8u *av = (u32x8u *) a;
  u32x8 m = { 0, 4, 1, 5, 2, 6, 3, 7 };
  i8x32 c;

  c = i8x32_pack (i16x16_pack ((i32x8) (v8 == av[0]), (i32x8) (v8 == av[1])),
                  i16x16_pack ((i32x8) (v8 == av[2]), (i32x8) (v8 == av[3])));
  mask = i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m));

  c = i8x32_pack (i16x16_pack ((i32x8) (v8 == av[4]), (i32x8) (v8 == av[5])),
                  i16x16_pack ((i32x8) (v8 == av[6]), (i32x8) (v8 == av[7])));
  mask |= (u64) i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m)) << 32;

#elif defined(CLIB_HAVE_VEC128) && defined(__ARM_NEON)
  u32x4 v4 = u32x4_splat (v);
  u32x4 m = { 1, 2, 4, 8 };
  u32x4u *av = (u32x4u *) a;

  /* Compare each u32 element with v4; the result gives -1 in each element
     of the resulting vector if the comparison is true.
     Bitwise AND with m leaves one bit set per true result, and the offset
     of that bit represents the element index. Finally, vaddvq_u32 () sums
     all elements of the vector, yielding a 4-bit bitmap per vector. */

  for (int i = 0; i < 16; i++)
    mask |= (u64) vaddvq_u32 ((av[i] == v4) & m) << (i * 4);

#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u32x4 v4 = u32x4_splat (v);
  u32x4u *av = (u32x4u *) a;

  for (int i = 0; i < 4; i++)
    {
      i16x8 p1 = i16x8_pack (v4 == av[0], v4 == av[1]);
      i16x8 p2 = i16x8_pack (v4 == av[2], v4 == av[3]);
      mask |= (u64) i8x16_msb_mask (i8x16_pack (p1, p2)) << (i * 16);
      av += 4;
    }

#else
  for (int i = 0; i < 64; i++)
    if (a[i] == v)
      mask |= 1ULL << i;
#endif
  return mask;
}

static_always_inline u64
clib_mask_compare_u32_x64_n (u32 v, u32 *a, u32 n_elts)
{
  u64 mask = 0;
  CLIB_UNUSED (u64 data_mask) = pow2_mask (n_elts);
#if defined(CLIB_HAVE_VEC512)
  u32x16 v16 = u32x16_splat (v);
  u32x16u *av = (u32x16u *) a;
  mask = ((u64) u32x16_is_equal_mask (
            u32x16_mask_load_zero (&av[0], data_mask), v16) |
          (u64) u32x16_is_equal_mask (
            u32x16_mask_load_zero (&av[1], data_mask >> 16), v16)
            << 16 |
          (u64) u32x16_is_equal_mask (
            u32x16_mask_load_zero (&av[2], data_mask >> 32), v16)
            << 32 |
          (u64) u32x16_is_equal_mask (
            u32x16_mask_load_zero (&av[3], data_mask >> 48), v16)
            << 48);
#elif defined(CLIB_HAVE_VEC256) && defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
  u32x8 v8 = u32x8_splat (v);
  u32x8u *av = (u32x8u *) a;
  u32x8 m = { 0, 4, 1, 5, 2, 6, 3, 7 };
  i8x32 c;

  c = i8x32_pack (
    i16x16_pack (
      (i32x8) (v8 == u32x8_mask_load_zero (&av[0], data_mask)),
      (i32x8) (v8 == u32x8_mask_load_zero (&av[1], data_mask >> 8))),
    i16x16_pack (
      (i32x8) (v8 == u32x8_mask_load_zero (&av[2], data_mask >> 16)),
      (i32x8) (v8 == u32x8_mask_load_zero (&av[3], data_mask >> 24))));
  mask = i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m));

  c = i8x32_pack (
    i16x16_pack (
      (i32x8) (v8 == u32x8_mask_load_zero (&av[4], data_mask >> 32)),
      (i32x8) (v8 == u32x8_mask_load_zero (&av[5], data_mask >> 40))),
    i16x16_pack (
      (i32x8) (v8 == u32x8_mask_load_zero (&av[6], data_mask >> 48)),
      (i32x8) (v8 == u32x8_mask_load_zero (&av[7], data_mask >> 56))));
  mask |= (u64) i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m)) << 32;
#else
  for (int i = 0; i < n_elts; i++)
    if (a[i] == v)
      mask |= 1ULL << i;
#endif
  return mask;
}

/** \brief Compare 32-bit elements with provided value and return bitmap

    @param v value to compare elements with
    @param a array of u32 elements
    @param bitmap array of u64 where resulting mask will be stored
    @param n_elts number of elements in the array
    @return none
*/

static_always_inline void
clib_mask_compare_u32 (u32 v, u32 *a, u64 *bitmap, u32 n_elts)
{
  while (n_elts >= 64)
    {
      bitmap++[0] = clib_mask_compare_u32_x64 (v, a);
      n_elts -= 64;
      a += 64;
    }

  if (PREDICT_TRUE (n_elts == 0))
    return;

  bitmap[0] = clib_mask_compare_u32_x64_n (v, a, n_elts) & pow2_mask (n_elts);
}

static_always_inline u64
clib_mask_compare_u64_x64 (u64 v, u64 *a)
{
  u64 mask = 0;
#if defined(CLIB_HAVE_VEC512)
  u64x8 v8 = u64x8_splat (v);
  u64x8u *av = (u64x8u *) a;
  mask = ((u64) u64x8_is_equal_mask (av[0], v8) |
          (u64) u64x8_is_equal_mask (av[1], v8) << 8 |
          (u64) u64x8_is_equal_mask (av[2], v8) << 16 |
          (u64) u64x8_is_equal_mask (av[3], v8) << 24 |
          (u64) u64x8_is_equal_mask (av[4], v8) << 32 |
          (u64) u64x8_is_equal_mask (av[5], v8) << 40 |
          (u64) u64x8_is_equal_mask (av[6], v8) << 48 |
          (u64) u64x8_is_equal_mask (av[7], v8) << 56);

#elif defined(CLIB_HAVE_VEC256) && defined(__BMI2__)
  u64x4 v4 = u64x4_splat (v);
  u64x4u *av = (u64x4u *) a;

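  /* u8x32_msb_mask () yields one bit per byte, so every matching u64
     element contributes 8 identical bits; _pext_u64 () with the constant
     0x0101010101010101 keeps bit 0 of each byte, compressing the two
     concatenated byte masks into 8 result bits per iteration. */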
  for (int i = 0; i < 16; i += 2)
    {
      u64 l = u8x32_msb_mask (v4 == av[i]);
      u64 h = u8x32_msb_mask (v4 == av[i + 1]);
      mask |= _pext_u64 (l | h << 32, 0x0101010101010101) << (i * 4);
    }
#else
  for (int i = 0; i < 64; i++)
    if (a[i] == v)
      mask |= 1ULL << i;
#endif
  return mask;
}

static_always_inline u64
clib_mask_compare_u64_x64_n (u64 v, u64 *a, u32 n_elts)
{
  u64 mask = 0;
  CLIB_UNUSED (u64 data_mask) = pow2_mask (n_elts);
#if defined(CLIB_HAVE_VEC512)
  u64x8 v8 = u64x8_splat (v);
  u64x8u *av = (u64x8u *) a;
  mask =
    ((u64) u64x8_is_equal_mask (u64x8_mask_load_zero (&av[0], data_mask), v8) |
     (u64) u64x8_is_equal_mask (u64x8_mask_load_zero (&av[1], data_mask >> 8),
                                v8)
       << 8 |
     (u64) u64x8_is_equal_mask (u64x8_mask_load_zero (&av[2], data_mask >> 16),
                                v8)
       << 16 |
     (u64) u64x8_is_equal_mask (u64x8_mask_load_zero (&av[3], data_mask >> 24),
                                v8)
       << 24 |
     (u64) u64x8_is_equal_mask (u64x8_mask_load_zero (&av[4], data_mask >> 32),
                                v8)
       << 32 |
     (u64) u64x8_is_equal_mask (u64x8_mask_load_zero (&av[5], data_mask >> 40),
                                v8)
       << 40 |
     (u64) u64x8_is_equal_mask (u64x8_mask_load_zero (&av[6], data_mask >> 48),
                                v8)
       << 48 |
     (u64) u64x8_is_equal_mask (u64x8_mask_load_zero (&av[7], data_mask >> 56),
                                v8)
       << 56);

#elif defined(CLIB_HAVE_VEC256) && defined(__BMI2__) &&                       \
  defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
  u64x4 v4 = u64x4_splat (v);
  u64x4u *av = (u64x4u *) a;

  for (int i = 0; i < 16; i += 2)
    {
      u64 l = u8x32_msb_mask (v4 == u64x4_mask_load_zero (&av[i], data_mask));
      u64 h = u8x32_msb_mask (
        v4 == u64x4_mask_load_zero (&av[i + 1], data_mask >> 4));
      mask |= _pext_u64 (l | h << 32, 0x0101010101010101) << (i * 4);
      data_mask >>= 8;
    }
#else
  for (int i = 0; i < n_elts; i++)
    if (a[i] == v)
      mask |= 1ULL << i;
#endif
  return mask;
}

/** \brief Compare 64-bit elements with provided value and return bitmap

    @param v value to compare elements with
    @param a array of u64 elements
    @param bitmap array of u64 where resulting mask will be stored
    @param n_elts number of elements in the array
    @return none
*/

static_always_inline void
clib_mask_compare_u64 (u64 v, u64 *a, u64 *bitmap, u32 n_elts)
{
  while (n_elts >= 64)
    {
      bitmap++[0] = clib_mask_compare_u64_x64 (v, a);
      n_elts -= 64;
      a += 64;
    }

  if (PREDICT_TRUE (n_elts == 0))
    return;

  bitmap[0] = clib_mask_compare_u64_x64_n (v, a, n_elts) & pow2_mask (n_elts);
}

#endif