/* SPDX-License-Identifier: Apache-2.0
 * Copyright(c) 2021 Cisco Systems, Inc.
 */

#ifndef included_memcpy_h
#define included_memcpy_h

#include <vppinfra/clib.h>

#ifndef __COVERITY__

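/* copy exactly four u32s: a single unaligned 128-bit load/store pair when
 * 128-bit vectors are available, clib_memcpy_fast otherwise */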
static_always_inline void
clib_memcpy_u32_x4 (u32 *dst, u32 *src)
{
#if defined(CLIB_HAVE_VEC128)
  u32x4_store_unaligned (u32x4_load_unaligned (src), dst);
#else
  clib_memcpy_fast (dst, src, 4 * sizeof (u32));
#endif
}
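
/* copy exactly eight u32s: one unaligned 256-bit load/store pair when
 * available, two 4-element copies otherwise */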
static_always_inline void
clib_memcpy_u32_x8 (u32 *dst, u32 *src)
{
#if defined(CLIB_HAVE_VEC256)
  u32x8_store_unaligned (u32x8_load_unaligned (src), dst);
#else
  clib_memcpy_u32_x4 (dst, src);
  clib_memcpy_u32_x4 (dst + 4, src + 4);
#endif
}

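/* copy exactly sixteen u32s: one unaligned 512-bit load/store pair when
 * available, two 8-element copies otherwise */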
static_always_inline void
clib_memcpy_u32_x16 (u32 *dst, u32 *src)
{
#if defined(CLIB_HAVE_VEC512)
  u32x16_store_unaligned (u32x16_load_unaligned (src), dst);
#else
  clib_memcpy_u32_x8 (dst, src);
  clib_memcpy_u32_x8 (dst + 8, src + 8);
#endif
}

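/* copy n_left u32s from src to dst using the widest vector operations the
 * target supports, with fast paths for common compile-time-constant sizes */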
static_always_inline void
clib_memcpy_u32 (u32 *dst, u32 *src, u32 n_left)
{
#if defined(CLIB_HAVE_VEC128)
  if (COMPILE_TIME_CONST (n_left))
    {
      /* when n_left is a compile-time constant, keep the compiler from
       * emitting more expensive masked loads/stores for common sizes that
       * a plain, smaller register load/store can handle */
      switch (n_left)
	{
	case 4:
	  clib_memcpy_u32_x4 (dst, src);
	  return;
	case 8:
	  clib_memcpy_u32_x8 (dst, src);
	  return;
	case 12:
	  clib_memcpy_u32_x8 (dst, src);
	  clib_memcpy_u32_x4 (dst + 8, src + 8);
	  return;
	case 16:
	  clib_memcpy_u32_x16 (dst, src);
	  return;
	case 32:
	  clib_memcpy_u32_x16 (dst, src);
	  clib_memcpy_u32_x16 (dst + 16, src + 16);
	  return;
	case 64:
	  clib_memcpy_u32_x16 (dst, src);
	  clib_memcpy_u32_x16 (dst + 16, src + 16);
	  clib_memcpy_u32_x16 (dst + 32, src + 32);
	  clib_memcpy_u32_x16 (dst + 48, src + 48);
	  return;
	default:
	  break;
	}
    }

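  /* bulk of the copy: consume the largest chunks the target's vector
   * width supports first, then progressively smaller ones */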
#if defined(CLIB_HAVE_VEC512)
  while (n_left >= 64)
    {
      clib_memcpy_u32_x16 (dst, src);
      clib_memcpy_u32_x16 (dst + 16, src + 16);
      clib_memcpy_u32_x16 (dst + 32, src + 32);
      clib_memcpy_u32_x16 (dst + 48, src + 48);
      dst += 64;
      src += 64;
      n_left -= 64;
    }
#endif

#if defined(CLIB_HAVE_VEC256)
  while (n_left >= 32)
    {
      clib_memcpy_u32_x16 (dst, src);
      clib_memcpy_u32_x16 (dst + 16, src + 16);
      dst += 32;
      src += 32;
      n_left -= 32;
    }
#endif

  while (n_left >= 16)
    {
      clib_memcpy_u32_x16 (dst, src);
      dst += 16;
      src += 16;
      n_left -= 16;
    }

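  /* tail: where masked loads/stores exist, the remaining elements are
   * finished in a single masked vector operation; otherwise fall through
   * to 8-, 4- and finally 1-element steps */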
#if defined(CLIB_HAVE_VEC512_MASK_LOAD_STORE)
  if (n_left)
    {
      u16 mask = pow2_mask (n_left);
      u32x16_mask_store (u32x16_mask_load_zero (src, mask), dst, mask);
    }
  return;
#endif

  if (n_left >= 8)
    {
      clib_memcpy_u32_x8 (dst, src);
      dst += 8;
      src += 8;
      n_left -= 8;
    }

#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
  if (n_left)
    {
      u8 mask = pow2_mask (n_left);
      u32x8_mask_store (u32x8_mask_load_zero (src, mask), dst, mask);
    }
  return;
#endif

  if (n_left >= 4)
    {
      clib_memcpy_u32_x4 (dst, src);
      dst += 4;
      src += 4;
      n_left -= 4;
    }
#endif

  while (n_left)
    {
      dst[0] = src[0];
      dst += 1;
      src += 1;
      n_left -= 1;
    }
}

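/* under Coverity, bypass the vector paths entirely so the analyzer sees a
 * plain memcpy rather than vector intrinsics */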
#else /* __COVERITY__ */
static_always_inline void
clib_memcpy_u32 (u32 *dst, u32 *src, u32 n_left)
{
  memcpy (dst, src, n_left * sizeof (u32));
}
#endif /* __COVERITY__ */

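/* Illustrative usage sketch (not part of the original header; the array
 * names are hypothetical):
 *
 *   u32 from[12], to[12];
 *   clib_memcpy_u32 (to, from, 12);
 *
 * With a compile-time-constant count of 12 and 256-bit vectors available,
 * this compiles to one 8-element and one 4-element unaligned vector copy;
 * a variable count takes the loop-and-tail path instead. */
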
#endif /* included_memcpy_h */