/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_sse3_h
#define included_clib_memcpy_sse3_h

#include <stdint.h>
#include <x86intrin.h>

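/* Unaligned vector types: the aligned (1) attribute tells the compiler
 * these loads and stores may land on any address, so it emits unaligned
 * SSE/AVX move instructions (movups / vmovdqu) rather than assuming
 * 16- or 32-byte alignment. */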
typedef u8 u8x16u __attribute__ ((vector_size (16), aligned (1)));
typedef u8 u8x32u __attribute__ ((vector_size (32), aligned (1)));

static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  *(u8x16u *) dst = *(u8x16u *) src;
}

static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  *(u8x32u *) dst = *(u8x32u *) src;
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
  clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
  clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
  clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
}

/**
 * Macro for copying an unaligned block from one location to another
 * with a constant load offset; leaves at most 47 leftover bytes, and
 * the locations must not overlap.
 * Requirements:
 * - The store address is 16-byte aligned
 * - The load offset is <offset>, which must be an immediate value in [1, 15]
 * - For <src>, <offset> bytes backwards and <16 - offset> bytes forwards
 *   must be readable
 * - <dst>, <src> and <len> must be variables
 * - __m128i registers <xmm0> ~ <xmm8> must be pre-defined
 */
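/*
 * Technique note: each 16-byte store is assembled from two consecutive
 * loads taken from the rounded-down base (src - offset), which is
 * 16-byte aligned at the call site below. _mm_alignr_epi8 concatenates
 * each pair of loads and shifts right by <offset> bytes, reproducing
 * the bytes that actually start at src, so every load hits an aligned
 * address even though src itself is misaligned.
 */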
#define CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, offset)                                            \
({                                                                                                  \
    int tmp;                                                                                        \
    while (len >= 128 + 16 - offset) {                                                              \
        xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16));               \
        len -= 128;                                                                                 \
        xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16));               \
        xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16));               \
        xmm3 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 3 * 16));               \
        xmm4 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 4 * 16));               \
        xmm5 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 5 * 16));               \
        xmm6 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 6 * 16));               \
        xmm7 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 7 * 16));               \
        xmm8 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 8 * 16));               \
        src = (const u8 *)src + 128;                                                                \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));     \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));     \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset));     \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset));     \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset));     \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset));     \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset));     \
        _mm_storeu_si128((__m128i *)((u8 *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset));     \
        dst = (u8 *)dst + 128;                                                                      \
    }                                                                                               \
    tmp = len;                                                                                      \
    len = ((len - 16 + offset) & 127) + 16 - offset;                                                \
    tmp -= len;                                                                                     \
    src = (const u8 *)src + tmp;                                                                    \
    dst = (u8 *)dst + tmp;                                                                          \
    if (len >= 32 + 16 - offset) {                                                                  \
        while (len >= 32 + 16 - offset) {                                                           \
            xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16));           \
            len -= 32;                                                                              \
            xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16));           \
            xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16));           \
            src = (const u8 *)src + 32;                                                             \
            _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
            _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
            dst = (u8 *)dst + 32;                                                                   \
        }                                                                                           \
        tmp = len;                                                                                  \
        len = ((len - 16 + offset) & 31) + 16 - offset;                                             \
        tmp -= len;                                                                                 \
        src = (const u8 *)src + tmp;                                                                \
        dst = (u8 *)dst + tmp;                                                                      \
    }                                                                                               \
})
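
/*
 * Bookkeeping note: after each copy loop above, the masked
 * recomputation of <len> folds the remaining count to its residue
 * modulo the block size (the 127 and 31 masks, measured from the
 * shifted load base src - offset), and <tmp> advances src/dst past the
 * bytes already accounted for. After both loops at most 47 bytes
 * remain - hence the LEFT47 name - which the caller finishes with its
 * 16/32-byte tail copies.
 */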

/**
 * Macro for copying an unaligned block from one location to another;
 * leaves at most 47 leftover bytes, and the locations must not overlap.
 * A switch is used because _mm_alignr_epi8 (PALIGNR) requires an
 * immediate value for the shift count.
 * Requirements:
 * - The store address is 16-byte aligned
 * - The load offset is <offset>, which must be within [1, 15]
 * - For <src>, <offset> bytes backwards and <16 - offset> bytes forwards
 *   must be readable
 * - __m128i registers <xmm0> ~ <xmm8> used in CLIB_MVUNALIGN_LEFT47_IMM
 *   must be pre-defined
 * - <dst>, <src> and <len> must be variables
 */
#define CLIB_MVUNALIGN_LEFT47(dst, src, len, offset)                   \
({                                                                     \
    switch (offset) {                                                  \
    case 0x01: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x01); break;  \
    case 0x02: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x02); break;  \
    case 0x03: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x03); break;  \
    case 0x04: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x04); break;  \
    case 0x05: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x05); break;  \
    case 0x06: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x06); break;  \
    case 0x07: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x07); break;  \
    case 0x08: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x08); break;  \
    case 0x09: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x09); break;  \
    case 0x0A: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0A); break;  \
    case 0x0B: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0B); break;  \
    case 0x0C: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0C); break;  \
    case 0x0D: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0D); break;  \
    case 0x0E: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0E); break;  \
    case 0x0F: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0F); break;  \
    default:;                                                          \
    }                                                                  \
})
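
/* The empty default arm is unreachable from clib_memcpy: offset == 0
 * takes the aligned path below, and (src & 0x0F) can never exceed 15. */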

static inline void *
clib_memcpy (void *dst, const void *src, size_t n)
{
  __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  size_t dstofss;
  size_t srcofs;

  /**
   * Copy less than 16 bytes: the set bits of n select 1-, 2-, 4- and
   * 8-byte chunks, covering every length in [0, 15] without a loop.
   */
  if (n < 16)
    {
      if (n & 0x01)
        {
          *(u8 *) dstu = *(const u8 *) srcu;
          srcu = (uword) ((const u8 *) srcu + 1);
          dstu = (uword) ((u8 *) dstu + 1);
        }
      if (n & 0x02)
        {
          *(u16 *) dstu = *(const u16 *) srcu;
          srcu = (uword) ((const u16 *) srcu + 1);
          dstu = (uword) ((u16 *) dstu + 1);
        }
      if (n & 0x04)
        {
          *(u32 *) dstu = *(const u32 *) srcu;
          srcu = (uword) ((const u32 *) srcu + 1);
          dstu = (uword) ((u32 *) dstu + 1);
        }
      if (n & 0x08)
        {
          *(u64 *) dstu = *(const u64 *) srcu;
        }
      return ret;
    }

  /**
   * Fast path when the copy size doesn't exceed 512 bytes
   */
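  /* Copies of 17..64 bytes use potentially overlapping moves: a leading
   * 16- or 32-byte block plus a trailing 16-byte block that ends exactly
   * at dst + n, so no scalar tail loop is needed. */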
  if (n == 16)
    {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      return ret;
    }
  if (n <= 32)
    {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 48)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 64)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst + 32, (const u8 *) src + 32);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 128)
    {
      goto COPY_BLOCK_128_BACK15;
    }
  if (n <= 512)
    {
      if (n >= 256)
        {
          n -= 256;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          clib_mov128 ((u8 *) dst + 128, (const u8 *) src + 128);
          src = (const u8 *) src + 256;
          dst = (u8 *) dst + 256;
        }
    COPY_BLOCK_255_BACK15:
      if (n >= 128)
        {
          n -= 128;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 128;
          dst = (u8 *) dst + 128;
        }
    COPY_BLOCK_128_BACK15:
      if (n >= 64)
        {
          n -= 64;
          clib_mov64 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 64;
          dst = (u8 *) dst + 64;
        }
    COPY_BLOCK_64_BACK15:
      if (n >= 32)
        {
          n -= 32;
          clib_mov32 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 32;
          dst = (u8 *) dst + 32;
        }
      if (n > 16)
        {
          clib_mov16 ((u8 *) dst, (const u8 *) src);
          clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
          return ret;
        }
      if (n > 0)
        {
          clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
        }
      return ret;
    }

  /**
   * Make the store aligned when the copy size exceeds 512 bytes, and
   * make sure the first 15 bytes are copied, because the unaligned
   * copy macro reads up to 15 bytes behind src.
   */
  dstofss = 16 - ((uword) dst & 0x0F) + 16;
  n -= dstofss;
  clib_mov32 ((u8 *) dst, (const u8 *) src);
  src = (const u8 *) src + dstofss;
  dst = (u8 *) dst + dstofss;
  srcofs = ((uword) src & 0x0F);
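  /* dstofss lands in [17, 32]: the clib_mov32 above covers those bytes,
   * dst becomes 16-byte aligned, and src advances far enough that the
   * unaligned loop's reads at (src - srcofs) stay inside the source
   * buffer. */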

  /**
   * For aligned copy
   */
  if (srcofs == 0)
    {
      /**
       * Copy 256-byte blocks
       */
      for (; n >= 256; n -= 256)
        {
          clib_mov256 ((u8 *) dst, (const u8 *) src);
          dst = (u8 *) dst + 256;
          src = (const u8 *) src + 256;
        }

      /**
       * Copy whatever is left
       */
      goto COPY_BLOCK_255_BACK15;
    }

  /**
   * For copy with unaligned load
   */
  CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_64_BACK15;
}
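
/*
 * Usage sketch (hypothetical caller; buffers must not overlap):
 *
 *   u8 a[600], b[600];
 *   clib_memcpy (b, a, sizeof (a));  // picks a path from n and alignment
 */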


#undef CLIB_MVUNALIGN_LEFT47_IMM
#undef CLIB_MVUNALIGN_LEFT47

#endif /* included_clib_memcpy_sse3_h */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */