/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_sse3_h
#define included_clib_memcpy_sse3_h

#include <stdint.h>
#include <x86intrin.h>
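
/*
 * Note: the u8/u16/u32/u64/uword fixed-width types used below are
 * assumed to be the vppinfra typedefs, already defined by whatever
 * translation unit includes this header; they are not defined here.
 */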
static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128 ((const __m128i *) src);
  _mm_storeu_si128 ((__m128i *) dst, xmm0);
}

static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  clib_mov16 ((u8 *) dst + 0 * 16, (const u8 *) src + 0 * 16);
  clib_mov16 ((u8 *) dst + 1 * 16, (const u8 *) src + 1 * 16);
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
  clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
  clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
  clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
}
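
/*
 * Each clib_movN above simply composes two half-size moves, so
 * clib_mov64 expands to four unaligned 16-byte SSE load/store pairs,
 * clib_mov256 to sixteen. Illustrative use (hypothetical buffers):
 *
 *   u8 out[64], in[64];
 *   clib_mov64 (out, in);   // copies one 64-byte cache line
 */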

/**
 * Macro for copying an unaligned block from one location to another
 * with a constant load offset; at most 47 bytes may be left over,
 * and the locations must not overlap.
 * Requirements:
 * - The store address is 16-byte aligned
 * - The load offset is <offset>, which must be an immediate value in [1, 15]
 * - For <src>, <offset> bytes backwards and <16 - offset> bytes forwards
 *   must be readable
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, offset)                \
({                                                                      \
  int tmp;                                                              \
  while (len >= 128 + 16 - offset) {                                    \
    xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
    len -= 128;                                                         \
    xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
    xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
    xmm3 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 3 * 16)); \
    xmm4 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 4 * 16)); \
    xmm5 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 5 * 16)); \
    xmm6 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 6 * 16)); \
    xmm7 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 7 * 16)); \
    xmm8 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 8 * 16)); \
    src = (const u8 *)src + 128;                                        \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
    _mm_storeu_si128((__m128i *)((u8 *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
    dst = (u8 *)dst + 128;                                              \
  }                                                                     \
  tmp = len;                                                            \
  len = ((len - 16 + offset) & 127) + 16 - offset;                      \
  tmp -= len;                                                           \
  src = (const u8 *)src + tmp;                                          \
  dst = (u8 *)dst + tmp;                                                \
  if (len >= 32 + 16 - offset) {                                        \
    while (len >= 32 + 16 - offset) {                                   \
      xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
      len -= 32;                                                        \
      xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
      xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
      src = (const u8 *)src + 32;                                       \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
      _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
      dst = (u8 *)dst + 32;                                             \
    }                                                                   \
    tmp = len;                                                          \
    len = ((len - 16 + offset) & 31) + 16 - offset;                     \
    tmp -= len;                                                         \
    src = (const u8 *)src + tmp;                                        \
    dst = (u8 *)dst + tmp;                                              \
  }                                                                     \
})
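
/*
 * Illustration (not part of the API): with offset = 5, each output
 * vector in the loop above is assembled as
 *
 *   xmm0 = _mm_loadu_si128 ((const __m128i *) (src - 5));   // src[-5..10]
 *   xmm1 = _mm_loadu_si128 ((const __m128i *) (src + 11));  // src[11..26]
 *   out  = _mm_alignr_epi8 (xmm1, xmm0, 5);                 // src[0..15]
 *
 * PALIGNR concatenates xmm1:xmm0 and shifts right by the byte offset,
 * so 16 contiguous source bytes are rebuilt for each aligned store.
 */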

/**
 * Macro for copying an unaligned block from one location to another;
 * at most 47 bytes may be left over, and the locations must not overlap.
 * A switch is used because the aligning instruction (PALIGNR) requires
 * an immediate value for its shift count.
 * Requirements:
 * - The store address is 16-byte aligned
 * - The load offset is <offset>, which must be within [1, 15]
 * - For <src>, <offset> bytes backwards and <16 - offset> bytes forwards
 *   must be readable
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> used in CLIB_MVUNALIGN_LEFT47_IMM must be pre-defined
 */
#define CLIB_MVUNALIGN_LEFT47(dst, src, len, offset)                    \
({                                                                      \
  switch (offset) {                                                     \
  case 0x01: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x01); break;     \
  case 0x02: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x02); break;     \
  case 0x03: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x03); break;     \
  case 0x04: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x04); break;     \
  case 0x05: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x05); break;     \
  case 0x06: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x06); break;     \
  case 0x07: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x07); break;     \
  case 0x08: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x08); break;     \
  case 0x09: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x09); break;     \
  case 0x0A: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0A); break;     \
  case 0x0B: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0B); break;     \
  case 0x0C: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0C); break;     \
  case 0x0D: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0D); break;     \
  case 0x0E: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0E); break;     \
  case 0x0F: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0F); break;     \
  default:;                                                             \
  }                                                                     \
})
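
/*
 * The switch is required because _mm_alignr_epi8 (PALIGNR) takes its
 * shift count as an 8-bit immediate, so a runtime offset has to be
 * dispatched to one of 15 compile-time specializations. The macro is
 * invoked below from clib_memcpy_fast, where xmm0..xmm8, dst, src and
 * n are in scope:
 *
 *   CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);
 */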

static inline void *
clib_memcpy_fast (void *dst, const void *src, size_t n)
{
  __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  size_t dstofss;
  size_t srcofs;

  /**
   * Copy less than 16 bytes
   */
  if (n < 16)
    {
      if (n & 0x01)
	{
	  *(u8 *) dstu = *(const u8 *) srcu;
	  srcu = (uword) ((const u8 *) srcu + 1);
	  dstu = (uword) ((u8 *) dstu + 1);
	}
      if (n & 0x02)
	{
	  *(u16 *) dstu = *(const u16 *) srcu;
	  srcu = (uword) ((const u16 *) srcu + 1);
	  dstu = (uword) ((u16 *) dstu + 1);
	}
      if (n & 0x04)
	{
	  *(u32 *) dstu = *(const u32 *) srcu;
	  srcu = (uword) ((const u32 *) srcu + 1);
	  dstu = (uword) ((u32 *) dstu + 1);
	}
      if (n & 0x08)
	{
	  *(u64 *) dstu = *(const u64 *) srcu;
	}
      return ret;
    }
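
  /*
   * Worked example: n = 13 = 0x08 + 0x04 + 0x01, so the bit tests
   * above issue a 1-byte, a 4-byte and an 8-byte move; any
   * sub-16-byte copy completes in at most four scalar moves.
   */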

  /**
   * Fast path when the copy size does not exceed 512 bytes
   */
  if (n <= 32)
    {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
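
  /*
   * Head/tail trick: a 16-byte move at dst plus a 16-byte move ending
   * exactly at dst + n covers every size in (16, 32] with overlapping
   * stores, avoiding any scalar tail loop. The larger branches below
   * use the same idea.
   */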
  if (n <= 48)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 64)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst + 32, (const u8 *) src + 32);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 128)
    {
      goto COPY_BLOCK_128_BACK15;
    }
  if (n <= 512)
    {
      if (n >= 256)
	{
	  n -= 256;
	  clib_mov128 ((u8 *) dst, (const u8 *) src);
	  clib_mov128 ((u8 *) dst + 128, (const u8 *) src + 128);
	  src = (const u8 *) src + 256;
	  dst = (u8 *) dst + 256;
	}
    COPY_BLOCK_255_BACK15:
      if (n >= 128)
	{
	  n -= 128;
	  clib_mov128 ((u8 *) dst, (const u8 *) src);
	  src = (const u8 *) src + 128;
	  dst = (u8 *) dst + 128;
	}
    COPY_BLOCK_128_BACK15:
      if (n >= 64)
	{
	  n -= 64;
	  clib_mov64 ((u8 *) dst, (const u8 *) src);
	  src = (const u8 *) src + 64;
	  dst = (u8 *) dst + 64;
	}
    COPY_BLOCK_64_BACK15:
      if (n >= 32)
	{
	  n -= 32;
	  clib_mov32 ((u8 *) dst, (const u8 *) src);
	  src = (const u8 *) src + 32;
	  dst = (u8 *) dst + 32;
	}
      if (n > 16)
	{
	  clib_mov16 ((u8 *) dst, (const u8 *) src);
	  clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
	  return ret;
	}
      if (n > 0)
	{
	  clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
	}
      return ret;
    }
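
  /*
   * The COPY_BLOCK_*_BACK15 labels form a fall-through chain that
   * peels off 128-, 64- and 32-byte blocks in turn; the trailing
   * "BACK15" moves may start up to 15 bytes before the current dst,
   * harmlessly rewriting bytes that were already copied.
   */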

  /**
   * Make the store aligned when the copy size exceeds 512 bytes,
   * and make sure the first 15 bytes are copied, because the
   * unaligned-copy macros require up to 15 bytes of backwards
   * access on src.
   */
  dstofss = (uword) dst & 0x0F;
  if (dstofss > 0)
    {
      dstofss = 16 - dstofss + 16;
      n -= dstofss;
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      src = (const u8 *) src + dstofss;
      dst = (u8 *) dst + dstofss;
    }
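
  /*
   * Example: if dst & 0x0F == 5, dstofss = 16 - 5 + 16 = 27; the
   * 32-byte move covers those 27 bytes, and advancing both pointers
   * by 27 leaves dst 16-byte aligned. The extra 16 guarantees src has
   * advanced far enough that the unaligned loads below may reach up
   * to 15 bytes backwards without leaving the source buffer.
   */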
  srcofs = ((uword) src & 0x0F);

  /**
   * For aligned copy
   */
  if (srcofs == 0)
    {
      /**
       * Copy 256-byte blocks
       */
      for (; n >= 256; n -= 256)
	{
	  clib_mov256 ((u8 *) dst, (const u8 *) src);
	  dst = (u8 *) dst + 256;
	  src = (const u8 *) src + 256;
	}

      /**
       * Copy whatever is left
       */
      goto COPY_BLOCK_255_BACK15;
    }

  /**
   * For copy with an unaligned load
   */
  CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);

  /**
   * Copy whatever is left (the macro above leaves at most 47 bytes)
   */
  goto COPY_BLOCK_64_BACK15;
}


#undef CLIB_MVUNALIGN_LEFT47_IMM
#undef CLIB_MVUNALIGN_LEFT47

#endif /* included_clib_memcpy_sse3_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */