/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_avx_h
#define included_clib_memcpy_avx_h

#include <stdint.h>
#include <x86intrin.h>

static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128 ((const __m128i *) src);
  _mm_storeu_si128 ((__m128i *) dst, xmm0);
}

static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  __m256i ymm0;

  ymm0 = _mm256_loadu_si256 ((const __m256i *) src);
  _mm256_storeu_si256 ((__m256i *) dst, ymm0);
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
  clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
  clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
  clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
}

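/*
 * clib_mov64blocks / clib_mov256blocks copy as many whole 64-byte
 * (respectively 256-byte) blocks as fit in n; any remainder smaller
 * than the block size is left for the caller to handle.
 */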
static inline void
clib_mov64blocks (u8 * dst, const u8 * src, size_t n)
{
  __m256i ymm0, ymm1;

  while (n >= 64)
    {
      ymm0 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 0 * 32));
      n -= 64;
      ymm1 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 1 * 32));
      src = (const u8 *) src + 64;
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 0 * 32), ymm0);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 1 * 32), ymm1);
      dst = (u8 *) dst + 64;
    }
}

static inline void
clib_mov256blocks (u8 * dst, const u8 * src, size_t n)
{
  __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;

  while (n >= 256)
    {
      ymm0 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 0 * 32));
      n -= 256;
      ymm1 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 1 * 32));
      ymm2 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 2 * 32));
      ymm3 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 3 * 32));
      ymm4 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 4 * 32));
      ymm5 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 5 * 32));
      ymm6 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 6 * 32));
      ymm7 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 7 * 32));
      src = (const u8 *) src + 256;
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 0 * 32), ymm0);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 1 * 32), ymm1);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 2 * 32), ymm2);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 3 * 32), ymm3);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 4 * 32), ymm4);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 5 * 32), ymm5);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 6 * 32), ymm6);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 7 * 32), ymm7);
      dst = (u8 *) dst + 256;
    }
}

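/*
 * clib_memcpy: memcpy-style copy with an AVX fast path.  Sizes up to
 * 512 bytes are handled with straight-line SSE/AVX moves; larger
 * copies first align the destination to 32 bytes, then run the
 * 256-byte and 64-byte block loops above, and finish with a
 * (possibly overlapping) tail copy.  Returns dst, like memcpy().
 */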
static inline void *
clib_memcpy (void *dst, const void *src, size_t n)
{
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  size_t dstofss;
  size_t bits;

  /**
   * Copy less than 16 bytes
   */
  if (n < 16)
    {
      if (n & 0x01)
        {
          *(u8 *) dstu = *(const u8 *) srcu;
          srcu = (uword) ((const u8 *) srcu + 1);
          dstu = (uword) ((u8 *) dstu + 1);
        }
      if (n & 0x02)
        {
          *(uint16_t *) dstu = *(const uint16_t *) srcu;
          srcu = (uword) ((const uint16_t *) srcu + 1);
          dstu = (uword) ((uint16_t *) dstu + 1);
        }
      if (n & 0x04)
        {
          *(uint32_t *) dstu = *(const uint32_t *) srcu;
          srcu = (uword) ((const uint32_t *) srcu + 1);
          dstu = (uword) ((uint32_t *) dstu + 1);
        }
      if (n & 0x08)
        {
          *(uint64_t *) dstu = *(const uint64_t *) srcu;
        }
      return ret;
    }

  /**
   * Fast path when the copy size doesn't exceed 512 bytes
   */
  if (n <= 32)
    {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 64)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
      return ret;
    }
  if (n <= 512)
    {
      if (n >= 256)
        {
          n -= 256;
          clib_mov256 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 256;
          dst = (u8 *) dst + 256;
        }
      if (n >= 128)
        {
          n -= 128;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 128;
          dst = (u8 *) dst + 128;
        }
      if (n >= 64)
        {
          n -= 64;
          clib_mov64 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 64;
          dst = (u8 *) dst + 64;
        }
    COPY_BLOCK_64_BACK31:
      if (n > 32)
        {
          clib_mov32 ((u8 *) dst, (const u8 *) src);
          clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
          return ret;
        }
      if (n > 0)
        {
          clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
        }
      return ret;
    }

  /**
   * Make stores aligned when the copy size exceeds 512 bytes
   */
  dstofss = (uword) dst & 0x1F;
  if (dstofss > 0)
    {
      dstofss = 32 - dstofss;
      n -= dstofss;
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      src = (const u8 *) src + dstofss;
      dst = (u8 *) dst + dstofss;
    }

  /**
   * Copy 256-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  clib_mov256blocks ((u8 *) dst, (const u8 *) src, n);
  bits = n;
  n = n & 255;
  bits -= n;
  src = (const u8 *) src + bits;
  dst = (u8 *) dst + bits;

  /**
   * Copy 64-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  if (n >= 64)
    {
      clib_mov64blocks ((u8 *) dst, (const u8 *) src, n);
      bits = n;
      n = n & 63;
      bits -= n;
      src = (const u8 *) src + bits;
      dst = (u8 *) dst + bits;
    }

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_64_BACK31;
}
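
/*
 * A minimal usage sketch (illustrative only; the buffers and sizes are
 * hypothetical, and u8/uword are assumed to be provided by the including
 * vppinfra headers):
 *
 *   u8 scratch[2048], packet[2048];
 *
 *   clib_memcpy (scratch, packet, sizeof (packet)); // > 512 bytes: aligned AVX block loops
 *   clib_memcpy (scratch, packet, 48);              // <= 64 bytes: two overlapping 32-byte moves
 */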

#endif /* included_clib_memcpy_avx_h */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */