/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_avx_h
#define included_clib_memcpy_avx_h

#include <stdint.h>
#include <x86intrin.h>

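/* Vector types declared with 1-byte alignment: dereferencing them lets the
 * compiler emit unaligned SSE/AVX loads and stores without undefined
 * behaviour on misaligned pointers. */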
typedef u8 u8x16u __attribute__ ((vector_size (16), aligned (1)));
typedef u8 u8x32u __attribute__ ((vector_size (32), aligned (1)));

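/* Fixed-size copy helpers: 16 and 32 bytes are single unaligned vector
 * moves; 64, 128 and 256 bytes are built by doubling the smaller helpers. */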
static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  *(u8x16u *) dst = *(u8x16u *) src;
}

static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  *(u8x32u *) dst = *(u8x32u *) src;
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
  clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
  clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
  clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
}

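/* Copy full 64-byte blocks with unaligned 32-byte AVX loads/stores; any
 * remainder below 64 bytes is left for the caller to handle. */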
static inline void
clib_mov64blocks (u8 * dst, const u8 * src, size_t n)
{
  __m256i ymm0, ymm1;

  while (n >= 64)
    {
      ymm0 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 0 * 32));
      n -= 64;
      ymm1 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 1 * 32));
      src = (const u8 *) src + 64;
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 0 * 32), ymm0);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 1 * 32), ymm1);
      dst = (u8 *) dst + 64;
    }
}

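/* Copy full 256-byte blocks; all eight ymm registers are loaded before any
 * store so the unaligned loads can be scheduled ahead of the stores. */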
static inline void
clib_mov256blocks (u8 * dst, const u8 * src, size_t n)
{
  __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;

  while (n >= 256)
    {
      ymm0 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 0 * 32));
      n -= 256;
      ymm1 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 1 * 32));
      ymm2 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 2 * 32));
      ymm3 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 3 * 32));
      ymm4 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 4 * 32));
      ymm5 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 5 * 32));
      ymm6 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 6 * 32));
      ymm7 =
        _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 7 * 32));
      src = (const u8 *) src + 256;
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 0 * 32), ymm0);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 1 * 32), ymm1);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 2 * 32), ymm2);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 3 * 32), ymm3);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 4 * 32), ymm4);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 5 * 32), ymm5);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 6 * 32), ymm6);
      _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 7 * 32), ymm7);
      dst = (u8 *) dst + 256;
    }
}

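/* clib_memcpy: memcpy replacement tuned for AVX.
 * - n < 16:   scalar moves selected by the bits of n
 * - n <= 512: fixed-size vector moves with overlapping head/tail copies
 * - n > 512:  align dst to 32 bytes, then stream 256- and 64-byte blocks
 */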
static inline void *
clib_memcpy (void *dst, const void *src, size_t n)
{
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  size_t dstofss;
  size_t bits;

  /**
   * Copy less than 16 bytes
   */
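  /* The low four bits of n select which of the 8/4/2/1-byte moves run. */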
  if (n < 16)
    {
      if (n & 0x01)
        {
          *(u8 *) dstu = *(const u8 *) srcu;
          srcu = (uword) ((const u8 *) srcu + 1);
          dstu = (uword) ((u8 *) dstu + 1);
        }
      if (n & 0x02)
        {
          *(uint16_t *) dstu = *(const uint16_t *) srcu;
          srcu = (uword) ((const uint16_t *) srcu + 1);
          dstu = (uword) ((uint16_t *) dstu + 1);
        }
      if (n & 0x04)
        {
          *(uint32_t *) dstu = *(const uint32_t *) srcu;
          srcu = (uword) ((const uint32_t *) srcu + 1);
          dstu = (uword) ((uint32_t *) dstu + 1);
        }
      if (n & 0x08)
        {
          *(uint64_t *) dstu = *(const uint64_t *) srcu;
        }
      return ret;
    }

  /**
   * Fast way when copy size doesn't exceed 512 bytes
   */
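  /* For 17..64 bytes, two possibly overlapping vector moves (one from the
   * start, one ending exactly at dst + n) cover the whole range. */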
  if (n <= 32)
    {
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 64)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
      return ret;
    }
  if (n <= 512)
    {
      if (n >= 256)
        {
          n -= 256;
          clib_mov256 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 256;
          dst = (u8 *) dst + 256;
        }
      if (n >= 128)
        {
          n -= 128;
          clib_mov128 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 128;
          dst = (u8 *) dst + 128;
        }
      if (n >= 64)
        {
          n -= 64;
          clib_mov64 ((u8 *) dst, (const u8 *) src);
          src = (const u8 *) src + 64;
          dst = (u8 *) dst + 64;
        }
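      /* At most 63 bytes remain here; the > 512-byte path below jumps back
       * to this label for its tail as well. */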
    COPY_BLOCK_64_BACK31:
      if (n > 32)
        {
          clib_mov32 ((u8 *) dst, (const u8 *) src);
          clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
          return ret;
        }
      if (n > 0)
        {
          clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
        }
      return ret;
    }

  /**
   * Make store aligned when copy size exceeds 512 bytes
   */
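  /* Peel off up to 31 bytes with one unaligned 32-byte move so that dst is
   * 32-byte aligned for the block copies that follow. */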
  dstofss = (uword) dst & 0x1F;
  if (dstofss > 0)
    {
      dstofss = 32 - dstofss;
      n -= dstofss;
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      src = (const u8 *) src + dstofss;
      dst = (u8 *) dst + dstofss;
    }

  /**
   * Copy 256-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
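  /* After the call, 'bits' holds the number of bytes the block copy
   * consumed and 'n' keeps the remainder (n mod 256). */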
  clib_mov256blocks ((u8 *) dst, (const u8 *) src, n);
  bits = n;
  n = n & 255;
  bits -= n;
  src = (const u8 *) src + bits;
  dst = (u8 *) dst + bits;

  /**
   * Copy 64-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  if (n >= 64)
    {
      clib_mov64blocks ((u8 *) dst, (const u8 *) src, n);
      bits = n;
      n = n & 63;
      bits -= n;
      src = (const u8 *) src + bits;
      dst = (u8 *) dst + bits;
    }

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_64_BACK31;
}

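/*
 * Usage sketch (illustrative, not part of the original header): clib_memcpy
 * follows memcpy() semantics and returns the destination pointer; the buffer
 * names below are hypothetical.
 *
 *   u8 dst_buf[600], src_buf[600];
 *   clib_memcpy (dst_buf, src_buf, sizeof (src_buf));
 */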
#endif /* included_clib_memcpy_avx_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */