/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef included_clib_string_h
#define included_clib_string_h

#include <vppinfra/clib.h>	/* for CLIB_LINUX_KERNEL */
#include <vppinfra/vector.h>

#ifdef CLIB_LINUX_KERNEL
#include <linux/string.h>
#endif

#ifdef CLIB_UNIX
#include <string.h>
#endif

#ifdef CLIB_STANDALONE
#include <vppinfra/standalone_string.h>
#endif

#if __x86_64__
#include <x86intrin.h>
#endif

/* Exchanges the contents of the two BYTES-long regions at _a and _b. */
void clib_memswap (void *_a, void *_b, uword bytes);
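
/*
 * Illustrative use (a sketch; the record type is hypothetical):
 *
 *   my_rec_t a, b;
 *   clib_memswap (&a, &b, sizeof (a));
 */
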
/*
 * The vector unit memcpy variants confuse Coverity, so don't let it
 * anywhere near them.
 */
#ifndef __COVERITY__
#if __AVX512F__
#include <vppinfra/memcpy_avx512.h>
#elif __AVX2__
#include <vppinfra/memcpy_avx2.h>
#elif __SSSE3__
#include <vppinfra/memcpy_sse3.h>
#else
#define clib_memcpy(a,b,c) memcpy(a,b,c)
#endif
#else /* __COVERITY__ */
#define clib_memcpy(a,b,c) memcpy(a,b,c)
#endif
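
/*
 * clib_memcpy is a drop-in replacement for memcpy; the widest variant
 * supported by the target ISA is selected above. Illustrative use
 * (a sketch):
 *
 *   u8 dst[128], src[128];
 *   clib_memcpy (dst, src, sizeof (src));
 */
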
/*
 * Copy 64 bytes of data to 4 destinations.
 * This function is typically used in the quad-loop case, when a whole
 * cache line needs to be copied to 4 different places. It first reads
 * the whole cache line into 1/2/4 SIMD registers and then writes the
 * data to the 4 destinations.
 */

static_always_inline void
clib_memcpy64_x4 (void *d0, void *d1, void *d2, void *d3, void *s)
{
#if defined (__AVX512F__)
  __m512i r0 = _mm512_loadu_si512 (s);

  _mm512_storeu_si512 (d0, r0);
  _mm512_storeu_si512 (d1, r0);
  _mm512_storeu_si512 (d2, r0);
  _mm512_storeu_si512 (d3, r0);

#elif defined (__AVX2__)
  __m256i r0 = _mm256_loadu_si256 ((__m256i *) (s + 0 * 32));
  __m256i r1 = _mm256_loadu_si256 ((__m256i *) (s + 1 * 32));

  _mm256_storeu_si256 ((__m256i *) (d0 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d0 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d1 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d1 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d2 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d2 + 1 * 32), r1);

  _mm256_storeu_si256 ((__m256i *) (d3 + 0 * 32), r0);
  _mm256_storeu_si256 ((__m256i *) (d3 + 1 * 32), r1);

#elif defined (__SSSE3__)
  __m128i r0 = _mm_loadu_si128 ((__m128i *) (s + 0 * 16));
  __m128i r1 = _mm_loadu_si128 ((__m128i *) (s + 1 * 16));
  __m128i r2 = _mm_loadu_si128 ((__m128i *) (s + 2 * 16));
  __m128i r3 = _mm_loadu_si128 ((__m128i *) (s + 3 * 16));

  _mm_storeu_si128 ((__m128i *) (d0 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d0 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d0 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d0 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d1 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d1 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d1 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d1 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d2 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d2 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d2 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d2 + 3 * 16), r3);

  _mm_storeu_si128 ((__m128i *) (d3 + 0 * 16), r0);
  _mm_storeu_si128 ((__m128i *) (d3 + 1 * 16), r1);
  _mm_storeu_si128 ((__m128i *) (d3 + 2 * 16), r2);
  _mm_storeu_si128 ((__m128i *) (d3 + 3 * 16), r3);

#else
  clib_memcpy (d0, s, 64);
  clib_memcpy (d1, s, 64);
  clib_memcpy (d2, s, 64);
  clib_memcpy (d3, s, 64);
#endif
}
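
/*
 * Illustrative quad-loop usage (a sketch; the destination array and
 * counters are hypothetical):
 *
 *   void **d = dests;
 *   while (n_left >= 4)
 *     {
 *       clib_memcpy64_x4 (d[0], d[1], d[2], d[3], src);
 *       d += 4;
 *       n_left -= 4;
 *     }
 */
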
/*
 * Typed memset variants: fill COUNT fixed-width elements at P with
 * VAL, using the widest SIMD stores available and a scalar loop for
 * the tail.
 */
static_always_inline void
clib_memset_u64 (void *p, u64 val, uword count)
{
  u64 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u64x8 v512 = u64x8_splat (val);
  while (count >= 8)
    {
      u64x8_store_unaligned (v512, ptr);
      ptr += 8;
      count -= 8;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u64x4 v256 = u64x4_splat (val);
  while (count >= 4)
    {
      u64x4_store_unaligned (v256, ptr);
      ptr += 4;
      count -= 4;
    }
  if (count == 0)
    return;
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}

static_always_inline void
clib_memset_u32 (void *p, u32 val, uword count)
{
  u32 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u32x16 v512 = u32x16_splat (val);
  while (count >= 16)
    {
      u32x16_store_unaligned (v512, ptr);
      ptr += 16;
      count -= 16;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u32x8 v256 = u32x8_splat (val);
  while (count >= 8)
    {
      u32x8_store_unaligned (v256, ptr);
      ptr += 8;
      count -= 8;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u32x4 v128 = u32x4_splat (val);
  while (count >= 4)
    {
      u32x4_store_unaligned (v128, ptr);
      ptr += 4;
      count -= 4;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}

static_always_inline void
clib_memset_u16 (void *p, u16 val, uword count)
{
  u16 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u16x32 v512 = u16x32_splat (val);
  while (count >= 32)
    {
      u16x32_store_unaligned (v512, ptr);
      ptr += 32;
      count -= 32;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u16x16 v256 = u16x16_splat (val);
  while (count >= 16)
    {
      u16x16_store_unaligned (v256, ptr);
      ptr += 16;
      count -= 16;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u16x8 v128 = u16x8_splat (val);
  while (count >= 8)
    {
      u16x8_store_unaligned (v128, ptr);
      ptr += 8;
      count -= 8;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}

static_always_inline void
clib_memset_u8 (void *p, u8 val, uword count)
{
  u8 *ptr = p;
#if defined(CLIB_HAVE_VEC512)
  u8x64 v512 = u8x64_splat (val);
  while (count >= 64)
    {
      u8x64_store_unaligned (v512, ptr);
      ptr += 64;
      count -= 64;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC256)
  u8x32 v256 = u8x32_splat (val);
  while (count >= 32)
    {
      u8x32_store_unaligned (v256, ptr);
      ptr += 32;
      count -= 32;
    }
  if (count == 0)
    return;
#endif
#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
  u8x16 v128 = u8x16_splat (val);
  while (count >= 16)
    {
      u8x16_store_unaligned (v128, ptr);
      ptr += 16;
      count -= 16;
    }
#else
  while (count >= 4)
    {
      ptr[0] = ptr[1] = ptr[2] = ptr[3] = val;
      ptr += 4;
      count -= 4;
    }
#endif
  while (count--)
    ptr++[0] = val;
}
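
/*
 * Illustrative use (a sketch): fill a table of 32-bit indices with a
 * sentinel value.
 *
 *   u32 next_index[512];
 *   clib_memset_u32 (next_index, ~0, ARRAY_LEN (next_index));
 */
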
/*
 * Return the number of leading elements of DATA that are equal to
 * DATA[0], looking at no more than MAX_COUNT elements.
 */
static_always_inline uword
clib_count_equal_u64 (u64 * data, uword max_count)
{
  uword count = 0;
  u64 first;

  /* Reading data[1] below requires at least two readable elements. */
  if (max_count <= 1)
    return max_count;
  if (data[0] != data[1])
    return 1;

  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u64x4 splat = u64x4_splat (first);
  /* Bounding the loop by max_count keeps every vector load in range. */
  while (count + 3 < max_count)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u64x4_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
	{
	  count += count_trailing_zeros (~bmp) / 8;
	  return clib_min (count, max_count);
	}

      data += 4;
      count += 4;
    }
#else
  count += 2;
  data += 2;
  while (count + 3 < max_count &&
	 ((data[0] ^ first) | (data[1] ^ first) |
	  (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
#endif
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}

static_always_inline uword
clib_count_equal_u32 (u32 * data, uword max_count)
{
  uword count = 0;
  u32 first;

  if (max_count <= 1)
    return max_count;
  if (data[0] != data[1])
    return 1;

  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u32x8 splat = u32x8_splat (first);
  while (count + 7 < max_count)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u32x8_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
	{
	  count += count_trailing_zeros (~bmp) / 4;
	  return clib_min (count, max_count);
	}

      data += 8;
      count += 8;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u32x4 splat = u32x4_splat (first);
  while (count + 3 < max_count)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u32x4_load_unaligned (data) == splat));
      if (bmp != 0xffff)
	{
	  count += count_trailing_zeros (~bmp) / 4;
	  return clib_min (count, max_count);
	}

      data += 4;
      count += 4;
    }
#else
  count += 2;
  data += 2;
  while (count + 3 < max_count &&
	 ((data[0] ^ first) | (data[1] ^ first) |
	  (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
#endif
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}

static_always_inline uword
clib_count_equal_u16 (u16 * data, uword max_count)
{
  uword count = 0;
  u16 first;

  if (max_count <= 1)
    return max_count;
  if (data[0] != data[1])
    return 1;

  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u16x16 splat = u16x16_splat (first);
  while (count + 15 < max_count)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u16x16_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
	{
	  count += count_trailing_zeros (~bmp) / 2;
	  return clib_min (count, max_count);
	}

      data += 16;
      count += 16;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u16x8 splat = u16x8_splat (first);
  while (count + 7 < max_count)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u16x8_load_unaligned (data) == splat));
      if (bmp != 0xffff)
	{
	  count += count_trailing_zeros (~bmp) / 2;
	  return clib_min (count, max_count);
	}

      data += 8;
      count += 8;
    }
#else
  count += 2;
  data += 2;
  while (count + 3 < max_count &&
	 ((data[0] ^ first) | (data[1] ^ first) |
	  (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
#endif
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}

static_always_inline uword
clib_count_equal_u8 (u8 * data, uword max_count)
{
  uword count = 0;
  u8 first;

  if (max_count <= 1)
    return max_count;
  if (data[0] != data[1])
    return 1;

  first = data[0];

#if defined(CLIB_HAVE_VEC256)
  u8x32 splat = u8x32_splat (first);
  while (count + 31 < max_count)
    {
      u64 bmp;
      bmp = u8x32_msb_mask ((u8x32) (u8x32_load_unaligned (data) == splat));
      if (bmp != 0xffffffff)
	{
	  count += count_trailing_zeros (~bmp);
	  return clib_min (count, max_count);
	}

      data += 32;
      count += 32;
    }
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u8x16 splat = u8x16_splat (first);
  while (count + 15 < max_count)
    {
      u64 bmp;
      bmp = u8x16_msb_mask ((u8x16) (u8x16_load_unaligned (data) == splat));
      if (bmp != 0xffff)
	{
	  count += count_trailing_zeros (~bmp);
	  return clib_min (count, max_count);
	}

      data += 16;
      count += 16;
    }
#else
  count += 2;
  data += 2;
  while (count + 3 < max_count &&
	 ((data[0] ^ first) | (data[1] ^ first) |
	  (data[2] ^ first) | (data[3] ^ first)) == 0)
    {
      data += 4;
      count += 4;
    }
#endif
  while (count < max_count && (data[0] == first))
    {
      data += 1;
      count += 1;
    }
  return count;
}
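
/*
 * Illustrative use (a sketch): measure the run of identical entries
 * at the start of a buffer.
 *
 *   u32 entries[256];
 *   ...
 *   uword run = clib_count_equal_u32 (entries, ARRAY_LEN (entries));
 */
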

#endif /* included_clib_string_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */