/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#if USE_DLMALLOC == 0
#include <vppinfra/mheap_bootstrap.h>
#else
#include <vppinfra/dlmalloc.h>
#endif

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256

/* Per CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread.  We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
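
/* A hedged usage sketch (not part of the original header): a newly
   created worker thread claims its heap slot before its first
   allocation.  'my_worker_fn' is a hypothetical pthread start routine.

     static void *
     my_worker_fn (void *arg)
     {
       clib_mem_set_thread_index ();    // claim a clib_per_cpu_mheaps slot
       void *p = clib_mem_alloc (128);  // safe: uses this thread's heap
       ...
       return 0;
     }
*/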

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
#if USE_DLMALLOC == 0
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  return mheap_elt_data_bytes (e);
#else
  return mspace_usable_size_with_delta (p);
#endif
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

#if USE_DLMALLOC == 0
  uword offset;
  heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
  clib_per_cpu_mheaps[cpu] = heap;
  if (PREDICT_TRUE (offset != ~0))
    p = heap + offset;
  else
    p = 0;			/* don't read p uninitialized on failure */
#else
  p = mspace_get_aligned (heap, size, align, align_offset);
#endif /* USE_DLMALLOC */

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
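
/* A hedged usage sketch (not from the original header): align_offset
   aligns an interior byte of the allocation rather than byte 0.  Here
   the payload following a hypothetical 'my_hdr_t' header lands on a
   64-byte boundary.

     my_hdr_t *h =
       clib_mem_alloc_aligned_at_offset (sizeof (my_hdr_t) + n_payload,
					 64,		     // align
					 sizeof (my_hdr_t),  // align_offset
					 1);  // call os_out_of_memory()
     // (h + 1), i.e. the payload, is now 64-byte aligned
*/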

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns null when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
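
/* A hedged usage sketch: the _or_null variants let a caller recover
   from allocation failure instead of dying in os_out_of_memory().
   The error message is illustrative only.

     u8 *buf = clib_mem_alloc_or_null (n_bytes);
     if (buf == 0)
       return clib_error_return (0, "allocation of %d bytes failed",
				 n_bytes);
*/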

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
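
/* A hedged usage sketch: the _no_fail variants panic (with function
   and line information) on failure, so the result needs no null check.
   'my_obj_t' is hypothetical.

     my_obj_t *o = clib_mem_alloc_no_fail (sizeof (my_obj_t));
     // 'o' is guaranteed non-null here
*/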

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
#if USE_DLMALLOC == 0
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  mheap_elt_t *e, *n;

  if (offset >= vec_len (heap))
    return 0;

  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);

  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
#else
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
#endif /* USE_DLMALLOC */
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

#if USE_DLMALLOC == 0
  mheap_put (heap, (u8 *) p - heap);
#else
  mspace_put (heap, p);
#endif
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
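
/* A hedged usage note: unlike realloc(3), the caller supplies the old
   size, because this emulation copies min(old_size, new_size) bytes
   into a fresh allocation and frees the original.

     p = clib_mem_realloc (p, new_size, old_size);  // p may move
*/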

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
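
/* A hedged usage note: clib_mem_free_s is meant for buffers holding
   sensitive data; it zeroes the object before freeing so the contents
   do not linger in the heap.  'key_material' is hypothetical.

     clib_mem_free_s (key_material);
*/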

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
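
/* A hedged push/pop sketch, assuming 'private_heap' was created
   earlier (e.g. via clib_mem_init): allocations between the two calls
   come from the private heap, then the previous heap is restored.

     void *old = clib_mem_set_heap (private_heap);
     void *obj = clib_mem_alloc (64);  // allocated from private_heap
     clib_mem_set_heap (old);
*/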

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
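
/* A hedged usage sketch: snapshot the current heap statistics.

     clib_mem_usage_t u;
     clib_mem_usage (&u);
     // u.object_count, u.bytes_used and u.bytes_free now describe
     // the current per-thread heap
*/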

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
		descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
		numa node preference.
		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
		number of available pages is not sufficient.
		<br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;
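
/* A hedged usage sketch: zero the descriptor, fill the input fields,
   then call clib_mem_vm_ext_alloc() (declared below); addr, fd and
   log2_page_size are outputs.  The name, size and flags are examples.

     clib_mem_vm_alloc_t a = { 0 };
     a.name = "my-shared-seg";	// hypothetical segment name
     a.size = 32 << 20;		// 32 MB
     a.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
     clib_error_t *err = clib_mem_vm_ext_alloc (&a);
     if (err == 0)
       ;  // a.addr is mapped; a.fd may be passed to another process
*/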

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);

typedef struct
{
  uword size; /**< Map size */
  int fd; /**< File descriptor to be mapped */
  uword requested_va; /**< Request fixed position mapping */
  void *addr; /**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;
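
/* A hedged usage sketch: map an existing descriptor (for example the
   fd produced by clib_mem_vm_ext_alloc in another process) into this
   address space.  'shared_fd' and 'seg_size' are hypothetical.

     clib_mem_vm_map_t m = { 0 };
     m.fd = shared_fd;
     m.size = seg_size;
     clib_error_t *err = clib_mem_vm_ext_map (&m);
     if (err == 0)
       ;  // m.addr points at the mapping
*/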

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */