/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/dlmalloc.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 8

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

/* Per CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
extern void *clib_per_numa_mheaps[CLIB_MAX_NUMAS];

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_per_numa_mheaps));
  return clib_per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_per_numa_mheaps[numa];
  clib_per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread.  We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}
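
/* Usage sketch (illustrative only, with a hypothetical 16-byte header):
   align_offset lets a payload that follows a header land on the alignment
   boundary, i.e. the intent is that (p + align_offset) is align-aligned.
   This is the pattern containers that keep a header in front of aligned
   user data rely on:

     uword header_bytes = 16;
     void *p = clib_mem_alloc_aligned_at_offset (payload_bytes + header_bytes,
						 64, header_bytes, 1);
     u8 *data = (u8 *) p + header_bytes;    <- intended to be 64-byte aligned
*/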

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which does not call os_out_of_memory() if it fails;
   it returns 0 instead. */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
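
/* Usage sketch (illustrative only): cache-line-aligned allocation with
   explicit failure handling via the _or_null variant, so the caller decides
   what happens on exhaustion instead of os_out_of_memory().  Assumes
   CLIB_CACHE_LINE_BYTES from <vppinfra/cache.h> and clib_error_return from
   <vppinfra/error.h>:

     void *p = clib_mem_alloc_aligned_or_null (n_bytes,
					       CLIB_CACHE_LINE_BYTES);
     if (p == 0)
       return clib_error_return (0, "out of memory");
*/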

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)			\
({									\
  uword _clib_mem_alloc_size = (size);					\
  void * _clib_mem_alloc_p;						\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)						\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);	\
  _clib_mem_alloc_p;							\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
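
/* Usage sketch (illustrative only): unlike realloc(3), the caller must
   supply the old size; only min (old_size, new_size) bytes are copied into
   the new object before the old one is freed:

     u8 *buf = clib_mem_alloc (64);
     buf = clib_mem_realloc (buf, 128, 64);   <- first 64 bytes preserved
*/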

always_inline uword
clib_mem_size (void *p)
{
  ASSERT (clib_mem_is_heap_object (p));
  return clib_mem_size_nocheck (p);
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  CLIB_MEM_UNPOISON (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
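
/* Usage sketch (illustrative only): temporarily allocate from another heap
   using the push/pop pattern; "other_heap" is a hypothetical mspace created
   elsewhere, e.g. via clib_mem_init:

     void *saved = clib_mem_set_heap (other_heap);
     void *obj = clib_mem_alloc (128);     <- served from other_heap
     ...
     clib_mem_set_heap (saved);            <- restore the caller's heap
*/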

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
				      u8 numa);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
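
/* Usage sketch (illustrative only): query and print current heap
   statistics; fformat and the %wd (machine word) specifier come from
   <vppinfra/format.h>:

     clib_mem_usage_t u;
     clib_mem_usage (&u);
     fformat (stdout, "%wd objects, %wd of %wd bytes used\n",
	      u.object_count, u.bytes_used, u.bytes_total);
*/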

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}
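
/* Usage sketch (illustrative only): reserve an address range, release its
   pages while keeping the range (see the PROT_NONE trick in
   clib_mem_vm_unmap above), then re-commit it at the same address:

     void *base = clib_mem_vm_alloc (sz);
     clib_mem_vm_unmap (base, sz);          <- pages freed, range kept
     base = clib_mem_vm_map (base, sz);     <- re-committed at same address
*/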

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
		<br> CLIB_MEM_VM_F_SHARED: request shared memory, file
		descriptor will be provided on successful allocation.
		<br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
		numa node preference.
		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
		number of available pages is not sufficient.
		<br> CLIB_MEM_VM_F_LOCKED: request locked memory.
	     */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
uword clib_mem_vm_reserve (uword start, uword size, u32 log2_page_sz);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
void clib_mem_destroy_mspace (void *mspace);
void clib_mem_destroy (void);
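
/* Usage sketch (illustrative only): request a shared, hugepage-backed
   allocation through the extended allocator.  On success a.addr points to
   the memory and a.fd is valid because CLIB_MEM_VM_F_SHARED is set:

     clib_mem_vm_alloc_t a = { 0 };
     clib_error_t *err;

     a.name = "example";
     a.size = 2ULL << 20;
     a.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;

     if ((err = clib_mem_vm_ext_alloc (&a)))
       return err;
*/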

typedef struct
{
  uword size; /**< Map size */
  int fd; /**< File descriptor to be mapped */
  uword requested_va; /**< Request fixed position mapping */
  void *addr; /**< Pointer to mapped memory, if successful */
  u8 numa_node; /**< numa node preference */
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
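
/* Usage sketch (illustrative only, with hypothetical locals shared_fd and
   map_size): map an existing shared-memory file descriptor, e.g. one
   produced by clib_mem_vm_ext_alloc or clib_mem_create_fd; leaving
   requested_va zero lets the kernel pick the address:

     clib_mem_vm_map_t m = { 0 };
     clib_error_t *err;

     m.fd = shared_fd;
     m.size = map_size;

     if ((err = clib_mem_vm_ext_map (&m)))
       return err;
     mapped = m.addr;
*/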

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */