/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#if USE_DLMALLOC == 0
#include <vppinfra/mheap_bootstrap.h>
#else
#include <vppinfra/dlmalloc.h>
#endif

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/valgrind.h>

#define CLIB_MAX_MHEAPS 256

/* Per-CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread.  We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
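
/*
 * Usage sketch (illustrative only): a newly created worker thread claims
 * its heap slot once, before its first allocation.
 *
 *   clib_mem_set_thread_index ();
 *   void *p = clib_mem_alloc (64);   // allocates via this thread's slot
 */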

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

#if USE_DLMALLOC == 0
  uword offset;
  heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
  clib_per_cpu_mheaps[cpu] = heap;

  if (offset != ~0)
    {
      p = heap + offset;
#if CLIB_DEBUG > 0
      VALGRIND_MALLOCLIKE_BLOCK (p, mheap_data_bytes (heap, offset), 0, 0);
#endif
      return p;
    }
  else
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }
#else
  p = mspace_get_aligned (heap, size, align, align_offset);
  if (PREDICT_FALSE (p == 0))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  return p;
#endif /* USE_DLMALLOC */
}
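
/*
 * Usage sketch (illustrative only): align_offset makes the address at
 * (p + align_offset) honor the requested alignment, e.g. so a small
 * header can precede payload that must stay cache-line aligned.
 *
 *   void *p = clib_mem_alloc_aligned_at_offset
 *     (16 + 512, 64, 16, 1);        // call os_out_of_memory on failure
 *   void *payload = (u8 *) p + 16;  // 64-byte aligned
 */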

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}
/* Memory allocator which returns 0 on failure instead of calling
   os_out_of_memory() */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
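
/*
 * Usage sketch (illustrative only; CLIB_CACHE_LINE_BYTES comes from
 * <vppinfra/cache.h>): the _or_null variants let callers handle
 * allocation failure instead of aborting.
 *
 *   u8 *buf = clib_mem_alloc_aligned_or_null (4096, CLIB_CACHE_LINE_BYTES);
 *   if (buf == 0)
 *     return clib_error_return (0, "out of memory");
 */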

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
#if USE_DLMALLOC == 0
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  mheap_elt_t *e, *n;

  if (offset >= vec_len (heap))
    return 0;

  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);

  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
#else
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
#endif /* USE_DLMALLOC */
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

#if USE_DLMALLOC == 0
  mheap_put (heap, (u8 *) p - heap);
#else
  mspace_put (heap, p);
#endif

#if CLIB_DEBUG > 0
  VALGRIND_FREELIKE_BLOCK (p, 0);
#endif
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
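
/*
 * Usage sketch (illustrative only): unlike realloc(3), the caller must
 * pass the old size, since the emulation copies min(old, new) bytes.
 *
 *   u8 *tbl = clib_mem_alloc (256);
 *   tbl = clib_mem_realloc (tbl, 512, 256);   // grow; contents preserved
 */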

always_inline uword
clib_mem_size (void *p)
{
#if USE_DLMALLOC == 0
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  ASSERT (clib_mem_is_heap_object (p));
  return mheap_elt_data_bytes (e);
#else
  ASSERT (clib_mem_is_heap_object (p));
  return mspace_usable_size_with_delta (p);
#endif
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
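
/*
 * Usage sketch (illustrative only): clib_mem_free_s zeroes an object
 * before freeing it, which suits key material and other secrets.
 *
 *   u8 *key = clib_mem_alloc (32);
 *   ... use key ...
 *   clib_mem_free_s (key);   // scrubbed, then returned to the heap
 */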

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
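
/*
 * Usage sketch (illustrative only): the usual push/pop pattern for
 * allocating from a different heap, then restoring the caller's heap.
 *
 *   void *oldheap = clib_mem_set_heap (other_heap);
 *   void *p = clib_mem_alloc (size);
 *   clib_mem_set_heap (oldheap);
 */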

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total. */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);
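
/*
 * Usage sketch (illustrative only; fformat comes from <vppinfra/format.h>):
 * query the current heap's statistics.
 *
 *   clib_mem_usage_t usage;
 *   clib_mem_usage (&usage);
 *   fformat (stdout, "%wd objects, %wd of %wd bytes used\n",
 *	      usage.object_count, usage.bytes_used, usage.bytes_total);
 */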

u8 *format_clib_mem_usage (u8 * s, va_list * args);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To "unmap" we map with no protection.  If we actually called
     munmap, then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages, which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}
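
/*
 * Usage sketch (illustrative only): reserve a region, decommit its pages
 * while keeping ownership of the address range, then commit it again at
 * the same address.
 *
 *   void *base = clib_mem_vm_alloc (1 << 20);   // reserve + commit 1 MB
 *   clib_mem_vm_unmap (base, 1 << 20);          // PROT_NONE, pages freed
 *   base = clib_mem_vm_map (base, 1 << 20);     // commit again, same range
 *   clib_mem_vm_free (base, 1 << 20);           // truly release
 */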

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
		descriptor will be provided on successful allocation.
		<br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
		numa node preference.
		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
		number of available pages is not sufficient.
		<br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping. */
} clib_mem_vm_alloc_t;
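
/*
 * Usage sketch (illustrative only; "my-region" is a made-up name):
 * request a shared, hugepage-backed region via clib_mem_vm_ext_alloc
 * (declared below).
 *
 *   clib_mem_vm_alloc_t alloc = { 0 };
 *   clib_error_t *err;
 *
 *   alloc.name = "my-region";
 *   alloc.size = 16 << 20;
 *   alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
 *   if ((err = clib_mem_vm_ext_alloc (&alloc)))
 *     clib_error_report (err);
 *   // on success, alloc.addr, alloc.fd and alloc.log2_page_size are set
 */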

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);

typedef struct
{
  uword size; /**< Map size */
  int fd; /**< File descriptor to be mapped */
  uword requested_va; /**< Request fixed position mapping */
  void *addr; /**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
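
/*
 * Usage sketch (illustrative only; fd and region_size are assumed to be
 * obtained from the exporting process): map a shared region by file
 * descriptor.
 *
 *   clib_mem_vm_map_t map = { 0 };
 *   map.fd = fd;
 *   map.size = region_size;
 *   if (clib_mem_vm_ext_map (&map) == 0)
 *     ... use map.addr ...
 */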
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */