/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#include <vppinfra/dlmalloc.h>

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/sanitizer.h>

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 8

/* Unspecified NUMA socket */
#define VEC_NUMA_UNSPECIFIED (0xFF)

/* Per CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
extern void *clib_per_numa_mheaps[CLIB_MAX_NUMAS];

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}

always_inline void *
clib_mem_get_per_numa_heap (u32 numa_id)
{
  ASSERT (numa_id < ARRAY_LEN (clib_per_numa_mheaps));
  return clib_per_numa_mheaps[numa_id];
}

always_inline void *
clib_mem_set_per_numa_heap (u8 * new_heap)
{
  int numa = os_get_numa_index ();
  void *old = clib_per_numa_mheaps[numa];
  clib_per_numa_mheaps[numa] = new_heap;
  return old;
}

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
                                       0, clib_per_cpu_mheaps[0]))
      {
        os_set_thread_index (i);
        break;
      }
  ASSERT (__os_thread_index > 0);
}
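
/* Usage sketch (illustrative only): a newly spawned worker claims a heap
   slot before its first allocation; my_worker_fn is a hypothetical pthread
   start routine, not part of this API.

     static void *
     my_worker_fn (void *arg)
     {
       clib_mem_set_thread_index ();
       ... allocations now use this thread's mheap slot ...
       return 0;
     }
 */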

always_inline uword
clib_mem_size_nocheck (void *p)
{
  return mspace_usable_size_with_delta (p);
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
                                  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
        align_offset %= align;
      else
        align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

  p = mspace_get_aligned (heap, size, align, align_offset);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
        os_out_of_memory ();
      return 0;
    }

  CLIB_MEM_UNPOISON (p, size);
  return p;
}

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
                                           /* align_offset */ 0,
                                           /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
                                           /* os_out_of_memory */ 1);
}

/* Memory allocator which returns null when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
                                           /* align_offset */ 0,
                                           /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
                                           /* os_out_of_memory */ 0);
}
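
/* Usage sketch (illustrative only): the _or_null variants report failure
   to the caller instead of invoking os_out_of_memory ().

     u8 *buf = clib_mem_alloc_aligned_or_null (1 << 20, CLIB_CACHE_LINE_BYTES);
     if (buf == 0)
       return clib_error_return (0, "allocation failed");
 */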

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)                            \
({                                                                            \
  uword _clib_mem_alloc_size = (size);                                        \
  void * _clib_mem_alloc_p;                                                   \
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p)                                                    \
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);         \
  _clib_mem_alloc_p;                                                          \
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

  CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));

  mspace_put (heap, p);
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
        copy_size = old_size;
      else
        copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
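
/* Usage note: unlike realloc(3), the caller supplies the current object
   size; the copy is min (old_size, new_size) and p is freed only when the
   new allocation succeeds. Sketch (sizes are illustrative):

     buf = clib_mem_realloc (buf, 2 * len, len);
 */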
244
Dave Barachc3799992016-08-15 11:12:27 -0400245always_inline uword
246clib_mem_size (void *p)
Ed Warnickecb9cada2015-12-08 15:45:58 -0700247{
Dave Barach6a5adc32018-07-04 10:56:23 -0400248 ASSERT (clib_mem_is_heap_object (p));
Benoît Ganne9fb6d402019-04-15 15:28:21 +0200249 return clib_mem_size_nocheck (p);
Ed Warnickecb9cada2015-12-08 15:45:58 -0700250}
251
Benoît Ganne78af0a82019-04-29 17:27:24 +0200252always_inline void
253clib_mem_free_s (void *p)
254{
255 uword size = clib_mem_size (p);
Benoît Ganne9fb6d402019-04-15 15:28:21 +0200256 CLIB_MEM_UNPOISON (p, size);
Benoît Ganne78af0a82019-04-29 17:27:24 +0200257 memset_s_inline (p, size, 0, size);
258 clib_mem_free (p);
259}
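
/* Usage note (illustrative only): clib_mem_free_s zeroes the object before
   returning it to the heap, so prefer it for buffers that held key material
   or other secrets; session_key below is a hypothetical example.

     clib_mem_free_s (session_key);
 */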

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
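
/* Usage sketch (illustrative only): the push/pop pattern for allocating
   from a different heap; other_heap is a hypothetical mspace pointer.

     void *old = clib_mem_set_heap (other_heap);
     void *p = clib_mem_alloc (128);
     clib_mem_set_heap (old);
 */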

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);
void *clib_mem_init_thread_safe_numa (void *memory, uword memory_size,
                                      u8 numa);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);
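
/* Usage sketch (illustrative only): snapshot the current heap statistics.

     clib_mem_usage_t usage;
     clib_mem_usage (&usage);
     fformat (stdout, "%wd of %wd bytes used\n",
              usage.bytes_used, usage.bytes_total);
 */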

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
  CLIB_MEM_POISON (addr, size);
}
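
/* Usage sketch (illustrative only): reserve and release anonymous VA space;
   the 16 MB size is an arbitrary example.

     u8 *va = clib_mem_vm_alloc (16 << 20);
     if (va)
       clib_mem_vm_free (va, 16 << 20);
 */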

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;
  else
    CLIB_MEM_UNPOISON (mmap_addr, size);

  return mmap_addr;
}

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
                descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
                <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
                numa node preference.
                <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
                <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
                number of available pages is not sufficient.
                <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size; /**< Page size in log2 format, set on successful allocation. */
  int n_pages; /**< Number of pages. */
  uword requested_va; /**< Request fixed position mapping */
} clib_mem_vm_alloc_t;

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
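
/* Usage sketch (illustrative only): request a shared hugepage-backed
   region. The name, size, and flag combination below are examples, not
   requirements.

     clib_mem_vm_alloc_t alloc = { 0 };
     clib_error_t *err;

     alloc.name = "my-region";
     alloc.size = 32 << 20;
     alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
     err = clib_mem_vm_ext_alloc (&alloc);
     if (err)
       clib_error_report (err);
 */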

typedef struct
{
  uword size; /**< Map size */
  int fd; /**< File descriptor to be mapped */
  uword requested_va; /**< Request fixed position mapping */
  void *addr; /**< Pointer to mapped memory, if successful */
  u8 numa_node; /**< numa node preference */
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */