/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/valloc.h>

/** @file
    @brief Simple first-fit virtual space allocator
*/
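
/*
 * A minimal usage sketch (illustrative only, not compiled into the
 * library): create an arena over an assumed virtual range, allocate from
 * it, and give the space back. The base address and sizes are made-up
 * example values; any non-zero, suitably-sized range works.
 *
 *   clib_valloc_main_t vam;
 *   clib_valloc_chunk_t _template, *template = &_template;
 *   uword va;
 *
 *   clib_memset (template, 0, sizeof (*template));
 *   template->baseva = 0x200000000000ULL;      // example base, must be non-zero
 *   template->size = 1ULL << 30;               // example arena size: 1 GB
 *
 *   clib_valloc_init (&vam, template, 1);      // 1 => serialize with a spinlock
 *   va = clib_valloc_alloc (&vam, 1 << 20, 0); // 0 => return 0 on failure
 *   if (va)
 *     clib_valloc_free (&vam, va);             // returns the freed size
 */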

/** Add a chunk of memory to a virtual allocation arena
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the virtual address range to add

    @note Only the baseva and size members of the template chunk are
    significant. It's perfectly OK for the new chunk to be discontinuous with
    previous chunks; the chunk fusion algorithm won't merge them.
 */

__clib_export void
clib_valloc_add_chunk (clib_valloc_main_t *vam, clib_valloc_chunk_t *template)
{
  clib_valloc_chunk_t *ch, *new_ch;
  u32 index;

  ASSERT (vam->flags & CLIB_VALLOC_INITIALIZED);

  clib_spinlock_lock_if_init (&vam->lock);

  /* Add at the beginning, or at the end... */
  index = vam->first_index;

  /*
   * Make sure we're not trying to add an overlapping chunk..
   * It's worth checking, because someone will eventually do that.
   */
  if (CLIB_DEBUG > 0 && index != ~0)
    {
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);
          ASSERT (template->baseva < ch->baseva || template->baseva >=
                  (ch->baseva + ch->size));
          ASSERT (template->baseva + template->size < ch->baseva ||
                  template->baseva + template->size >=
                  (ch->baseva + ch->size));
          index = ch->next;
        }
      index = vam->first_index;
    }

  if (index != ~0)
    ch = pool_elt_at_index (vam->chunks, index);

  if (index == ~0 || template->baseva < ch->baseva)
    {
      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      if (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);

          new_ch->next = index;
          new_ch->prev = ~0;
          ch->prev = new_ch - vam->chunks;
        }
      else
        {
          new_ch->next = new_ch->prev = ~0;
        }

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      vam->first_index = new_ch - vam->chunks;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva, vam->first_index);
    }
  else
    {
      /* Walk to the end of the chunk chain */
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);
          index = ch->next;
        }
      /* we want the last chunk index */
      index = ch - vam->chunks;

      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      ch = pool_elt_at_index (vam->chunks, index);

      new_ch->next = ~0;
      new_ch->prev = index;
      ch->next = new_ch - vam->chunks;

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
                new_ch - vam->chunks);
    }

  clib_spinlock_unlock_if_init (&vam->lock);
}
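
/*
 * Illustrative sketch: grow an already-initialized arena with a second,
 * discontinuous range. Only the baseva and size members of the template
 * are read; the address and size below are example values. The new range
 * is linked into the chunk list but never fused with existing chunks.
 *
 *   clib_valloc_chunk_t _template, *template = &_template;
 *
 *   clib_memset (template, 0, sizeof (*template));
 *   template->baseva = 0x300000000000ULL;    // example, disjoint from the arena
 *   template->size = 1ULL << 28;             // example: 256 MB
 *   clib_valloc_add_chunk (&vam, template);  // vam initialized elsewhere
 */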

/** Initialize a virtual memory allocation arena
    @param vam - clib_valloc_main_t * pointer to the arena to initialize
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the initial virtual address range
    @param need_lock - int - 1 => protect the arena with a spinlock
*/
__clib_export void
clib_valloc_init (clib_valloc_main_t * vam, clib_valloc_chunk_t * template,
                  int need_lock)
{
  ASSERT (template && template->baseva && template->size);
  clib_memset (vam, 0, sizeof (*vam));
  if (need_lock)
    clib_spinlock_init (&vam->lock);

  vam->chunk_index_by_baseva = hash_create (0, sizeof (uword));
  vam->first_index = ~0;
  vam->flags |= CLIB_VALLOC_INITIALIZED;

  clib_valloc_add_chunk (vam, template);
}

/** Allocate virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param size - uword number of bytes to allocate
    @param os_out_of_memory_on_failure - 1 => panic on allocation failure
    @return uword allocated space, 0 => failure
*/
__clib_export uword
clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
                   int os_out_of_memory_on_failure)
{
  clib_valloc_chunk_t *ch, *new_ch, *next_ch;
  u32 index;

  clib_spinlock_lock_if_init (&vam->lock);

  index = vam->first_index;

  while (index != ~0)
    {
      ch = pool_elt_at_index (vam->chunks, index);
      /* If the chunk is free... */
      if ((ch->flags & CLIB_VALLOC_BUSY) == 0)
        {
          /* Too small? */
          if (ch->size < size)
            goto next_chunk;
          /* Exact match? */
          if (ch->size == size)
            {
              ch->flags |= CLIB_VALLOC_BUSY;
              clib_spinlock_unlock_if_init (&vam->lock);
              return ch->baseva;
            }
          /*
           * The current free chunk is larger than necessary, split the block.
           */
          pool_get (vam->chunks, new_ch);
          /* ch might have just moved */
          ch = pool_elt_at_index (vam->chunks, index);
          clib_memset (new_ch, 0, sizeof (*new_ch));
          new_ch->next = new_ch->prev = ~0;
          new_ch->baseva = ch->baseva + size;
          new_ch->size = ch->size - size;
          ch->size = size;

          /* Insert into doubly-linked list */
          new_ch->next = ch->next;
          new_ch->prev = ch - vam->chunks;

          if (ch->next != ~0)
            {
              next_ch = pool_elt_at_index (vam->chunks, ch->next);
              next_ch->prev = new_ch - vam->chunks;
            }
          ch->next = new_ch - vam->chunks;

          hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
                    new_ch - vam->chunks);

          ch->flags |= CLIB_VALLOC_BUSY;
          clib_spinlock_unlock_if_init (&vam->lock);
          return ch->baseva;
        }

    next_chunk:
      index = ch->next;
    }
  clib_spinlock_unlock_if_init (&vam->lock);

  if (os_out_of_memory_on_failure)
    os_out_of_memory ();

  return 0;
}
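
/*
 * Illustrative sketch: first-fit allocation with explicit failure handling.
 * Passing 0 for os_out_of_memory_on_failure makes the function return 0
 * instead of calling os_out_of_memory(); 0 is an unambiguous failure value
 * because clib_valloc_init asserts a non-zero arena base address.
 *
 *   uword va = clib_valloc_alloc (&vam, 2ULL << 20, 0); // example: 2 MB
 *   if (va == 0)
 *     clib_warning ("arena exhausted");   // assumed caller-side handling
 */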

/** Free virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param baseva - uword base virtual address of the space being freed
    @return uword - size of the freed virtual chunk
    @note The size is returned since it is known, in case the caller
    doesn't keep track of chunk sizes.
*/
__clib_export uword
clib_valloc_free (clib_valloc_main_t * vam, uword baseva)
{
  clib_valloc_chunk_t *ch, *prev_ch, *next_ch, *n2_ch;
  u32 index;
  uword return_size = 0;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  p = hash_get (vam->chunk_index_by_baseva, baseva);

  /* Check even in production images */
  if (p == 0)
    os_panic ();

  index = p[0];

  ch = pool_elt_at_index (vam->chunks, index);

  return_size = ch->size;

  ASSERT (ch->baseva == baseva);
  ASSERT ((ch->flags & CLIB_VALLOC_BUSY) != 0);

  ch->flags &= ~CLIB_VALLOC_BUSY;

  /* combine with previous entry? */
  if (ch->prev != ~0)
    {
      prev_ch = pool_elt_at_index (vam->chunks, ch->prev);
      /*
       * Previous item must be free as well, and
       * tangent to this block.
       */
      if ((prev_ch->flags & CLIB_VALLOC_BUSY) == 0
          && ((prev_ch->baseva + prev_ch->size) == ch->baseva))
        {
          hash_unset (vam->chunk_index_by_baseva, baseva);
          prev_ch->size += ch->size;
          prev_ch->next = ch->next;
          if (ch->next != ~0)
            {
              next_ch = pool_elt_at_index (vam->chunks, ch->next);
              next_ch->prev = ch->prev;
            }
          ASSERT (ch - vam->chunks != vam->first_index);
          clib_memset (ch, 0xfe, sizeof (*ch));
          pool_put (vam->chunks, ch);
          /* See about combining with next elt */
          ch = prev_ch;
        }
    }

  /* Combine with next entry? */
  if (ch->next != ~0)
    {
      next_ch = pool_elt_at_index (vam->chunks, ch->next);

      if ((next_ch->flags & CLIB_VALLOC_BUSY) == 0
          && ((ch->baseva + ch->size) == next_ch->baseva))
        {
          hash_unset (vam->chunk_index_by_baseva, next_ch->baseva);
          ch->size += next_ch->size;
          ch->next = next_ch->next;
          if (ch->next != ~0)
            {
              n2_ch = pool_elt_at_index (vam->chunks, next_ch->next);
              n2_ch->prev = ch - vam->chunks;
            }
          ASSERT (next_ch - vam->chunks != vam->first_index);
          clib_memset (next_ch, 0xfe, sizeof (*next_ch));
          pool_put (vam->chunks, next_ch);
        }
    }

  clib_spinlock_unlock_if_init (&vam->lock);
  return return_size;
}
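
/*
 * Illustrative sketch: freeing returns the size of the chunk, so the caller
 * does not have to remember how much was allocated. Free neighbors tangent
 * to the freed chunk are merged immediately, which keeps the first-fit walk
 * in clib_valloc_alloc short.
 *
 *   uword va = clib_valloc_alloc (&vam, 1 << 20, 0);
 *   ...
 *   uword freed = clib_valloc_free (&vam, va);   // freed == 1 << 20 here
 */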

/** Format a virtual allocation arena (varargs)
    @param vam - clib_valloc_main_t * pointer to the arena to format
    @param verbose - int - verbosity level
    @return u8 vector
*/
__clib_export u8 *
format_valloc (u8 *s, va_list *va)
{
  clib_valloc_main_t *vam = va_arg (*va, clib_valloc_main_t *);
  int verbose = va_arg (*va, int);
  clib_valloc_chunk_t *ch;
  u32 index;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  s = format (s, "%d chunks, first index %d\n",
              pool_elts (vam->chunks), vam->first_index);

  if (verbose)
    {
      index = vam->first_index;
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);

          s = format (s, "[%d] base %llx size %llx (%lld) prev %d %s\n",
                      index, ch->baseva, ch->size, ch->size, ch->prev,
                      (ch->flags & CLIB_VALLOC_BUSY) ? "busy" : "free");

          p = hash_get (vam->chunk_index_by_baseva, ch->baseva);
          if (p == 0)
            {
              s = format (s, "   BUG: baseva not in hash table!\n");
            }
          else if (p[0] != index)
            {
              s = format (s, "   BUG: baseva in hash table %d not %d!\n",
                          p[0], index);
            }
          index = ch->next;
        }
    }

  clib_spinlock_unlock_if_init (&vam->lock);

  return s;
}
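
/*
 * Illustrative sketch: format_valloc follows the usual vppinfra "%U"
 * convention, so it can be handed to format()/fformat() together with the
 * arena pointer and a verbosity flag.
 *
 *   fformat (stdout, "%U", format_valloc, &vam, 1);   // 1 => verbose
 */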

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */