/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/valloc.h>

/** @file
    @brief Simple first-fit virtual space allocator
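
    The arena keeps chunks of virtual address space on a doubly-linked
    list, splitting a free chunk on allocation and coalescing adjacent
    free chunks on free. A minimal lifecycle sketch; the base address and
    sizes below are illustrative assumptions, not requirements:

      clib_valloc_main_t vam;
      clib_valloc_chunk_t _template, *template = &_template;
      uword va;

      template->baseva = 0x100000000ULL;   // arbitrary example VA
      template->size = 1ULL << 30;         // 1 GB of virtual space
      clib_valloc_init (&vam, template, 1);   // 1 => arm the spinlock

      va = clib_valloc_alloc (&vam, 1 << 20, 0);  // 0 => return 0 on failure
      if (va)
        (void) clib_valloc_free (&vam, va);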
*/

/** Add a chunk of memory to a virtual allocation arena
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the virtual address range to add

    @note Only the baseva and size members of the template chunk are
    significant. It's perfectly OK for the new chunk to be discontinuous
    with previous chunks; the chunk fusion algorithm won't merge them.
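
    A minimal sketch of adding a second, discontinuous range to an
    existing arena; the address and size are illustrative assumptions:

      clib_valloc_chunk_t _template, *template = &_template;

      template->baseva = 0x200000000ULL;  // need not abut existing chunks
      template->size = 1ULL << 30;
      clib_valloc_add_chunk (vam, template);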
*/

void
clib_valloc_add_chunk (clib_valloc_main_t * vam,
		       clib_valloc_chunk_t * template)
{
  clib_valloc_chunk_t *ch, *new_ch;
  u32 index;

  ASSERT (vam->flags & CLIB_VALLOC_INITIALIZED);

  clib_spinlock_lock_if_init (&vam->lock);

  /* Add at the beginning, or at the end... */
  index = vam->first_index;

  /*
   * Make sure we're not trying to add an overlapping chunk.
   * It's worth checking, because someone will eventually do that.
   */
  if (CLIB_DEBUG > 0 && index != ~0)
    {
      while (index != ~0)
	{
	  ch = pool_elt_at_index (vam->chunks, index);
	  /* Treating chunks as half-open intervals, non-overlap means the
	     new chunk ends at or before this one starts, or starts at or
	     after this one ends. */
	  ASSERT (template->baseva + template->size <= ch->baseva ||
		  template->baseva >= (ch->baseva + ch->size));
	  index = ch->next;
	}
      index = vam->first_index;
    }

  if (index != ~0)
    ch = pool_elt_at_index (vam->chunks, index);

  if (index == ~0 || template->baseva < ch->baseva)
    {
      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      if (index != ~0)
	{
	  ch = pool_elt_at_index (vam->chunks, index);

	  new_ch->next = index;
	  new_ch->prev = ~0;
	  ch->prev = new_ch - vam->chunks;
	}
      else
	{
	  new_ch->next = new_ch->prev = ~0;
	}

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      vam->first_index = new_ch - vam->chunks;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva, vam->first_index);
    }
  else
    {
      /* Walk to the end of the chunk chain */
      while (index != ~0)
	{
	  ch = pool_elt_at_index (vam->chunks, index);
	  index = ch->next;
	}
      /* we want the last chunk index */
      index = ch - vam->chunks;

      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      ch = pool_elt_at_index (vam->chunks, index);

      new_ch->next = ~0;
      new_ch->prev = index;
      ch->next = new_ch - vam->chunks;

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
		new_ch - vam->chunks);
    }

  clib_spinlock_unlock_if_init (&vam->lock);
}

/** Initialize a virtual memory allocation arena
    @param vam - clib_valloc_main_t * pointer to the arena to initialize
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the initial virtual address range
    @param need_lock - int - nonzero to create a spinlock protecting the arena
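
    The lock matters only when multiple threads share the arena: the other
    entry points take it via clib_spinlock_lock_if_init, which is a no-op
    when the lock was never initialized.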
*/
__clib_export void
clib_valloc_init (clib_valloc_main_t * vam, clib_valloc_chunk_t * template,
		  int need_lock)
{
  ASSERT (template && template->baseva && template->size);
  clib_memset (vam, 0, sizeof (*vam));
  if (need_lock)
    clib_spinlock_init (&vam->lock);

  vam->chunk_index_by_baseva = hash_create (0, sizeof (uword));
  vam->first_index = ~0;
  vam->flags |= CLIB_VALLOC_INITIALIZED;

  clib_valloc_add_chunk (vam, template);
}

/** Allocate virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param size - uword number of bytes to allocate
    @param os_out_of_memory_on_failure - int - 1 => panic on allocation failure
    @return uword base address of the allocated space, 0 => failure
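
    A minimal sketch, assuming vam points to a freshly initialized arena
    whose single free chunk can satisfy both requests:

      uword va1 = clib_valloc_alloc (vam, 0x1000, 0);
      uword va2 = clib_valloc_alloc (vam, 0x1000, 0);
      // First fit: va2 == va1 + 0x1000, split from the same free chunk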
*/
__clib_export uword
clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
		   int os_out_of_memory_on_failure)
{
  clib_valloc_chunk_t *ch, *new_ch, *next_ch;
  u32 index;

  clib_spinlock_lock_if_init (&vam->lock);

  index = vam->first_index;

  while (index != ~0)
    {
      ch = pool_elt_at_index (vam->chunks, index);
      /* If the chunk is free... */
      if ((ch->flags & CLIB_VALLOC_BUSY) == 0)
	{
	  /* Too small? */
	  if (ch->size < size)
	    goto next_chunk;
	  /* Exact match? */
	  if (ch->size == size)
	    {
	      ch->flags |= CLIB_VALLOC_BUSY;
	      clib_spinlock_unlock_if_init (&vam->lock);
	      return ch->baseva;
	    }
	  /*
	   * The current free chunk is larger than necessary, split the block.
	   */
	  pool_get (vam->chunks, new_ch);
	  /* ch might have just moved */
	  ch = pool_elt_at_index (vam->chunks, index);
	  clib_memset (new_ch, 0, sizeof (*new_ch));
	  new_ch->next = new_ch->prev = ~0;
	  new_ch->baseva = ch->baseva + size;
	  new_ch->size = ch->size - size;
	  ch->size = size;

	  /* Insert into doubly-linked list */
	  new_ch->next = ch->next;
	  new_ch->prev = ch - vam->chunks;

	  if (ch->next != ~0)
	    {
	      next_ch = pool_elt_at_index (vam->chunks, ch->next);
	      next_ch->prev = new_ch - vam->chunks;
	    }
	  ch->next = new_ch - vam->chunks;

	  hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
		    new_ch - vam->chunks);

	  ch->flags |= CLIB_VALLOC_BUSY;
	  clib_spinlock_unlock_if_init (&vam->lock);
	  return ch->baseva;
	}

    next_chunk:
      index = ch->next;
    }
  clib_spinlock_unlock_if_init (&vam->lock);

  if (os_out_of_memory_on_failure)
    os_out_of_memory ();

  return 0;
}

/** Free virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param baseva - uword base virtual address of the space to free
    @return uword - size of the freed virtual chunk
    @note The size is returned since we know it, in case the caller
    doesn't track chunk sizes.
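
    A minimal sketch, assuming va was returned by clib_valloc_alloc on
    this arena:

      uword freed_size = clib_valloc_free (vam, va);
      // Adjacent free chunks are coalesced, so the freed range can be
      // handed out again as part of a larger allocation.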
*/
__clib_export uword
clib_valloc_free (clib_valloc_main_t * vam, uword baseva)
{
  clib_valloc_chunk_t *ch, *prev_ch, *next_ch, *n2_ch;
  u32 index;
  uword return_size = 0;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  p = hash_get (vam->chunk_index_by_baseva, baseva);

  /* Check even in production images */
  if (p == 0)
    os_panic ();

  index = p[0];

  ch = pool_elt_at_index (vam->chunks, index);

  return_size = ch->size;

  ASSERT (ch->baseva == baseva);
  ASSERT ((ch->flags & CLIB_VALLOC_BUSY) != 0);

  ch->flags &= ~CLIB_VALLOC_BUSY;

  /* Combine with previous entry? */
  if (ch->prev != ~0)
    {
      prev_ch = pool_elt_at_index (vam->chunks, ch->prev);
      /*
       * The previous chunk must be free as well, and
       * contiguous with this block.
       */
      if ((prev_ch->flags & CLIB_VALLOC_BUSY) == 0
	  && ((prev_ch->baseva + prev_ch->size) == ch->baseva))
	{
	  hash_unset (vam->chunk_index_by_baseva, baseva);
	  prev_ch->size += ch->size;
	  prev_ch->next = ch->next;
	  if (ch->next != ~0)
	    {
	      next_ch = pool_elt_at_index (vam->chunks, ch->next);
	      next_ch->prev = ch->prev;
	    }
	  ASSERT (ch - vam->chunks != vam->first_index);
	  clib_memset (ch, 0xfe, sizeof (*ch));
	  pool_put (vam->chunks, ch);
	  /* See about combining with next elt */
	  ch = prev_ch;
	}
    }

  /* Combine with next entry? */
  if (ch->next != ~0)
    {
      next_ch = pool_elt_at_index (vam->chunks, ch->next);

      if ((next_ch->flags & CLIB_VALLOC_BUSY) == 0
	  && ((ch->baseva + ch->size) == next_ch->baseva))
	{
	  hash_unset (vam->chunk_index_by_baseva, next_ch->baseva);
	  ch->size += next_ch->size;
	  ch->next = next_ch->next;
	  if (ch->next != ~0)
	    {
	      n2_ch = pool_elt_at_index (vam->chunks, next_ch->next);
	      n2_ch->prev = ch - vam->chunks;
	    }
	  ASSERT (next_ch - vam->chunks != vam->first_index);
	  clib_memset (next_ch, 0xfe, sizeof (*next_ch));
	  pool_put (vam->chunks, next_ch);
	}
    }

  clib_spinlock_unlock_if_init (&vam->lock);
  return return_size;
}


/** Format a virtual allocation arena (varargs)
    @param vam - clib_valloc_main_t * pointer to the arena to format
    @param verbose - int - verbosity level
    @return u8 vector
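
    A minimal usage sketch:

      u8 *s = format (0, "%U", format_valloc, vam, 1);   // 1 => verbose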
*/
u8 *
format_valloc (u8 * s, va_list * va)
{
  clib_valloc_main_t *vam = va_arg (*va, clib_valloc_main_t *);
  int verbose = va_arg (*va, int);
  clib_valloc_chunk_t *ch;
  u32 index;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  s = format (s, "%d chunks, first index %d\n",
	      pool_elts (vam->chunks), vam->first_index);

  if (verbose)
    {
      index = vam->first_index;
      while (index != ~0)
	{
	  ch = pool_elt_at_index (vam->chunks, index);

	  s = format (s, "[%d] base %llx size %llx (%lld) prev %d %s\n",
		      index, ch->baseva, ch->size, ch->size, ch->prev,
		      (ch->flags & CLIB_VALLOC_BUSY) ? "busy" : "free");

	  p = hash_get (vam->chunk_index_by_baseva, ch->baseva);
	  if (p == 0)
	    {
	      s = format (s, "   BUG: baseva not in hash table!\n");
	    }
	  else if (p[0] != index)
	    {
	      s = format (s, "   BUG: baseva in hash table %d not %d!\n",
			  p[0], index);
	    }
	  index = ch->next;
	}
    }

  clib_spinlock_unlock_if_init (&vam->lock);

  return s;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */