blob: 0ede1b3678657b1abc66b6ac9d9598fa7afff48e [file] [log] [blame]
Damjan Marion0da81682020-12-22 14:58:56 +01001/*
2 * Copyright (c) 2020 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include <vppinfra/clib.h>
17#include <vppinfra/mem.h>
18#include <vppinfra/time.h>
19#include <vppinfra/format.h>
20#include <vppinfra/clib_error.h>
21
/* while usage of dlmalloc APIs is generally discouraged, in this particular
 * case there is significant benefit of calling them directly due to
 * smaller memory consumption (no wwp and headroom space) */
25#include <vppinfra/dlmalloc.h>
26
27#define CLIB_MEM_BULK_DEFAULT_MIN_ELTS_PER_CHUNK 32
28
/* Header stored at the (chunk_align-aligned) base of every chunk; the
 * element array follows after chunk_hdr_sz bytes. Chunks are kept on one
 * of two doubly-linked lists (full_chunks / avail_chunks). */
typedef struct clib_mem_bulk_chunk_hdr
{
  u32 freelist; /* index of first free elt in this chunk, ~0 when none */
  u32 n_free;	/* number of free elts currently in this chunk */
  struct clib_mem_bulk_chunk_hdr *prev, *next; /* intrusive list links */
} clib_mem_bulk_chunk_hdr_t;
35
/* Bulk allocator state; opaque to callers (handle type is void *). */
typedef struct
{
  u32 elt_sz;	    /* element size rounded up to 'align' */
  u32 chunk_hdr_sz; /* sizeof (chunk hdr) rounded up to 'align' */
  u32 elts_per_chunk;
  u32 align;	    /* element alignment, minimum 16 */
  u32 chunk_align;  /* power-of-2 chunk size/alignment; lets free() find
		     * the chunk header by masking the element pointer */
  void *mspace;	    /* dlmalloc mspace all memory is carved from */
  clib_mem_bulk_chunk_hdr_t *full_chunks, *avail_chunks;
} clib_mem_bulk_t;
46
47__clib_export clib_mem_bulk_handle_t
48clib_mem_bulk_init (u32 elt_sz, u32 align, u32 min_elts_per_chunk)
49{
50 clib_mem_heap_t *heap = clib_mem_get_heap ();
51 clib_mem_bulk_t *b;
52 uword sz;
53
54 if ((b = mspace_memalign (heap->mspace, 16, sizeof (clib_mem_bulk_t))) == 0)
55 return 0;
56
57 if (align < 16)
58 align = 16;
59
60 if (min_elts_per_chunk == 0)
61 min_elts_per_chunk = CLIB_MEM_BULK_DEFAULT_MIN_ELTS_PER_CHUNK;
62
63 clib_memset (b, 0, sizeof (clib_mem_bulk_t));
64 b->mspace = heap->mspace;
65 b->align = align;
66 b->elt_sz = round_pow2 (elt_sz, align);
67 b->chunk_hdr_sz = round_pow2 (sizeof (clib_mem_bulk_chunk_hdr_t), align);
68 b->elts_per_chunk = min_elts_per_chunk;
Florin Corasc25882c2021-02-09 10:03:50 -080069 sz = (uword) b->elts_per_chunk * b->elt_sz + b->chunk_hdr_sz;
Damjan Marion0da81682020-12-22 14:58:56 +010070 b->chunk_align = max_pow2 (sz);
71 b->elts_per_chunk += (b->chunk_align - sz) / b->elt_sz;
72 return b;
73}
74
75__clib_export void
76clib_mem_bulk_destroy (clib_mem_bulk_handle_t h)
77{
78 clib_mem_bulk_t *b = h;
79 clib_mem_bulk_chunk_hdr_t *c, *next;
80
81 c = b->full_chunks;
82
83again:
84 while (c)
85 {
86 next = c->next;
87 mspace_free (b->mspace, c);
88 c = next;
89 }
90
91 if (b->avail_chunks)
92 {
93 c = b->avail_chunks;
94 b->avail_chunks = 0;
95 goto again;
96 }
97
98 mspace_free (b->mspace, b);
99}
100
101static inline void *
102get_chunk_elt_ptr (clib_mem_bulk_t *b, clib_mem_bulk_chunk_hdr_t *c, u32 index)
103{
104 return (u8 *) c + b->chunk_hdr_sz + index * b->elt_sz;
105}
106
107static inline void
108add_to_chunk_list (clib_mem_bulk_chunk_hdr_t **first,
109 clib_mem_bulk_chunk_hdr_t *c)
110{
111 c->next = *first;
112 c->prev = 0;
113 if (c->next)
114 c->next->prev = c;
115 *first = c;
116}
117
118static inline void
119remove_from_chunk_list (clib_mem_bulk_chunk_hdr_t **first,
120 clib_mem_bulk_chunk_hdr_t *c)
121{
122 if (c->next)
123 c->next->prev = c->prev;
124 if (c->prev)
125 c->prev->next = c->next;
126 else
127 *first = c->next;
128}
129
130__clib_export void *
131clib_mem_bulk_alloc (clib_mem_bulk_handle_t h)
132{
133 clib_mem_bulk_t *b = h;
134 clib_mem_bulk_chunk_hdr_t *c = b->avail_chunks;
135 u32 elt_idx;
136
137 if (b->avail_chunks == 0)
138 {
139 u32 i, sz = b->chunk_hdr_sz + b->elts_per_chunk * b->elt_sz;
140 c = mspace_memalign (b->mspace, b->chunk_align, sz);
141 clib_memset (c, 0, sizeof (clib_mem_bulk_chunk_hdr_t));
142 b->avail_chunks = c;
143 c->n_free = b->elts_per_chunk;
144
145 /* populate freelist */
146 for (i = 0; i < b->elts_per_chunk - 1; i++)
147 *((u32 *) get_chunk_elt_ptr (b, c, i)) = i + 1;
148 *((u32 *) get_chunk_elt_ptr (b, c, i)) = ~0;
149 }
150
151 ASSERT (c->freelist != ~0);
152 elt_idx = c->freelist;
153 c->freelist = *((u32 *) get_chunk_elt_ptr (b, c, elt_idx));
154 c->n_free--;
155
156 if (c->n_free == 0)
157 {
158 /* chunk is full */
159 ASSERT (c->freelist == ~0);
160 remove_from_chunk_list (&b->avail_chunks, c);
161 add_to_chunk_list (&b->full_chunks, c);
162 }
163
164 return get_chunk_elt_ptr (b, c, elt_idx);
165}
166
167__clib_export void
168clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p)
169{
170 clib_mem_bulk_t *b = h;
171 uword offset = (uword) p & (b->chunk_align - 1);
172 clib_mem_bulk_chunk_hdr_t *c = (void *) ((u8 *) p - offset);
173 u32 elt_idx = (offset - b->chunk_hdr_sz) / b->elt_sz;
174
175 ASSERT (elt_idx < b->elts_per_chunk);
Florin Coras7b0fa552020-12-23 10:18:16 -0800176 ASSERT (get_chunk_elt_ptr (b, c, elt_idx) == p);
Damjan Marion0da81682020-12-22 14:58:56 +0100177
178 c->n_free++;
179
180 if (c->n_free == b->elts_per_chunk)
181 {
182 /* chunk is empty - give it back */
183 remove_from_chunk_list (&b->avail_chunks, c);
184 mspace_free (b->mspace, c);
185 return;
186 }
187
188 if (c->n_free == 1)
189 {
190 /* move chunk to avail chunks */
191 remove_from_chunk_list (&b->full_chunks, c);
192 add_to_chunk_list (&b->avail_chunks, c);
193 }
194
195 /* add elt to freelist */
196 *(u32 *) p = c->freelist;
197 c->freelist = elt_idx;
198}
199
200__clib_export u8 *
201format_clib_mem_bulk (u8 *s, va_list *args)
202{
203 clib_mem_bulk_t *b = va_arg (*args, clib_mem_bulk_handle_t);
204 clib_mem_bulk_chunk_hdr_t *c;
205 uword n_chunks = 0, n_free_elts = 0, n_elts, chunk_sz;
206
207 c = b->full_chunks;
208 while (c)
209 {
210 n_chunks++;
211 c = c->next;
212 }
213
214 c = b->avail_chunks;
215 while (c)
216 {
217 n_chunks++;
218 n_free_elts += c->n_free;
219 c = c->next;
220 }
221
222 n_elts = n_chunks * b->elts_per_chunk;
Florin Corasc25882c2021-02-09 10:03:50 -0800223 chunk_sz = b->chunk_hdr_sz + (uword) b->elts_per_chunk * b->elt_sz;
Damjan Marion0da81682020-12-22 14:58:56 +0100224
225 s = format (s, "%u bytes/elt, align %u, chunk-align %u, ", b->elt_sz,
226 b->align, b->chunk_align);
227 s = format (s, "%u elts-per-chunk, chunk size %lu bytes", b->elts_per_chunk,
228 chunk_sz);
229
230 if (n_chunks == 0)
231 return format (s, "\nempty");
232
233 s = format (s, "\n%lu chunks allocated, ", n_chunks);
234 s = format (s, "%lu / %lu free elts (%.1f%%), ", n_free_elts, n_elts,
235 (f64) n_free_elts * 100 / n_elts);
236 s = format (s, "%lu bytes of memory consumed", n_chunks * chunk_sz);
237
238 return s;
239}