// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * FRR ID Number Allocator
 * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "id_alloc.h"

#include "log.h"
#include "lib_errors.h"
#include "memory.h"

#include <inttypes.h>

DEFINE_MTYPE_STATIC(LIB, IDALLOC_ALLOCATOR, "ID Number Allocator");
DEFINE_MTYPE_STATIC(LIB, IDALLOC_ALLOCATOR_NAME, "ID Number Allocator Name");
DEFINE_MTYPE_STATIC(LIB, IDALLOC_DIRECTORY, "ID Number Allocator Directory");
DEFINE_MTYPE_STATIC(LIB, IDALLOC_SUBDIRECTORY,
                    "ID Number Allocator Subdirectory");
DEFINE_MTYPE_STATIC(LIB, IDALLOC_PAGE, "ID Number Allocator Page");
DEFINE_MTYPE_STATIC(LIB, IDALLOC_POOL,
                    "ID Number temporary holding pool entry");

#if UINT_MAX >= UINT32_MAX
#define FFS32(x) ffs(x)
#else
/* ints less than 32 bits? Yikes. */
#define FFS32(x) ffsl(x)
#endif

#define DIR_MASK ((1<<IDALLOC_DIR_BITS)-1)
#define SUBDIR_MASK ((1<<IDALLOC_SUBDIR_BITS)-1)
#define FRR_ID_PAGE_MASK ((1<<IDALLOC_PAGE_BITS)-1)
#define WORD_MASK ((1<<IDALLOC_WORD_BITS)-1)
#define OFFSET_MASK ((1<<IDALLOC_OFFSET_BITS)-1)

#define DIR_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS + \
                   IDALLOC_PAGE_BITS + IDALLOC_SUBDIR_BITS)
#define SUBDIR_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS + \
                      IDALLOC_PAGE_BITS)
#define FRR_ID_PAGE_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS)
#define WORD_SHIFT (IDALLOC_OFFSET_BITS)
#define OFFSET_SHIFT (0)

#define ID_DIR(id) ((id >> DIR_SHIFT) & DIR_MASK)
#define ID_SUBDIR(id) ((id >> SUBDIR_SHIFT) & SUBDIR_MASK)
#define ID_PAGE(id) ((id >> FRR_ID_PAGE_SHIFT) & FRR_ID_PAGE_MASK)
#define ID_WORD(id) ((id >> WORD_SHIFT) & WORD_MASK)
#define ID_OFFSET(id) ((id >> OFFSET_SHIFT) & OFFSET_MASK)

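/*
 * Worked example (illustrative only; the field widths live in id_alloc.h and
 * are assumed here to be 8/7/7/5/5 bits for DIR/SUBDIR/PAGE/WORD/OFFSET,
 * most significant to least significant).  For id = 0x00012345:
 *
 *	ID_DIR(id)    = (id >> 24) & 0xff = 0x00
 *	ID_SUBDIR(id) = (id >> 17) & 0x7f = 0x00
 *	ID_PAGE(id)   = (id >> 10) & 0x7f = 0x48
 *	ID_WORD(id)   = (id >>  5) & 0x1f = 0x1a
 *	ID_OFFSET(id) = (id >>  0) & 0x1f = 0x05
 *
 * i.e. each 32-bit ID names a directory, a subdirectory, a page, and then a
 * 32-bit word plus bit offset within that page's allocation bitmap.
 */
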
/*
 * Find the page that an ID number belongs to in an allocator.
 * Optionally create the page if it doesn't exist.
 */
static struct id_alloc_page *find_or_create_page(struct id_alloc *alloc,
                                                 uint32_t id, int create)
{
        struct id_alloc_dir *dir = NULL;
        struct id_alloc_subdir *subdir = NULL;
        struct id_alloc_page *page = NULL;

        dir = alloc->sublevels[ID_DIR(id)];
        if (dir == NULL) {
                if (create) {
                        dir = XCALLOC(MTYPE_IDALLOC_DIRECTORY, sizeof(*dir));
                        alloc->sublevels[ID_DIR(id)] = dir;
                } else {
                        return NULL;
                }
        }

        subdir = dir->sublevels[ID_SUBDIR(id)];
        if (subdir == NULL) {
                if (create) {
                        subdir = XCALLOC(MTYPE_IDALLOC_SUBDIRECTORY,
                                         sizeof(*subdir));
                        dir->sublevels[ID_SUBDIR(id)] = subdir;
                } else {
                        return NULL;
                }
        }

        page = subdir->sublevels[ID_PAGE(id)];
        if (page == NULL && create) {
                page = XCALLOC(MTYPE_IDALLOC_PAGE, sizeof(*page));
                page->base_value = id;
                subdir->sublevels[ID_PAGE(id)] = page;

                alloc->capacity += 1 << FRR_ID_PAGE_SHIFT;
                page->next_has_free = alloc->has_free;
                alloc->has_free = page;
        } else if (page != NULL && create) {
                flog_err(
                        EC_LIB_ID_CONSISTENCY,
                        "ID Allocator %s attempt to re-create page at %u",
                        alloc->name, id);
        }

        return page;
}

/*
 * Return an ID number back to the allocator.
 * While this ID can be re-assigned through idalloc_allocate, the underlying
 * memory will not be freed. If this is the first free ID in the page, the page
 * will be added to the allocator's list of pages with free IDs.
 */
void idalloc_free(struct id_alloc *alloc, uint32_t id)
{
        struct id_alloc_page *page = NULL;

        int word, offset;
        uint32_t old_word, old_word_mask;

        page = find_or_create_page(alloc, id, 0);
        if (!page) {
                flog_err(EC_LIB_ID_CONSISTENCY,
                         "ID Allocator %s cannot free #%u. ID Block does not exist.",
                         alloc->name, id);
                return;
        }

        word = ID_WORD(id);
        offset = ID_OFFSET(id);

        if ((page->allocated_mask[word] & (1 << offset)) == 0) {
                flog_err(EC_LIB_ID_CONSISTENCY,
                         "ID Allocator %s cannot free #%u. ID was not allocated at the time of free.",
                         alloc->name, id);
                return;
        }

        old_word = page->allocated_mask[word];
        page->allocated_mask[word] &= ~(((uint32_t)1) << offset);
        alloc->allocated -= 1;

        if (old_word == UINT32_MAX) {
                /* first bit in this block of 32 to be freed. */

                old_word_mask = page->full_word_mask;
                page->full_word_mask &= ~(((uint32_t)1) << word);

                if (old_word_mask == UINT32_MAX) {
                        /* first bit in page freed, add this to the allocator's
                         * list of pages with free space
                         */
                        page->next_has_free = alloc->has_free;
                        alloc->has_free = page;
                }
        }
}

/*
 * Add an allocation page to the end of the allocator's current range.
 * Returns NULL if the allocator has had all possible pages allocated already.
 */
static struct id_alloc_page *create_next_page(struct id_alloc *alloc)
{
        if (alloc->capacity == 0 && alloc->sublevels[0])
                return NULL; /* All IDs allocated and the capacity looped. */

        return find_or_create_page(alloc, alloc->capacity, 1);
}

/*
 * Marks an ID within an allocator page as in use.
 * If the ID was the last free ID in the page, the page is removed from the
 * allocator's list of free IDs. In the typical allocation case, this page is
 * the first page in the list, and removing the page is fast. If instead an ID
 * is being reserved by number, this may end up scanning the whole singly
 * linked list of pages in order to remove it.
 */
static void reserve_bit(struct id_alloc *alloc, struct id_alloc_page *page,
                        int word, int offset)
{
        struct id_alloc_page *itr;

        page->allocated_mask[word] |= ((uint32_t)1) << offset;
        alloc->allocated += 1;

        if (page->allocated_mask[word] == UINT32_MAX) {
                page->full_word_mask |= ((uint32_t)1) << word;
                if (page->full_word_mask == UINT32_MAX) {
                        if (alloc->has_free == page) {
                                /* allocate always pulls from alloc->has_free */
                                alloc->has_free = page->next_has_free;
                        } else {
                                /* reserve could pull from any page with free
                                 * bits
                                 */
                                itr = alloc->has_free;
                                while (itr) {
                                        if (itr->next_has_free == page) {
                                                itr->next_has_free =
                                                        page->next_has_free;
                                                return;
                                        }

                                        itr = itr->next_has_free;
                                }
                        }
                }
        }
}

/*
 * Allocate an ID number from the allocator. Returns IDALLOC_INVALID (0) if the
 * allocator has no more IDs available.
 */
uint32_t idalloc_allocate(struct id_alloc *alloc)
{
        struct id_alloc_page *page;
        int word, offset;
        uint32_t return_value;

        if (alloc->has_free == NULL)
                create_next_page(alloc);

        if (alloc->has_free == NULL) {
                flog_err(EC_LIB_ID_EXHAUST,
                         "ID Allocator %s has run out of IDs.", alloc->name);
                return IDALLOC_INVALID;
        }

        page = alloc->has_free;
        word = FFS32(~(page->full_word_mask)) - 1;

        if (word < 0 || word >= 32) {
                flog_err(EC_LIB_ID_CONSISTENCY,
                         "ID Allocator %s internal error. Page starting at %u is inconsistent.",
                         alloc->name, page->base_value);
                return IDALLOC_INVALID;
        }

        offset = FFS32(~(page->allocated_mask[word])) - 1;
        if (offset < 0 || offset >= 32) {
                flog_err(EC_LIB_ID_CONSISTENCY,
                         "ID Allocator %s internal error. Page starting at %u is inconsistent on word %d",
                         alloc->name, page->base_value, word);
                return IDALLOC_INVALID;
        }
        return_value = page->base_value + word * 32 + offset;

        reserve_bit(alloc, page, word, offset);

        return return_value;
}

/*
 * Tries to allocate a specific ID from the allocator. Returns IDALLOC_INVALID
 * when the ID being "reserved" has already been assigned/reserved. This should
 * only be done with low numbered IDs, as the allocator needs to reserve bit-map
 * pages in order.
 */
uint32_t idalloc_reserve(struct id_alloc *alloc, uint32_t id)
{
        struct id_alloc_page *page;
        int word, offset;

        while (alloc->capacity <= id)
                create_next_page(alloc);

        word = ID_WORD(id);
        offset = ID_OFFSET(id);
        page = find_or_create_page(alloc, id, 0);
        /* page can't be null because the loop above ensured it was created. */

        if (page->allocated_mask[word] & (((uint32_t)1) << offset)) {
                flog_err(EC_LIB_ID_CONSISTENCY,
                         "ID Allocator %s could not reserve %u because it is already allocated.",
                         alloc->name, id);
                return IDALLOC_INVALID;
        }

        reserve_bit(alloc, page, word, offset);
        return id;
}

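/*
 * Usage sketch (illustrative only, not code that exists elsewhere): reserving
 * a fixed, well-known value so that idalloc_allocate() never hands it out.
 * Because pages are only created in order, reserving a large value first
 * creates bitmap pages covering every lower ID, so keep reserved values small.
 *
 *	if (idalloc_reserve(alloc, 1) == IDALLOC_INVALID)
 *		... // 1 is already in use
 */
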
/*
 * Set up an empty ID allocator, with IDALLOC_INVALID pre-reserved.
 */
struct id_alloc *idalloc_new(const char *name)
{
        struct id_alloc *ret;

        ret = XCALLOC(MTYPE_IDALLOC_ALLOCATOR, sizeof(*ret));
        ret->name = XSTRDUP(MTYPE_IDALLOC_ALLOCATOR_NAME, name);

        idalloc_reserve(ret, IDALLOC_INVALID);

        return ret;
}

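/*
 * Usage sketch (illustrative only; the allocator name and error handling are
 * hypothetical): the typical lifecycle of an allocator.
 *
 *	struct id_alloc *alloc = idalloc_new("Example IDs");
 *	uint32_t id;
 *
 *	id = idalloc_allocate(alloc);
 *	if (id == IDALLOC_INVALID)
 *		return;			// ID space exhausted
 *
 *	// ... use id ...
 *
 *	idalloc_free(alloc, id);	// id may be handed out again later
 *	idalloc_destroy(alloc);		// frees the pages and the allocator
 */
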
/*
 * Free a subdir, and all pages below it.
 */
static void idalloc_destroy_subdir(struct id_alloc_subdir *subdir)
{
        int i;

        for (i = 0; i < IDALLOC_PAGE_COUNT; i++) {
                if (subdir->sublevels[i])
                        XFREE(MTYPE_IDALLOC_PAGE, subdir->sublevels[i]);
                else
                        break;
        }
        XFREE(MTYPE_IDALLOC_SUBDIRECTORY, subdir);
}

/*
 * Free a dir, and all subdirs/pages below it.
 */
static void idalloc_destroy_dir(struct id_alloc_dir *dir)
{
        int i;

        for (i = 0; i < IDALLOC_SUBDIR_COUNT; i++) {
                if (dir->sublevels[i])
                        idalloc_destroy_subdir(dir->sublevels[i]);
                else
                        break;
        }
        XFREE(MTYPE_IDALLOC_DIRECTORY, dir);
}

/*
 * Free all memory associated with an ID allocator.
 */
void idalloc_destroy(struct id_alloc *alloc)
{
        int i;

        for (i = 0; i < IDALLOC_DIR_COUNT; i++) {
                if (alloc->sublevels[i])
                        idalloc_destroy_dir(alloc->sublevels[i]);
                else
                        break;
        }

        XFREE(MTYPE_IDALLOC_ALLOCATOR_NAME, alloc->name);
        XFREE(MTYPE_IDALLOC_ALLOCATOR, alloc);
}

/*
 * Give an ID number to a temporary holding pool.
 */
void idalloc_free_to_pool(struct id_alloc_pool **pool_ptr, uint32_t id)
{
        struct id_alloc_pool *new_pool;

        new_pool = XMALLOC(MTYPE_IDALLOC_POOL, sizeof(*new_pool));
        new_pool->id = id;
        new_pool->next = *pool_ptr;
        *pool_ptr = new_pool;
}

/*
 * Free all ID numbers held in a holding pool back to the main allocator.
 */
void idalloc_drain_pool(struct id_alloc *alloc, struct id_alloc_pool **pool_ptr)
{
        struct id_alloc_pool *current, *next;

        while (*pool_ptr) {
                current = *pool_ptr;
                next = current->next;
                idalloc_free(alloc, current->id);
                XFREE(MTYPE_IDALLOC_POOL, current);
                *pool_ptr = next;
        }
}

/*
 * Allocate an ID from either a holding pool, or the main allocator. IDs will
 * only be pulled from the main allocator when the pool is empty.
 */
uint32_t idalloc_allocate_prefer_pool(struct id_alloc *alloc,
                                      struct id_alloc_pool **pool_ptr)
{
        uint32_t ret;
        struct id_alloc_pool *pool_head = *pool_ptr;

        if (pool_head) {
                ret = pool_head->id;
                *pool_ptr = pool_head->next;
                XFREE(MTYPE_IDALLOC_POOL, pool_head);
                return ret;
        } else {
                return idalloc_allocate(alloc);
        }
}
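
/*
 * Usage sketch (illustrative only; variable names are hypothetical): recycling
 * IDs through a holding pool so recently released IDs are reused before new
 * ones are pulled from the main allocator.
 *
 *	struct id_alloc *alloc = idalloc_new("Example IDs");
 *	struct id_alloc_pool *pool = NULL;
 *	uint32_t id;
 *
 *	id = idalloc_allocate_prefer_pool(alloc, &pool); // pool empty: main allocator
 *	idalloc_free_to_pool(&pool, id);                 // park the ID in the pool
 *	id = idalloc_allocate_prefer_pool(alloc, &pool); // returns the same ID
 *
 *	idalloc_drain_pool(alloc, &pool);                // hand leftovers back
 *	idalloc_destroy(alloc);
 */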