/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers in a local
 * pool) so we don't need to go to the memory "store" during an id
 * allocate, just so you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinlocks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

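/*
 * A minimal usage sketch (not part of the original file): the
 * idr_pre_get()/idr_get_new() retry dance described above, under a
 * caller-provided lock.  "example_idr", "example_lock" and
 * "example_alloc_id" are hypothetical names.
 */
#if 0	/* illustrative only, not compiled */
static DEFINE_SPINLOCK(example_lock);
static DEFINE_IDR(example_idr);

static int example_alloc_id(void *ptr)
{
	int id, ret;

	do {
		/* may sleep, so call it before taking the spinlock */
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&example_lock);
		ret = idr_get_new(&example_idr, ptr, &id);
		spin_unlock(&example_lock);
	} while (ret == -EAGAIN);

	return ret ? ret : id;
}
#endif
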
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

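/*
 * Sketch (not in the original): the same retry pattern as in the
 * sketch after idr_pre_get(), but reserving ids 0..15 for fixed
 * assignments by starting the search at 16.  Names are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int example_alloc_dynamic_id(void *ptr)
{
	int id, ret;

	do {
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&example_lock);
		ret = idr_get_new_above(&example_idr, ptr, 16, &id);
		spin_unlock(&example_lock);
	} while (ret == -EAGAIN);

	return ret ? ret : id;	/* id >= 16 on success */
}
#endif
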
/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * If allocation from IDR's private freelist fails, idr_get_new() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new() call.
 *
 * If the idr is full idr_get_new() will return %-ENOSPC.
 *
 * @id returns a value in the range %0 ... %0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && ! --((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);

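/*
 * Sketch of the clean-up sequence described above (not in the original
 * file).  "example_idr", "example_free_one" and "example_teardown" are
 * hypothetical: the callback frees each stored object, then the ids
 * and the cached layers are released.
 */
#if 0	/* illustrative only, not compiled */
static int example_free_one(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void example_teardown(void)
{
	idr_for_each(&example_idr, example_free_one, NULL);
	idr_remove_all(&example_idr);
	idr_destroy(&example_idr);
}
#endif
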
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);

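/*
 * Sketch (not in the original): an RCU-side lookup, as permitted by
 * the comment above.  The stored objects must only be freed after an
 * RCU grace period for this to be safe; "struct example_obj" and its
 * refcount are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
struct example_obj {
	atomic_t refcount;
};

static struct example_obj *example_lookup(int id)
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = idr_find(&example_idr, id);
	if (obj && !atomic_inc_not_zero(&obj->refcount))
		obj = NULL;	/* found, but already on its way out */
	rcu_read_unlock();

	return obj;
}
#endif
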
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

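/*
 * Sketch (not in the original): using the nonzero-return convention
 * above to stop the walk early.  The callback hunts for the first
 * object matching a hypothetical predicate and passes it out through
 * @data; all "example_*" names are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int example_match(int id, void *p, void *data)
{
	struct example_obj **res = data;

	if (!example_obj_is_ready(p))	/* hypothetical predicate */
		return 0;		/* keep walking */
	*res = p;
	return 1;	/* nonzero: stop, idr_for_each() returns 1 */
}

static struct example_obj *example_find_ready(void)
{
	struct example_obj *res = NULL;

	idr_for_each(&example_idr, example_match, &res);
	return res;
}
#endif
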
/**
 * idr_get_next - lookup next object above or equal to the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the lowest id greater
 * than or equal to *@nextidp.  On success, *@nextidp is updated to that
 * id, ready for the next iteration.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);

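/*
 * Sketch (not in the original): walking every object with
 * idr_get_next().  The caller must serialize against concurrent
 * inserts and removals; "example_process" is a hypothetical hook.
 */
#if 0	/* illustrative only, not compiled */
static void example_walk(void)
{
	void *obj;
	int id = 0;

	while ((obj = idr_get_next(&example_idr, &id)) != NULL) {
		example_process(obj);
		id++;	/* advance past the id just returned */
	}
}
#endif
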
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

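/*
 * Sketch (not in the original): idr_replace() reports errors through
 * ERR_PTR(), not NULL, so the result wants an IS_ERR() check.
 * "example_idr" and "example_put" are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int example_swap(int id, void *new_obj)
{
	void *old = idr_replace(&example_idr, new_obj, id);

	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT or -EINVAL */
	example_put(old);		/* drop the old object */
	return 0;
}
#endif
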
void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* look for an empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range %0 ... %0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

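/*
 * Sketch (not in the original): the ida_pre_get()/ida_get_new() retry
 * pattern, mirroring the idr sketch earlier in this file.
 * "example_ida" is hypothetical and "example_lock" is the lock from
 * that earlier sketch.
 */
#if 0	/* illustrative only, not compiled */
static DEFINE_IDA(example_ida);

static int example_alloc_ida_id(void)
{
	int id, ret;

	do {
		if (!ida_pre_get(&example_ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&example_lock);
		ret = ida_get_new(&example_ida, &id);
		spin_unlock(&example_lock);
	} while (ret == -EAGAIN);

	return ret ? ret : id;
}
#endif
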
/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock(&simple_ida_lock);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock(&simple_ida_lock);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

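/*
 * Sketch (not in the original): the simple interface hides the
 * pre-get/retry dance and the locking.  Here it hands out minor
 * numbers 0..255 from a hypothetical "example_minor_ida".
 */
#if 0	/* illustrative only, not compiled */
static DEFINE_IDA(example_minor_ida);

static int example_open(void)
{
	int minor = ida_simple_get(&example_minor_ida, 0, 256, GFP_KERNEL);

	if (minor < 0)
		return minor;		/* -ENOMEM or -ENOSPC */
	/* use the minor number, then release it */
	ida_simple_remove(&example_minor_ida, minor);
	return 0;
}
#endif
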
/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	BUG_ON((int)id < 0);
	spin_lock(&simple_ida_lock);
	ida_remove(ida, id);
	spin_unlock(&simple_ida_lock);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);