/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and to associate a pointer
 * (treated as a (void *)) with that id.  You can hand the id out and
 * have it passed back to you later.  You then pass that id to this
 * code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE layers) in a local pool
 * so we don't need to go to the memory "store" during an id allocation,
 * and so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
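
/*
 * The id-to-pointer idea above in one hedged sketch.  my_idr, my_lock,
 * obj and id are hypothetical caller-side names, not part of this file;
 * error handling and the -EAGAIN retry protocol are elided here and
 * shown at idr_get_new_above() below:
 *
 *      static DEFINE_IDR(my_idr);
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      (associate obj with a fresh id)
 *      idr_pre_get(&my_idr, GFP_KERNEL);
 *      spin_lock(&my_lock);
 *      idr_get_new_above(&my_idr, obj, 0, &id);
 *      spin_unlock(&my_lock);
 *
 *      (translate the id back into the pointer)
 *      obj = idr_find(&my_idr, id);
 *
 *      (release the id)
 *      spin_lock(&my_lock);
 *      idr_remove(&my_idr, id);
 *      spin_unlock(&my_lock);
 */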

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
        call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinlocks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < MAX_IDR_FREE) {
                struct idr_layer *new;
                new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return 0;
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available, go back to the previous layer */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (id >= 1 << (idp->layers * IDR_BITS)) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }
                        p = pa[l];
                        BUG_ON(!p);

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_IDR_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        new->layer = l-1;
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        continue;
                }
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure, tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_IDR_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                                (struct idr_layer *)ptr);
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the id allocation function.  It should be called with any
 * required locks held.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
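
/*
 * A minimal sketch of the preallocate-and-retry protocol described above.
 * Illustrative only; my_idr, my_lock and obj are hypothetical caller-side
 * names.  On -EAGAIN the lock is dropped, the preallocation refilled via
 * idr_pre_get(), and the allocation retried:
 *
 *      int id, ret;
 *
 *      do {
 *              if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *                      return -ENOMEM;
 *              spin_lock(&my_lock);
 *              ret = idr_get_new_above(&my_idr, obj, 1, &id);
 *              spin_unlock(&my_lock);
 *      } while (ret == -EAGAIN);
 *      return ret ? ret : id;
 */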

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_IDR_LEVEL];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while (*paa && !--((**paa)->count)) {
                        if (to_free)
                                free_layer(to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
                        free_layer(to_free);
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;
        struct idr_layer *to_free;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_IDR_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                to_free = idp->top;
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
                to_free->bitmap = to_free->count = 0;
                free_layer(to_free);
        }
        while (idp->id_free_cnt >= MAX_IDR_FREE) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
{
        int n, id, max;
        int bt_mask;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_IDR_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        rcu_assign_pointer(idp->top, NULL);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                bt_mask = id;
                id += 1 << n;
                /* Get the highest bit that the above add changed from 0->1. */
                while (n < fls(id ^ bt_mask)) {
                        if (p)
                                free_layer(p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
        __idr_remove_all(idp);

        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);
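
/*
 * A hedged sketch of the clean-up sequence described above: free the
 * stored objects via idr_for_each(), then tear down the mappings and
 * cached layers.  my_idr and free_fn() are hypothetical:
 *
 *      static int free_fn(int id, void *p, void *data)
 *      {
 *              kfree(p);
 *              return 0;
 *      }
 *
 *      idr_for_each(&my_idr, free_fn, NULL);
 *      idr_destroy(&my_idr);
 */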

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer registered with the given id.  A %NULL return
 * indicates that @id is not valid or that you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * lifetimes of the leaf pointers are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_IDR_MASK;

        if (id >= (1 << n))
                return NULL;
        BUG_ON(n == 0);

        while (n > 0 && p) {
                n -= IDR_BITS;
                BUG_ON(n != p->layer*IDR_BITS);
                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find);
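
/*
 * A hedged lookup sketch under RCU.  my_idr and use() are hypothetical;
 * the caller must guarantee the object cannot be freed while the
 * pointer is being used:
 *
 *      rcu_read_lock();
 *      obj = idr_find(&my_idr, id);
 *      if (obj)
 *              use(obj);
 *      rcu_read_unlock();
 */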

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_IDR_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference_raw(idp->top);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);
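
/*
 * A hedged sketch of an idr_for_each() callback that counts entries
 * (my_idr is hypothetical; returning non-zero from the callback stops
 * the walk and propagates that value):
 *
 *      static int count_fn(int id, void *p, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *      idr_for_each(&my_idr, count_fn, &count);
 */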

/**
 * idr_get_next - lookup the next object above or equal to the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the lowest id that is
 * greater than or equal to *@nextidp.  On success, *@nextidp is updated
 * to that id, so the caller can increment it for the next iteration.
 *
 * This function can be called under rcu_read_lock(), provided that the
 * lifetimes of the leaf pointers are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
        struct idr_layer *p, *pa[MAX_IDR_LEVEL];
        struct idr_layer **paa = &pa[0];
        int id = *nextidp;
        int n, max;

        /* find first ent */
        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer + 1) * IDR_BITS;
        max = 1 << n;

        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        *nextidp = id;
                        return p;
                }

                /*
                 * Proceed to the next layer at the current level.  Unlike
                 * idr_for_each(), @id isn't guaranteed to be aligned to
                 * layer boundary at this point and adding 1 << n may
                 * incorrectly skip IDs.  Make sure we jump to the
                 * beginning of the next layer using round_up().
                 */
                id = round_up(id + 1, 1 << n);
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(idr_get_next);
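
/*
 * A hedged iteration sketch built on idr_get_next(); this is the pattern
 * behind the idr_for_each_entry() helper.  my_idr, struct my_obj and
 * use() are hypothetical:
 *
 *      struct my_obj *obj;
 *      int id = 0;
 *
 *      while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *              use(obj);
 *              id++;
 *      }
 */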

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);

        n = (p->layer+1) * IDR_BITS;

        id &= MAX_IDR_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        rcu_assign_pointer(p->ary[n], ptr);

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
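
/*
 * A hedged replace sketch (my_idr and new_obj are hypothetical).  The
 * return value is either the old pointer or an ERR_PTR()-encoded errno,
 * so it must be checked with IS_ERR():
 *
 *      old = idr_replace(&my_idr, new_obj, id);
 *      if (IS_ERR(old))
 *              return PTR_ERR(old);
 *      kfree(old);
 */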

void __init idr_init_cache(void)
{
        idr_layer_cache = kmem_cache_create("idr_layer_cache",
                                sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to taking any locks and calling
 * ida_get_new_above().  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks held.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_IDR_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0)
                return _idr_rc_to_errno(t);

        if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                (void *)bitmap);
                pa[0]->count++;
        }

        /* look for an empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_IDR_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
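
/*
 * A minimal sketch of the ida allocation protocol, mirroring the idr one
 * shown at idr_get_new_above().  my_ida and my_lock are hypothetical
 * caller-side names:
 *
 *      static DEFINE_IDA(my_ida);
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      int alloc_id(void)
 *      {
 *              int id, ret;
 *
 *              do {
 *                      if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *                              return -ENOMEM;
 *                      spin_lock(&my_lock);
 *                      ret = ida_get_new_above(&my_ida, 0, &id);
 *                      spin_unlock(&my_lock);
 *              } while (ret == -EAGAIN);
 *              return ret ? ret : id;
 *      }
 */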

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < %0x80000000)
 * @end: the maximum id (exclusive, < %0x80000000, or %0 for no limit)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask)
{
        int ret, id;
        unsigned int max;
        unsigned long flags;

        BUG_ON((int)start < 0);
        BUG_ON((int)end < 0);

        if (end == 0)
                max = 0x80000000;
        else {
                BUG_ON(end < start);
                max = end - 1;
        }

again:
        if (!ida_pre_get(ida, gfp_mask))
                return -ENOMEM;

        spin_lock_irqsave(&simple_ida_lock, flags);
        ret = ida_get_new_above(ida, start, &id);
        if (!ret) {
                if (id > max) {
                        ida_remove(ida, id);
                        ret = -ENOSPC;
                } else {
                        ret = id;
                }
        }
        spin_unlock_irqrestore(&simple_ida_lock, flags);

        if (unlikely(ret == -EAGAIN))
                goto again;

        return ret;
}
EXPORT_SYMBOL(ida_simple_get);
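
/*
 * A hedged sketch of the simple API, which handles the pre_get/get_new
 * retry dance and the locking internally (my_ida is hypothetical):
 *
 *      int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              return id;
 *      ...
 *      ida_simple_remove(&my_ida, id);
 */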

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
        unsigned long flags;

        BUG_ON((int)id < 0);
        spin_lock_irqsave(&simple_ida_lock, flags);
        ida_remove(ida, id);
        spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);