]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * include/linux/idr.h | |
3 | * | |
4 | * 2002-10-18 written by Jim Houston jim.houston@ccur.com | |
5 | * Copyright (C) 2002 by Concurrent Computer Corporation | |
6 | * Distributed under the GNU GPL license version 2. | |
7 | * | |
8 | * Small id to pointer translation service avoiding fixed sized | |
9 | * tables. | |
10 | */ | |
f668ab1a LT |
11 | |
12 | #ifndef __IDR_H__ | |
13 | #define __IDR_H__ | |
14 | ||
1da177e4 LT |
15 | #include <linux/types.h> |
16 | #include <linux/bitops.h> | |
199f0ca5 | 17 | #include <linux/init.h> |
2027d1ab | 18 | #include <linux/rcupdate.h> |
1da177e4 | 19 | |
/*
 * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
 * 8 bits only gave us 3 layers out of every pair of pages, which is less
 * efficient except for trees with a largest element between 192-255 inclusive.
 */
#define IDR_BITS 6
#define IDR_SIZE (1 << IDR_BITS)	/* slots per idr_layer (64) */
#define IDR_MASK ((1 << IDR_BITS)-1)	/* extracts one layer's index from an id */
28 | ||
/*
 * One node of the radix tree backing an idr.  Each layer holds up to
 * IDR_SIZE child pointers; leaves point at user data, interior nodes
 * point at further idr_layers.  Nodes are freed via RCU, hence the
 * union of the allocation bitmap with an rcu_head (the bitmap is dead
 * once the node is being torn down, so the storage can be shared).
 */
struct idr_layer {
	int			prefix;	/* the ID prefix of this idr_layer */
	int			layer;	/* distance from leaf */
	struct idr_layer __rcu	*ary[1<<IDR_BITS];
	int			count;	/* When zero, we can release it */
	union {
		/* A zero bit means "space here" */
		DECLARE_BITMAP(bitmap, IDR_SIZE);
		struct rcu_head		rcu_head;
	};
};
40 | ||
41 | struct idr { | |
0ffc2a9c | 42 | struct idr_layer __rcu *hint; /* the last layer allocated from */ |
4106ecaf | 43 | struct idr_layer __rcu *top; |
4106ecaf | 44 | int layers; /* only valid w/o concurrent changes */ |
3e6628c4 | 45 | int cur; /* current pos for cyclic allocation */ |
4106ecaf | 46 | spinlock_t lock; |
dcbff5d1 LJ |
47 | int id_free_cnt; |
48 | struct idr_layer *id_free; | |
1da177e4 LT |
49 | }; |
50 | ||
/* Static initializer for a struct idr; only the lock needs explicit setup. */
#define IDR_INIT(name)							\
{									\
	.lock = __SPIN_LOCK_UNLOCKED(name.lock),			\
}
/* Define and statically initialize an idr in one step. */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
56 | ||
44430612 MW |
/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 *
 * READ_ONCE() allows this to be called without holding @idr->lock;
 * concurrent updates via idr_set_cursor()/idr_alloc_cyclic() yield a
 * torn-free (but possibly stale) snapshot.
 */
static inline unsigned int idr_get_cursor(struct idr *idr)
{
	return READ_ONCE(idr->cur);
}
69 | ||
/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 *
 * WRITE_ONCE() pairs with the READ_ONCE() in idr_get_cursor() so the
 * store cannot be torn by the compiler.
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->cur, val);
}
82 | ||
f9c46d6e | 83 | /** |
56083ab1 | 84 | * DOC: idr sync |
f9c46d6e ND |
85 | * idr synchronization (stolen from radix-tree.h) |
86 | * | |
87 | * idr_find() is able to be called locklessly, using RCU. The caller must | |
88 | * ensure calls to this function are made within rcu_read_lock() regions. | |
89 | * Other readers (lock-free or otherwise) and modifications may be running | |
90 | * concurrently. | |
91 | * | |
92 | * It is still required that the caller manage the synchronization and | |
93 | * lifetimes of the items. So if RCU lock-free lookups are used, typically | |
94 | * this would mean that the items have their own locks, or are amenable to | |
95 | * lock-free access; and that the items are freed by RCU (or only freed after | |
96 | * having been deleted from the idr tree *and* a synchronize_rcu() grace | |
97 | * period). | |
98 | */ | |
99 | ||
/*
 * This is what we export.
 */

/* Lookup fallback used by idr_find() when the hint layer misses. */
void *idr_find_slowpath(struct idr *idp, int id);
/* Preallocation section for idr_alloc(); ended with idr_preload_end(). */
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);
f668ab1a | 116 | |
/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function. See idr_preload() for details.
 *
 * idr_preload() disables preemption to pin the per-cpu preload cache;
 * this re-enables it, so the two must always be called in pairs.
 */
static inline void idr_preload_end(void)
{
	preempt_enable();
}
127 | ||
/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with. A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
	struct idr_layer *hint = rcu_dereference_raw(idr->hint);

	/* Fastpath: the cached leaf layer covers @id iff the high bits match. */
	if (hint && (id & ~IDR_MASK) == hint->prefix)
		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

	/* Hint miss: walk the tree from the top. */
	return idr_find_slowpath(idr, id);
}
149 | ||
/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL. This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
49038ef4 | 162 | |
/**
 * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
 * @idp: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define idr_for_each_entry_continue(idp, entry, id)		\
	for ((entry) = idr_get_next((idp), &(id));		\
	     entry;						\
	     ++id, (entry) = idr_get_next((idp), &(id)))
176 | ||
/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS 	(IDA_BITMAP_LONGS * sizeof(long) * 8)

/* One leaf chunk of allocated-id bits, stored as idr leaf payloads. */
struct ida_bitmap {
	long			nr_busy;	/* number of set bits below */
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

/* An ida is an idr whose leaves are ida_bitmaps instead of user pointers. */
struct ida {
	struct idr		idr;
	struct ida_bitmap	*free_bitmap;	/* one-deep preallocation cache */
};

/* Static initializer / one-step definition, mirroring IDR_INIT/DEFINE_IDR. */
#define IDA_INIT(name)		{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

/* Simpler interface: allocates/frees ids in [start, end) with internal locking. */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
/**
 * ida_get_new - allocate new ID
 * @ida: idr handle
 * @p_id: pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
222 | ||
/**
 * ida_is_empty - check whether an ida has any ids allocated
 * @ida: ida handle
 *
 * Thin wrapper over idr_is_empty() on the embedded idr.
 */
static inline bool ida_is_empty(struct ida *ida)
{
	return idr_is_empty(&ida->idr);
}
227 | ||
49038ef4 | 228 | void __init idr_init_cache(void); |
9749f30f | 229 | |
f668ab1a | 230 | #endif /* __IDR_H__ */ |