/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may contain entries
 * linking to the same bucket of the old table during resizing.
 * This simplifies the locking: taking the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

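/* Worked example (illustrative only, not part of the API): suppose the
 * old table has 8 buckets and the new one 16. New buckets 3 and 11 both
 * receive entries from old bucket 3. With at most 16/2 = 8 locks,
 * locks_mask is 7 and bucket_lock() maps both to the same lock:
 *
 *	bucket_lock(new_tbl, 3)  -> &locks[ 3 & 7] == &locks[3]
 *	bucket_lock(new_tbl, 11) -> &locks[11 & 7] == &locks[3]
 *
 * Locking one of the sibling buckets therefore also serializes writers
 * on the other, which is the property the comment above relies on.
 */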

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
			       const struct bucket_table *tbl)
{
	struct rhash_head *he;
	unsigned int i, hash;

	for (i = 0; i < tbl->size; i++) {
		pr_warn(" [Bucket %d] ", i);
		rht_for_each_rcu(he, tbl, i) {
			hash = head_hashfn(ht, tbl, he);
			pr_cont("[hash = %#x, lock = %p] ",
				hash, bucket_lock(tbl, hash));
		}
		pr_cont("\n");
	}
}

static void debug_dump_table(struct rhashtable *ht,
			     const struct bucket_table *tbl,
			     unsigned int hash)
{
	struct bucket_table *old_tbl, *future_tbl;

	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
		 hash, tbl);

	rcu_read_lock();
	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	if (future_tbl != old_tbl) {
		pr_warn("Future table %p (size: %zd)\n",
			future_tbl, future_tbl->size);
		debug_dump_buckets(ht, future_tbl);
	}

	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
	debug_dump_buckets(ht, old_tbl);

	rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
	do {								\
		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
			debug_dump_table(HT, TBL, HASH);		\
			BUG();						\
		}							\
	} while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);

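/* Worked example (illustrative only): for a table of 64 buckets the two
 * watermarks above evaluate to
 *
 *	grow:   nelems > 64 / 4 * 3  == 48 entries (75% load)
 *	shrink: nelems < 64 * 3 / 10 == 19 entries (~30% load)
 *
 * The gap between the thresholds keeps a table hovering around half
 * load from being resized back and forth.
 */
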
static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}

/**
 * Unlink entries in the given bucket which hash to a different bucket.
 *
 * Returns true if no more work needs to be performed on the bucket.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same bucket as the
	 * previous node p. Call the previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}

static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table has been
	 * picked up, so no new additions go into the old table while we
	 * relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * newly formed bucket chain (containing entries added to future
	 * table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(ht, new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
	}

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
		}
	}

	rcu_assign_pointer(ht->tbl, new_tbl);
	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

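/* Worked example of the zip/unzip scheme (illustrative only): growing
 * from 4 to 8 buckets, old bucket 1 may hold entries destined for new
 * buckets 1 and 5 (new bucket index in parentheses):
 *
 *	old[1]: A(5) -> B(1) -> C(5) -> D(1) -> nulls
 *
 * After the linking pass above, new[5] points at A and new[1] at B, so
 * both new chains are valid but imprecise. Each unzip pass then detaches
 * the head run and relinks across foreign entries: the first pass moves
 * A's next pointer from B(1) to C(5) and leaves old[1] pointing at B;
 * further passes repeat this until every chain holds only entries of a
 * single new bucket.
 */
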
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head;

	hash = rht_bucket_index(tbl, hash);
	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	ASSERT_BUCKET_LOCK(ht, tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);

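/* Usage sketch (illustrative only, assuming the "struct test_obj" layout
 * from the rhashtable_init() examples below):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	obj->key = 42;
 *	rhashtable_insert(&ht, &obj->node);
 *	...
 *	rhashtable_remove(&ht, &obj->node);
 *
 * After removal the object must not be freed before an RCU grace period
 * has elapsed, e.g. via kfree_rcu() if the object embeds a struct
 * rcu_head.
 */
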
/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he, *he2;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		ASSERT_BUCKET_LOCK(ht, tbl, hash);

		if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
		    !rht_is_a_nulls(obj->next) &&
		    head_hashfn(ht, tbl, obj->next) != hash) {
			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
			rht_for_each_continue(he2, obj->next, tbl, hash) {
				if (head_hashfn(ht, tbl, he2) == hash) {
					rcu_assign_pointer(*pprev, he2);
					goto found;
				}
			}

			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else {
			rcu_assign_pointer(*pprev, obj->next);
		}

found:
		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * very rare.
	 */
	if (tbl != old_tbl) {
		tbl = old_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

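/* Usage sketch (illustrative only): a fixed-key lookup under RCU, again
 * assuming "struct test_obj" from the rhashtable_init() examples:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key);
 *	if (obj)
 *		pr_info("found %d\n", obj->key);
 *	rcu_read_unlock();
 *
 * The returned pointer is only guaranteed to stay valid within the RCU
 * read-side critical section unless the caller holds its own reference.
 */
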
/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);

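/* Sketch of a caller-supplied compare callback (illustrative only). The
 * key passed to rhashtable_lookup_compare() is used solely to pick the
 * bucket; matching is entirely up to the callback:
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		const struct test_obj *obj = ptr;
 *		const int *key = arg;
 *
 *		return obj->key == *key;
 *	}
 *
 *	obj = rhashtable_lookup_compare(&ht, &key, my_cmp, &key);
 */
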
/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);
	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

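/* Usage sketch (illustrative only): enforcing unique keys by letting the
 * table search and insert atomically under the bucket locks:
 *
 *	if (!rhashtable_lookup_insert(&ht, &obj->node))
 *		return -EEXIST;
 *
 * Two concurrent inserters of the same key cannot both succeed, since
 * the duplicate search and the insertion happen under the same locks.
 */
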
/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

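/* Usage sketch of the walk API (illustrative only), restarting when a
 * resize event rewinds the iterator:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	do {
 *		rhashtable_walk_start(&iter);
 *		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *			if (IS_ERR(obj))
 *				break;
 *			pr_info("key %d\n", obj->key);
 *		}
 *		rhashtable_walk_stop(&iter);
 *	} while (obj == ERR_PTR(-EAGAIN));
 *
 *	rhashtable_walk_exit(&iter);
 */
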
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

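/* Sketch (illustrative only): enabling automatic deferred resizing by
 * wiring the exported watermark helpers into the parameters:
 *
 *	struct rhashtable_params params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.hashfn = jhash,
 *		.nulls_base = (1U << RHT_BASE_SHIFT),
 *		.grow_decision = rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 *
 * With either decision callback set, rhashtable_init() arms the deferred
 * worker that performs the actual expand/shrink off the hot path.
 */
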
/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);