/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = rht_bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
                              gfp_t gfp)
{
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);

        if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE &&
                    gfp == GFP_KERNEL)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
                else
#endif
                tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
                                           gfp);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tbl->locks[i]);
        }
        tbl->locks_mask = size - 1;

        return 0;
}
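
/*
 * Lock-count arithmetic, worked through (illustrative example, not part
 * of the original file): with 4 possible CPUs and the default locks_mul
 * of 128, size becomes roundup_pow_of_two(4 * 128) = 512.  A table of
 * 256 buckets then caps this at 256 >> 1 = 128 locks, so locks_mask
 * maps several buckets onto each lock.
 */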

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl)
                kvfree(tbl->locks);

        kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
        bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets,
                                               gfp_t gfp)
{
        struct bucket_table *tbl = NULL;
        size_t size;
        int i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
            gfp != GFP_KERNEL)
                tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
        if (tbl == NULL && gfp == GFP_KERNEL)
                tbl = vzalloc(size);
        if (tbl == NULL)
                return NULL;

        tbl->size = nbuckets;

        if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        INIT_LIST_HEAD(&tbl->walkers);

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

        return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        do {
                new_tbl = tbl;
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}

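/*
 * Move the last entry of one old bucket chain to its new bucket in the
 * newest table in the chain.  Returns 0 on success and -ENOENT once the
 * old chain is empty, so callers loop until the bucket is drained.
 */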
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht,
                rht_dereference_rcu(old_tbl->future_tbl, ht));
        struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
        int err = -ENOENT;
        struct rhash_head *head, *next, *entry;
        spinlock_t *new_bucket_lock;
        unsigned int new_hash;

        rht_for_each(entry, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

                if (rht_is_a_nulls(next))
                        break;

                pprev = &entry->next;
        }

        if (err)
                goto out;

        new_hash = head_hashfn(ht, new_tbl, entry);

        new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

        spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
        head = rht_dereference_bucket(new_tbl->buckets[new_hash],
                                      new_tbl, new_hash);

        RCU_INIT_POINTER(entry->next, head);

        rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
        spin_unlock(new_bucket_lock);

        rcu_assign_pointer(*pprev, next);

out:
        return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
                                    unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *old_bucket_lock;

        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

        spin_lock_bh(old_bucket_lock);
        while (!rhashtable_rehash_one(ht, old_hash))
                ;
        old_tbl->rehash++;
        spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
{
        /* Protect future_tbl using the first bucket lock. */
        spin_lock_bh(old_tbl->locks);

        /* Did somebody beat us to it? */
        if (rcu_access_pointer(old_tbl->future_tbl)) {
                spin_unlock_bh(old_tbl->locks);
                return -EEXIST;
        }

        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         */
        rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

        /* Ensure the new table is visible to readers. */
        smp_wmb();

        spin_unlock_bh(old_tbl->locks);

        return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl;
        struct rhashtable_walker *walker;
        unsigned int old_hash;

        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        if (!new_tbl)
                return 0;

        for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
                rhashtable_rehash_chain(ht, old_hash);

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
        spin_unlock(&ht->lock);

        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        int err;

        ASSERT_RHT_MUTEX(ht);

        old_tbl = rhashtable_last_table(ht, old_tbl);

        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int size;
        int err;

        ASSERT_RHT_MUTEX(ht);

        size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;

        if (old_tbl->size <= size)
                return 0;

        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;

        new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}
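
/*
 * Shrink-target arithmetic, worked through (illustrative example, not
 * part of the original file): with nelems = 100, the target is
 * roundup_pow_of_two(100 * 3 / 2) = roundup_pow_of_two(150) = 256
 * buckets, the smallest power of two for which the current load stays
 * below the 75% grow threshold (0.75 * 256 = 192 > 100), so the table
 * will not immediately expand again.
 */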
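
/*
 * Deferred resize work, run in process context under ht->mutex: grows
 * the table when it is more than 75% full, shrinks it when automatic
 * shrinking is enabled and utilization drops below 30%, then performs
 * the pending rehash.  Reschedules itself if yet another table has
 * been chained in the meantime.
 */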
static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        int err = 0;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);

        tbl = rht_dereference(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, tbl);

        if (rht_grow_above_75(ht, tbl))
                rhashtable_expand(ht);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                rhashtable_shrink(ht);

        err = rhashtable_rehash_table(ht);

        mutex_unlock(&ht->mutex);

        if (err)
                schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
                                        struct bucket_table *tbl,
                                        unsigned int hash)
{
        unsigned int elasticity = ht->elasticity;
        struct rhash_head *head;

        rht_for_each(head, tbl, hash)
                if (!--elasticity)
                        return true;

        return false;
}

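/*
 * Allocate and attach a replacement table from an atomic (insert)
 * context.  Doubles the table when it is more than 75% full; otherwise
 * allocates a same-size table to force a rehash, refusing (-EBUSY) if a
 * rehash is already scheduled.  The insert still succeeds if someone
 * else already chained a rehash, and -ENOMEM falls back to the deferred
 * worker, which retries the allocation in process context.
 */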
int rhashtable_insert_rehash(struct rhashtable *ht,
                             struct bucket_table *tbl)
{
        struct bucket_table *old_tbl;
        struct bucket_table *new_tbl;
        unsigned int size;
        int err;

        old_tbl = rht_dereference_rcu(ht->tbl, ht);

        size = tbl->size;

        err = -EBUSY;

        if (rht_grow_above_75(ht, tbl))
                size *= 2;
        /* Do not schedule more than one rehash */
        else if (old_tbl != tbl)
                goto fail;

        err = -ENOMEM;

        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
        if (new_tbl == NULL)
                goto fail;

        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
        if (err) {
                bucket_table_free(new_tbl);
                if (err == -EEXIST)
                        err = 0;
        } else
                schedule_work(&ht->run_work);

        return err;

fail:
        /* Do not fail the insert if someone else did a rehash. */
        if (likely(rcu_dereference_raw(tbl->future_tbl)))
                return 0;

        /* Schedule async rehash to retry allocation in process context. */
        if (err == -ENOMEM)
                schedule_work(&ht->run_work);

        return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

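/*
 * Slow-path insert into the last table in the chain.  Returns NULL on
 * success, the table to retry against when the bucket chain exceeds
 * ht->elasticity or the table passes 100% utilization (-EAGAIN), and
 * an ERR_PTR() for duplicate keys (-EEXIST) or a full table (-E2BIG).
 */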
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
                                            const void *key,
                                            struct rhash_head *obj,
                                            struct bucket_table *tbl)
{
        struct rhash_head *head;
        unsigned int hash;
        int err;

        tbl = rhashtable_last_table(ht, tbl);
        hash = head_hashfn(ht, tbl, obj);
        spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

        err = -EEXIST;
        if (key && rhashtable_lookup_fast(ht, key, ht->p))
                goto exit;

        err = -E2BIG;
        if (unlikely(rht_grow_above_max(ht, tbl)))
                goto exit;

        err = -EAGAIN;
        if (rhashtable_check_elasticity(ht, tbl, hash) ||
            rht_grow_above_100(ht, tbl))
                goto exit;

        err = 0;

        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);

exit:
        spin_unlock(rht_bucket_lock(tbl, hash));

        if (err == 0)
                return NULL;
        else if (err == -EAGAIN)
                return tbl;
        else
                return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;

        iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
        if (!iter->walker)
                return -ENOMEM;

        mutex_lock(&ht->mutex);
        iter->walker->tbl = rht_dereference(ht->tbl, ht);
        list_add(&iter->walker->list, &iter->walker->tbl->walkers);
        mutex_unlock(&ht->mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        mutex_lock(&iter->ht->mutex);
        if (iter->walker->tbl)
                list_del(&iter->walker->list);
        mutex_unlock(&iter->ht->mutex);
        kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
        __acquires(RCU)
{
        struct rhashtable *ht = iter->ht;

        mutex_lock(&ht->mutex);

        if (iter->walker->tbl)
                list_del(&iter->walker->list);

        rcu_read_lock();

        mutex_unlock(&ht->mutex);

        if (!iter->walker->tbl) {
                iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
                return -EAGAIN;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        struct bucket_table *tbl = iter->walker->tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;

        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
                goto next;
        }

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        return rht_obj(ht, p);
                }

                iter->skip = 0;
        }

        iter->p = NULL;

        /* Ensure we see any new tables. */
        smp_rmb();

        iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker->tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
{
        struct rhashtable *ht;
        struct bucket_table *tbl = iter->walker->tbl;

        if (!tbl)
                goto out;

        ht = iter->ht;

        spin_lock(&ht->lock);
        if (tbl->rehash < tbl->size)
                list_add(&iter->walker->list, &tbl->walkers);
        else
                iter->walker->tbl = NULL;
        spin_unlock(&ht->lock);

        iter->p = NULL;

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
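
/*
 * Typical iteration over all entries, sketched for illustration (not
 * part of the original file; "struct test_obj" is a hypothetical entry
 * type embedding a struct rhash_head):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto stop;
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(resize: iterator rewound)
 *			break;
 *		}
 *		(use obj here)
 *	}
 *
 * stop:
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */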

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   (unsigned long)params->min_size);
}
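
/*
 * Worked example (illustrative, not part of the original file): a
 * nelem_hint of 100 gives roundup_pow_of_two(100 * 4 / 3) =
 * roundup_pow_of_two(133) = 256 buckets, sized so the hinted load
 * stays below the 75% grow threshold.
 */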

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
        return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void			*my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;

        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
        memcpy(&ht->p, params, sizeof(*params));

        if (params->min_size)
                ht->p.min_size = roundup_pow_of_two(params->min_size);

        if (params->max_size)
                ht->p.max_size = rounddown_pow_of_two(params->max_size);

        if (params->insecure_max_entries)
                ht->p.insecure_max_entries =
                        rounddown_pow_of_two(params->insecure_max_entries);
        else
                ht->p.insecure_max_entries = ht->p.max_size * 2;

        ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

        if (params->nelem_hint)
                size = rounded_hashtable_size(&ht->p);

        /* The maximum (not average) chain length grows with the
         * size of the hash table, at a rate of (log N)/(log log N).
         * The value of 16 is selected so that even if the hash
         * table grew to 2^32 you would not expect the maximum
         * chain length to exceed it unless we are under attack
         * (or extremely unlucky).
         *
         * As this limit is only to detect attacks, we don't need
         * to set it to a lower value as you'd need the chain
         * length to vastly exceed 16 to have any real effect
         * on the system.
         */
        if (!params->insecure_elasticity)
                ht->elasticity = 16;

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        ht->key_len = ht->p.key_len;
        if (!params->hashfn) {
                ht->p.hashfn = jhash;

                if (!(ht->key_len & (sizeof(u32) - 1))) {
                        ht->key_len /= sizeof(u32);
                        ht->p.hashfn = rhashtable_jhash2;
                }
        }

        tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);

        RCU_INIT_POINTER(ht->tbl, tbl);

        INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
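
/*
 * Minimal usage sketch (illustrative, not part of the original file),
 * pairing the fixed-length-key params from Example 1 above with the
 * init and teardown calls; "my_ht" and "params" are hypothetical names:
 *
 *	static struct rhashtable my_ht;
 *
 *	int err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 *
 *	(... insert, look up and remove entries ...)
 *
 *	rhashtable_destroy(&my_ht);
 */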

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
{
        const struct bucket_table *tbl;
        unsigned int i;

        cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;

                        for (pos = rht_dereference(tbl->buckets[i], ht),
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL;
                             !rht_is_a_nulls(pos);
                             pos = next,
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL)
                                free_fn(rht_obj(ht, pos), arg);
                }
        }

        bucket_table_free(tbl);
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
        return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);