/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
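/* Illustrative sizing only (not from the original source), ignoring the
 * CONFIG_PROVE_LOCKING special case: with 8 possible CPUs and the default
 * locks_mul of 128, size starts at roundup_pow_of_two(8 * 128) = 1024
 * spinlocks, and is then capped at half the bucket count, so a 64-bucket
 * table ends up with 32 bucket locks (locks_mask = 31).
 */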
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}
static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
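/* Illustrative sizing only (assumes 64-bit pointers and 4 KiB pages): a
 * 1024-bucket table needs sizeof(*tbl) + 1024 * 8 bytes, a little over
 * 8 KiB, which is below PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER (32 KiB), so
 * the kzalloc() path is tried first; only much larger tables, or a failed
 * kzalloc(), fall back to vzalloc() for GFP_KERNEL callers.
 */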
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
				rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}
static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;

	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}
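/* Sketch of the table chain once the attach has succeeded (illustration,
 * not from the original source):
 *
 *	ht->tbl ---> old_tbl ---> old_tbl->future_tbl ---> new_tbl
 *
 * New insertions follow future_tbl to the newest table, while lookups and
 * removals keep scanning every table in the chain until the rehash
 * completes and ht->tbl is switched over to new_tbl.
 */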
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
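/* Illustrative example (not from the original source): with 100 elements in
 * a 1024-bucket table, the target is roundup_pow_of_two(100 * 3 / 2) = 256
 * buckets (assuming min_size allows it); since 1024 > 256 the shrink
 * proceeds, and 100 entries in 256 buckets stay safely below the 75%
 * grow threshold checked by the deferred worker.
 */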
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

unlock:
	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned hash;
	int err = -EEXIST;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:
	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	mutex_lock(&ht->mutex);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	mutex_unlock(&ht->mutex);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
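/* Minimal walker usage sketch (illustration only; struct test_obj and the
 * surrounding error handling are assumptions, not part of this file):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto exit;
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(resize: iterator rewound)
 *			break;
 *		}
 *		... use obj under RCU protection ...
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 * exit:
 *	rhashtable_walk_exit(&iter);
 */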
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
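/* Illustrative example (not from the original source): nelem_hint = 100
 * gives 100 * 4 / 3 = 133, rounded up to the next power of two, i.e. 256
 * buckets, unless params->min_size is larger.
 */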
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
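/* Minimal setup/teardown sketch matching Configuration Example 1 above
 * (illustration only; the fast-path helpers live in <linux/rhashtable.h>,
 * and struct test_obj, obj and key are assumptions, not part of this file):
 *
 *	static struct rhashtable ht;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *	...
 *	found = rhashtable_lookup_fast(&ht, &key, params);
 *	...
 *	rhashtable_remove_fast(&ht, &obj->node, params);
 *
 *	rhashtable_destroy(&ht);
 */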
/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);