/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	free_bucket_spinlocks(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size, max_locks;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	max_locks = size >> 1;
	if (tbl->nest)
		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
				   ht->p.locks_mul, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);