// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head __rcu *bucket;
};
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}
static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}
static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}
static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg(prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}
static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}
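
/* A worked example (a sketch, assuming 4 KiB pages on a 64-bit build):
 * shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9, so one
 * nested page holds 1 << 9 = 512 pointers. For nbuckets = 8192,
 * tbl->nest = (ilog2(8192) - 1) % 9 + 1 = 4, giving a 16-slot top
 * level whose entries each cover 8192 >> 4 = 512 buckets, i.e. exactly
 * one leaf page per slot.
 */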
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head __rcu **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
			SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}
static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
	int err;

	if (!bkt)
		return 0;
	rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt);

	return err;
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
		return -EEXIST;

	return 0;
}
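
/* For illustration, this is how readers cope once future_tbl is
 * attached: the RCU lookup loop in linux/rhashtable.h (a paraphrased
 * sketch, not a verbatim copy) retries in the next table whenever the
 * current one misses:
 *
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 * restart:
 *	... walk the chain that tbl maps the key to ...
 *	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 *	if (tbl)
 *		goto restart;
 *
 * Attaching the new table is therefore enough to keep lookups correct
 * while the rehash below migrates entries chain by chain.
 */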
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}
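
/* A worked sizing example (a sketch): with nelems = 20 the target is
 * roundup_pow_of_two(20 * 3 / 2) = roundup_pow_of_two(30) = 32, so a
 * 128-bucket table shrinks to 32 buckets while staying under the 75%
 * growth watermark (20 < 24). With min_size = 64 the result would be
 * clamped to 64 instead.
 */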
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head __rcu **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}
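
/* For scale: RHT_ELASTICITY is 16 in linux/rhashtable.h, so a bucket
 * chain longer than 16 entries makes this function return -EAGAIN, and
 * the insert path responds by forcing a grow/rehash rather than letting
 * one pathological chain degrade lookups towards O(n).
 */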
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct rhash_lock_head __rcu **bkt,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head __rcu **bkt;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
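
/* A minimal caller sketch (the test_obj type, my_ht instance and
 * my_params are illustrative assumptions, not part of this file):
 * callers normally go through the inlined fast path in
 * linux/rhashtable.h, which falls back to the slow path above when a
 * rehash is in flight or the table must grow:
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, my_params);
 *	if (err)
 *		kfree(obj);
 */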
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
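
/* A minimal walk sketch, assuming a hypothetical test_obj type stored
 * in my_ht; rhashtable_walk_next() runs under RCU, so the loop body
 * must not sleep:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		... use obj ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 *
 * On -EAGAIN the iterator has already been rewound, so continuing the
 * loop simply restarts the walk on the resized table.
 */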
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}
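
/* A worked example (a sketch): nelem_hint = 100 yields
 * roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256
 * buckets, keeping the expected load near 100/256 = 39%, comfortably
 * below the 75% growth watermark. Without a hint the table starts at
 * HASH_DEFAULT_SIZE (64) buckets.
 */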
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
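
/* An end-to-end sketch of the fixed length key configuration from the
 * kernel-doc above; test_obj, my_ht, my_params and the trimmed error
 * handling are illustrative assumptions, not part of this file:
 *
 *	static const struct rhashtable_params my_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 *
 *	err = rhashtable_init(&my_ht, &my_params);
 *	...
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, my_params);
 *	obj = rhashtable_lookup_fast(&my_ht, &key, my_params);
 *	err = rhashtable_remove_fast(&my_ht, &obj->node, my_params);
 */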
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;

	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
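
/* A minimal rhltable sketch, assuming a hypothetical test_obj that
 * embeds a struct rhlist_head named list_node; unlike an rhashtable,
 * an rhltable keeps duplicate keys, so lookup returns a chain and
 * removal names the exact object:
 *
 *	err = rhltable_init(&my_hlt, &my_params);
 *	err = rhltable_insert(&my_hlt, &obj->list_node, my_params);
 *	...
 *	list = rhltable_lookup(&my_hlt, &key, my_params);   (under RCU)
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		... every object sharing the key ...
 *	err = rhltable_remove(&my_hlt, &obj->list_node, my_params);
 */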
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
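
/* A teardown sketch, assuming the elements were kmalloc'ed; free_fn
 * runs once per element, including every duplicate on an rhltable:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&my_ht, free_obj, NULL);
 */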
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
						 unsigned int hash)
{
	static struct rhash_lock_head __rcu *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
							struct bucket_table *tbl,
							unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);