/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL
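
/* Nested bucket tables are the fallback used by bucket_table_alloc() when
 * an atomic (GFP_ATOMIC) resize cannot allocate the bucket array
 * contiguously.  Each level of the resulting tree occupies one page: an
 * interior level is an array of 'table' pointers to the next level, while
 * the leaf level is an array of 'bucket' chain heads.
 */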
union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (tbl->nest)
		size = min(size, 1U << tbl->nest);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
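
/* Worked example of the sizing above: with the default locks_mul of 32
 * (BUCKET_LOCKS_PER_CPU) on an 8-CPU machine, nr_pcpus * locks_mul is 256,
 * already a power of two.  A 64-bucket table then caps this at
 * 64 >> 1 == 32 locks, i.e. one lock protecting every two buckets.
 */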

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = ht->elasticity;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			/* Keep pprev pointing at the mismatching element so
			 * that an rhlist duplicate is linked in place of its
			 * own head rather than in place of the whole chain.
			 */
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
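
/* A minimal insertion sketch (using the hypothetical test_obj from the
 * rhashtable_init() examples below); the inlined fast paths in
 * <linux/rhashtable.h> fall back to rhashtable_insert_slow() when they run
 * into a pending rehash or an overlong chain:
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *	if (err)
 *		kfree(obj);
 */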

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occured.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occured.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
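
/* A minimal walk sketch: rhashtable_walk_next() may return
 * ERR_PTR(-EAGAIN) after a resize, in which case the iterator has rewound
 * and the walk simply continues (objects may then be seen twice).
 * 'visit' is a hypothetical callback:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	// -EAGAIN: table was resized
 *		visit(obj);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */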

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
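
/* For example, nelem_hint == 600 gives 600 * 4 / 3 == 800, rounded up to a
 * 1024-bucket initial table, so the hinted population starts out below the
 * 75% growth watermark checked by rht_grow_above_75().
 */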

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
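
/* A minimal initialisation sketch for Example 1 above (error paths elided;
 * 'params' may be a static const object):
 *
 *	static struct rhashtable ht;
 *
 *	err = rhashtable_init(&ht, &params);
 *	...
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 */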

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
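
/* A minimal rhlist sketch: objects embed a struct rhlist_head instead of a
 * struct rhash_head, and duplicate keys are chained off a single bucket
 * entry (hypothetical 'lnode' member):
 *
 *	err = rhltable_insert(&hlt, &obj->lnode, params);
 */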

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occurs in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
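
/* A minimal teardown sketch; free_fn runs once per remaining element, so a
 * plain kfree() wrapper suffices for objects holding no other resources:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */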

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
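
/* Worked example of the decomposition above, assuming 4K pages and 8-byte
 * pointers (shift == 9, 512 entries per page): a nested table of
 * tbl->size == 16384 gets tbl->nest == 5 ((ilog2(16384) - 1) % 9 + 1),
 * i.e. 32 top-level slots each pointing at one leaf page of 512 buckets.
 * hash & 31 selects the slot and subhash == hash >> 5 indexes the leaf,
 * so &ntbl[subhash].bucket is reached without entering the while loop.
 */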

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);