/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking because locking the bucket in both
 * tables during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}
static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}
static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}
static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}
static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}
#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
			       const struct bucket_table *tbl)
{
	struct rhash_head *he;
	unsigned int i, hash;

	for (i = 0; i < tbl->size; i++) {
		pr_warn(" [Bucket %d] ", i);
		rht_for_each_rcu(he, tbl, i) {
			hash = head_hashfn(ht, tbl, he);
			pr_cont("[hash = %#x, lock = %p] ",
				hash, bucket_lock(tbl, hash));
		}
		pr_cont("\n");
	}
}

static void debug_dump_table(struct rhashtable *ht,
			     const struct bucket_table *tbl,
			     unsigned int hash)
{
	struct bucket_table *old_tbl, *future_tbl;

	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
		 hash, tbl);

	rcu_read_lock();
	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	if (future_tbl != old_tbl) {
		pr_warn("Future table %p (size: %zd)\n",
			future_tbl, future_tbl->size);
		debug_dump_buckets(ht, future_tbl);
	}

	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
	debug_dump_buckets(ht, old_tbl);
	rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
	do {								\
		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \
			debug_dump_table(HT, TBL, HASH);		\
			BUG();						\
		}							\
	} while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif
static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
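
/* Worked example of the lock sizing above (illustrative numbers only, not
 * derived from any particular machine): with the default locks_mul of
 * BUCKET_LOCKS_PER_CPU (128) and 4 possible CPUs,
 * roundup_pow_of_two(4 * 128) = 512 candidate locks; a 256-bucket table
 * then caps this at 256 >> 1 = 128 locks, so locks_mask ends up as 127 and
 * each lock covers two buckets.
 */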
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);
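
/* Illustrative arithmetic for the 75% watermark above: with new_size = 128,
 * new_size / 4 * 3 = 96, so growth is requested once nelems reaches 97,
 * provided max_shift still permits another doubling.
 */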
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
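
/* Illustrative arithmetic for the 30% watermark above: with new_size = 64,
 * new_size * 3 / 10 = 19, so shrinking is requested once nelems drops
 * below 19, as long as shift is still above min_shift.
 */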
static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}
/**
 * Unlink entries on bucket which hash to different bucket.
 *
 * Returns true if the old bucket still contains entries, i.e. more
 * unzip work remains to be done on it.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  unsigned int old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same bucket as the
	 * previous node. Call that previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}
static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}
/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table is picked up
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * newly formed bucket chain (containing entries added to future
	 * table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): unzip until the old bucket is drained.
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
		}
	}

	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
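
/* Small worked example of the zip/unzip scheme above (illustrative only):
 * when growing from 2 to 4 buckets, new buckets 0 and 2 both map back to
 * old bucket 0 (rht_bucket_index() masks with the old size), while new
 * buckets 1 and 3 map back to old bucket 1. Right after the zip phase each
 * old chain is therefore shared by two new buckets, and the unzip loop then
 * severs the cross links, one RCU grace period per pass.
 */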
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
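
/* Illustrative mapping for the halving above: when shrinking from 8 to 4
 * buckets, new bucket n receives the chains of old buckets n and n + 4,
 * e.g. new bucket 1 is the concatenation of old buckets 1 and 5, with both
 * tails appended under the same bucket lock before the new table is
 * published.
 */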
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}
static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}
static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}
/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	u32 hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
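
/* Minimal insertion sketch, assuming the hypothetical struct test_obj from
 * the rhashtable_init() kernel-doc further below (node is its embedded
 * struct rhash_head) and a table my_ht that was set up beforehand:
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	rhashtable_insert(&my_ht, &obj->node);
 */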
/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	tbl = old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);

		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * very rare.
	 */
	if (tbl != new_tbl) {
		tbl = new_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}
/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
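
/* Minimal lookup sketch under RCU, again assuming the hypothetical
 * struct test_obj keyed by an int and the table my_ht from the insertion
 * sketch above:
 *
 *	struct test_obj *obj;
 *	int key = 42;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key);
 *	if (obj)
 *		pr_info("found key %d\n", obj->key);
 *	rcu_read_unlock();
 */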
/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
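
/* Usage sketch (illustrative, same hypothetical test_obj as above): insert
 * only if the key is not already present, and free the unlinked object on a
 * duplicate:
 *
 *	if (!rhashtable_lookup_insert(&my_ht, &obj->node))
 *		kfree(obj);
 */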
/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);
	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;
	iter->walker->resize = false;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
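
/* Sketch of a complete walk using the iterator API above (my_ht and
 * struct test_obj are the made-up names from the earlier examples):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&my_ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 *
 * rhashtable_walk_start() may return -EAGAIN; the iterator simply rewinds,
 * and rhashtable_walk_stop() must be called either way since the RCU read
 * lock is taken in all cases.
 */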
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[hash key]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
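
/* Minimal initialisation sketch tying the example parameters above to an
 * actual call (my_ht and my_module_init are made-up names for illustration):
 *
 *	static struct rhashtable my_ht;
 *
 *	static int __init my_module_init(void)
 *	{
 *		return rhashtable_init(&my_ht, &params);
 *	}
 */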
/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);