/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
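/* Example (illustrative, not from the original source): with
 * locks_mask == 127, hashes 0x05 and 0x85 both select tbl->locks[5],
 * so several buckets can share one lock. That sharing is why the
 * ordering rule above (old table bucket lock before new table bucket
 * lock) matters: it prevents ABBA deadlocks while a resize is
 * relinking entries between the two tables.
 */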
#define ASSERT_RHT_MUTEX(HT)	BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(TBL, HASH) \
	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif
static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}
static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	u32 hash;

	hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
	hash >>= HASH_RESERVED_SPACE;

	return rht_bucket_index(tbl, hash);
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than one lock per bucket */
	size = min_t(unsigned int, size, tbl->size);

	if (sizeof(spinlock_t) != 0) {
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	u32 i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);
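/* Worked example (not from the original source): for new_size = 64,
 * 64 / 4 * 3 = 48, so a grow is requested once the table holds 49 or
 * more elements, provided a max_shift limit is configured and has not
 * been reached yet.
 */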
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
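/* Worked example (not from the original source): with new_size = 64,
 * 64 * 3 / 10 = 19, so a shrink is requested only once fewer than 19
 * elements remain and the current shift is still above min_shift.
 */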
static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  unsigned int old_hash)
{
	struct rhash_head *he, *p, *next;
	spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return;

	new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same bucket as the
	 * previous node p; call the node just before it p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	/* If we have encountered an entry that maps to a different bucket in
	 * the new table, lock down that bucket as well as we might cut off
	 * the end of the chain.
	 */
	new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
	if (new_bucket_lock != new_bucket_lock2)
		spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket
	 */
	rcu_assign_pointer(p->next, next);

	if (new_bucket_lock != new_bucket_lock2)
		spin_unlock_bh(new_bucket_lock2);
	spin_unlock_bh(new_bucket_lock);
}
static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	spinlock_t *new_bucket_lock;

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
	spin_unlock_bh(new_bucket_lock);
}
/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	spinlock_t *old_bucket_lock;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees for the new table to be picked up
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * newly formed bucket chain (containing entries added to future
	 * table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		old_bucket_lock = bucket_lock(old_tbl, old_hash);

		spin_lock_bh(old_bucket_lock);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		spin_unlock_bh(old_bucket_lock);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table), unzip the chain so that entries belonging
		 * to different new buckets are no longer linked.
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			struct rhash_head *head;

			old_bucket_lock = bucket_lock(old_tbl, old_hash);
			spin_lock_bh(old_bucket_lock);

			hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
			head = rht_dereference_bucket(old_tbl->buckets[old_hash],
						      old_tbl, old_hash);
			if (!rht_is_a_nulls(head))
				complete = false;

			spin_unlock_bh(old_bucket_lock);
		}
	}

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
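/* Usage sketch (mirrors the self test at the end of this file;
 * illustrative only):
 *
 *	mutex_lock(&ht->mutex);
 *	err = rhashtable_expand(ht);
 *	mutex_unlock(&ht->mutex);
 *
 * Holding ht->mutex satisfies ASSERT_RHT_MUTEX() and serializes the
 * expansion against other resizes, while lookups keep running under RCU.
 */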
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 *
	 * As removals can occur concurrently on the old table, we need
	 * to lock down both matching buckets in the old table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_bucket_lock1 = bucket_lock(tbl, new_hash);
		old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
		new_bucket_lock = bucket_lock(new_tbl, new_hash);

		spin_lock_bh(old_bucket_lock1);

		/* Depending on the lock per buckets mapping, the bucket in
		 * the lower and upper region may map to the same lock.
		 */
		if (old_bucket_lock1 != old_bucket_lock2) {
			spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
		} else {
			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
		}

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		spin_unlock_bh(new_bucket_lock);
		if (old_bucket_lock1 != old_bucket_lock2)
			spin_unlock_bh(old_bucket_lock2);
		spin_unlock_bh(old_bucket_lock1);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
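/* Bucket mapping example (illustrative only): when shrinking from 8 to 4
 * buckets, new bucket 1 collects the chains of old buckets 1 and
 * 1 + new_tbl->size = 5, which is exactly why both old bucket locks are
 * taken in the loop above.
 */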
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}
static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}
/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	spinlock_t *lock;
	u32 hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);
	__rhashtable_insert(ht, obj, tbl, hash);
	spin_unlock_bh(lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
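/* Usage sketch (uses the self test's struct test_obj; illustrative only):
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	obj->value = 42;
 *	rhashtable_insert(ht, &obj->node);
 *
 * The caller does not need any extra locking: the per bucket lock and the
 * RCU read-side critical section are taken internally.
 */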
/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	u32 hash;
	bool ret = false;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock = bucket_lock(tbl, hash);
	spin_lock_bh(lock);

restart:
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);

		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * permissible.
	 */
	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
		spin_unlock_bh(lock);

		tbl = rht_dereference_rcu(ht->future_tbl, ht);
		hash = head_hashfn(ht, tbl, obj);

		lock = bucket_lock(tbl, hash);
		spin_lock_bh(lock);
		goto restart;
	}

	spin_unlock_bh(lock);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
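/* Usage sketch (illustrative only): removal matches on the rhash_head
 * pointer itself, so pass the object that was originally inserted:
 *
 *	if (rhashtable_remove(ht, &obj->node))
 *		kfree_rcu(obj, rcu);
 *
 * kfree_rcu() assumes the object embeds a struct rcu_head named "rcu";
 * the self test below simply calls kfree() because it has no concurrent
 * readers at that point.
 */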
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}
/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
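/* Usage sketch (uses the self test's struct test_obj, keyed by its int
 * "value"; illustrative only):
 *
 *	u32 key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj)
 *		pr_info("found %p\n", obj->ptr);
 *	rcu_read_unlock();
 *
 * Holding the RCU read lock across the lookup and the use of the result
 * is what keeps the object alive against writers that free via RCU.
 */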
/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}

	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
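/* Usage sketch (illustrative only; my_match() is a hypothetical helper,
 * not part of this file):
 *
 *	static bool my_match(void *ptr, void *arg)
 *	{
 *		return ((struct test_obj *)ptr)->value == *(u32 *)arg;
 *	}
 *
 *	obj = rhashtable_lookup_compare(ht, &key, my_match, &key);
 *
 * The key is only used to select the bucket; the compare callback decides
 * what constitutes a match, which is how variable length or composite
 * keys can be supported.
 */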
/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
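/* Usage sketch (illustrative only): enforce key uniqueness from atomic
 * context by inserting only when no entry with the same key exists:
 *
 *	if (!rhashtable_lookup_insert(ht, &obj->node))
 *		kfree(obj);
 *
 * The return value is true when the object was inserted and false when an
 * entry with the same key already existed, in which case obj was not
 * linked and remains owned by the caller.
 */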
/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	spinlock_t *new_bucket_lock, *old_bucket_lock;
	u32 new_hash, old_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	old_hash = head_hashfn(ht, old_tbl, obj);
	old_bucket_lock = bucket_lock(old_tbl, old_hash);
	spin_lock_bh(old_bucket_lock);

	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);
	if (unlikely(old_tbl != new_tbl))
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	if (unlikely(old_tbl != new_tbl))
		spin_unlock_bh(new_bucket_lock);
	spin_unlock_bh(old_bucket_lock);

	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
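/* Initialization sketch (mirrors test_rht_init() below; illustrative
 * only):
 *
 *	struct rhashtable ht;
 *	struct rhashtable_params params = {
 *		.nelem_hint	= 1024,
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.key_offset	= offsetof(struct test_obj, value),
 *		.key_len	= sizeof(int),
 *		.hashfn		= jhash,
 *		.grow_decision	= rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 *	int err = rhashtable_init(&ht, &params);
 *
 * Supplying grow_decision/shrink_decision arms the deferred resize
 * worker; leaving both NULL keeps the table at its initial size.
 */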
/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
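/* Teardown sketch (illustrative only): rhashtable_destroy() frees only
 * the bucket array, never the objects, so quiesce all writers and drop
 * the entries first; the self test below removes each object with
 * rhashtable_remove() and kfree()s it before calling
 * rhashtable_destroy().
 */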
/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

struct test_obj {
	void			*ptr;
	int			value;
	struct rhash_head	node;
};
static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}
static void test_bucket_stats(struct rhashtable *ht, bool quiet)
{
	unsigned int cnt, rcu_cnt, i, total = 0;
	struct rhash_head *pos;
	struct test_obj *obj;
	struct bucket_table *tbl;

	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++) {
		rcu_cnt = cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		rht_for_each_entry_rcu(obj, pos, tbl, i, node)
			rcu_cnt++;

		if (rcu_cnt != cnt)
			pr_warn("Test failed: Chain count mismatch %d != %d",
				cnt, rcu_cnt);

		if (!quiet)
			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
		total, atomic_read(&ht->nelems), TEST_ENTRIES);

	if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
		pr_warn("Test failed: Total count mismatch ^^^");
}
static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj;
	struct rhash_head *pos, *next;
	int err;
	unsigned int i;

	/*
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info("  Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node);
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table expansion iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_expand(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table shrinkage iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_shrink(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	rcu_read_unlock();

	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
			kfree(obj);

	return err;
}
static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = jhash,
		.nulls_base = (3U << RHT_BASE_SHIFT),
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */