/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rculist.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
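
/* Worked example (editor's sketch, not part of the original header):
 * NULLS_MARKER(v) from <linux/list_nulls.h> evaluates to
 * (1UL | (((long)v) << 1)), so with nulls_base = (1U << RHT_BASE_SHIFT)
 * a bucket whose first element produced hash 5 is terminated by
 *
 *	NULLS_MARKER((1U << RHT_BASE_SHIFT) + 5)
 *		== ((((1UL << 27) + 5) << 1) | 1)
 *
 * i.e. the base lands in the top bits, the hash in the middle, and bit 0
 * is set, which is what rht_is_a_nulls() below tests for.
 */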

struct rhash_head {
	struct rhash_head __rcu		*next;
};

struct rhlist_head {
	struct rhash_head		rhead;
	struct rhlist_head __rcu	*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @ntbl: Nested table used when out of memory.
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		insecure_max_entries;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
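
/* Example (editor's sketch): a minimal parameter block for a hypothetical
 * object keyed by a u32.  The names "test_obj" and "test_params" are made
 * up for illustration; compare the self-test in lib/test_rhashtable.c.
 *
 *	struct test_obj {
 *		u32			value;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.nelem_hint	= 1024,
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.key_offset	= offsetof(struct test_obj, value),
 *		.key_len	= sizeof(u32),
 *		.automatic_shrinking = true,
 *	};
 */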

/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @rhlist: True if this is an rhltable
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	bool				rhlist;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};

/**
 * struct rhltable - Hash table with duplicate objects in a list
 * @ht: Underlying rhtable
 */
struct rhltable {
	struct rhashtable ht;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator
 * @ht: Table to iterate through
 * @p: Current pointer
 * @list: Current hash list pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhlist_head *list;
	struct rhashtable_walker walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt,
		  const struct rhashtable_params *params);

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
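
/* Walk sketch (editor's illustration, hypothetical names).
 * rhashtable_walk_start() takes the RCU read lock and
 * rhashtable_walk_stop() releases it, so the loop body must not sleep.
 * A return of ERR_PTR(-EAGAIN) means a resize forced a restart and some
 * entries may be seen twice; the usual response is to keep iterating:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		count++;
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */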

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash);		 \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	 \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash),	\
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
	rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
					tbl, hash, member)
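
/* Chain-scan sketch (editor's illustration; "struct test_obj" and its
 * "node" member are hypothetical, matching the parameter example above).
 * Must run under rcu_read_lock(), with tbl obtained via
 * rht_dereference_rcu() and hash via rht_key_hashfn():
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
 *		if (obj->value == wanted)
 *			return obj;
 *	}
 */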

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))
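
/* Duplicate-list sketch (editor's illustration; "list_node" and the other
 * names are hypothetical).  rhltable_lookup() returns the head of the
 * chain of equal-key objects, to be consumed under RCU:
 *
 *	struct rhlist_head *head, *pos;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	head = rhltable_lookup(&my_hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, head, list_node) {
 *		count++;
 *	}
 *	rcu_read_unlock();
 */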

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		return he;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}
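
/* Lookup sketch (editor's illustration, hypothetical names).  The RCU
 * read lock must cover both the lookup and every use of the returned
 * object, since a concurrent removal may free it after a grace period:
 *
 *	struct test_obj *obj;
 *	u32 key = 42;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key, test_params);
 *	if (obj)
 *		pr_info("found %u\n", obj->value);
 *	rcu_read_unlock();
 */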

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}

/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}

/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if there
 * is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = ht->elasticity;
	pprev = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!pprev)
		goto out;

	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take the per-bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
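
/* Insert sketch (editor's illustration, hypothetical names).  Safe from
 * atomic context since only the per-bucket spinlock is taken; no key is
 * passed here, so duplicate keys are not checked on this path:
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->value = 42;
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 */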

/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per-bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per-bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for a fixed-key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_insert_key - search and insert object into hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}
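
/* Insert-or-get sketch (editor's illustration, hypothetical names; the
 * table must have been configured with an obj_hashfn).  NULL means the
 * new object went in; a valid pointer is the pre-existing element:
 *
 *	void *old;
 *
 *	old = rhashtable_lookup_get_insert_key(&my_ht, &key, &obj->node,
 *					       my_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	if (old) {
 *		kfree(obj);
 *		obj = old;
 *	}
 */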

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if automatic shrinking is enabled
 * and the load drops below the 30% watermark (see rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}
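
/* Removal sketch (editor's illustration; assumes "struct test_obj" carries
 * a struct rcu_head "rcu" member).  The table never frees objects itself,
 * and RCU readers may still hold the entry, so defer the free:
 *
 *	err = rhashtable_remove_fast(&my_ht, &obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu);
 */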

/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if automatic shrinking is enabled
 * and the load drops below the 30% watermark (see rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/* Obsolete function, do not use in new code. */
static inline int rhashtable_walk_init(struct rhashtable *ht,
				       struct rhashtable_iter *iter, gfp_t gfp)
{
	rhashtable_walk_enter(ht, iter);
	return 0;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}
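
/* Lifecycle sketch (editor's illustration) tying the pieces together with
 * the hypothetical "test_params" above.  rhashtable_free_and_destroy()
 * passes every remaining element to the callback before tearing down:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	err = rhashtable_init(&my_ht, &test_params);
 *	if (err)
 *		return err;
 *	...
 *	rhashtable_free_and_destroy(&my_ht, free_obj, NULL);
 */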

#endif /* _LINUX_RHASHTABLE_H */