/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

struct rhash_head {
	struct rhash_head __rcu		*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		insecure_max_entries;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};

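/* Example (illustrative sketch only, not part of this header's API): a
 * minimal parameter block for an object embedding a rhash_head. The
 * names test_obj, node and key are hypothetical:
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.nelem_hint = 192,
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 */
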
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

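/* Worked example (illustrative; assumes the NULLS_MARKER() definition in
 * <linux/list_nulls.h>, i.e. 1UL | (((long)v) << 1)): with
 * nulls_base = (1U << RHT_BASE_SHIFT) and hash = 5, rht_marker() yields
 * ((nulls_base + 5) << 1) | 1 - an odd "pointer" that rht_is_a_nulls()
 * recognises and that rht_get_nulls_value() decodes back to
 * nulls_base + 5.
 */
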
static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

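/* Worked example (illustrative): with tbl->size = 64, the low
 * RHT_HASH_RESERVED_SPACE (5) bits of the 32-bit hash are skipped and
 * the result is masked with (64 - 1), so the bucket index comes from
 * bits 5..10 of the full hash value.
 */
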
static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking as taking the bucket lock in both
 * tables during resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);

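/* Sketch of table setup (test_ht and test_params are the hypothetical
 * names from the example next to struct rhashtable_params above):
 *
 *	static struct rhashtable test_ht;
 *
 *	int err = rhashtable_init(&test_ht, &test_params);
 *	if (err)
 *		return err;
 */
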
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

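/* Sketch of a walk over all entries (hypothetical test_ht/test_obj from
 * the examples above). rhashtable_walk_next() returns ERR_PTR(-EAGAIN)
 * when a concurrent resize forces a restart of the walk:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&test_ht, &iter);
 *	if (err)
 *		return err;
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto exit;
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 * exit:
 *	rhashtable_walk_exit(&iter);
 */
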
#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash);		\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash),   \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)				\
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)

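/* Sketch of iterating one bucket under RCU (hypothetical test_obj from
 * the example above; tbl and hash are assumed to be a valid table and
 * bucket index):
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("key %d\n", obj->key);
 *	rcu_read_unlock();
 */
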
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}

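/* Sketch of a fast lookup (hypothetical test_ht/test_params from the
 * examples above). The caller must hold rcu_read_lock() for as long as
 * it uses the returned object; passing the constant params struct lets
 * the compiler resolve the hash function at build time:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&test_ht, &key, test_params);
 *	if (obj)
 *		pr_info("found %d\n", obj->key);
 *	rcu_read_unlock();
 */
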
/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int elasticity;
	unsigned int hash;
	int err;

restart:
	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, params);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
		if (err == -EAGAIN)
			goto slow_path;
		goto out;
	}

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
		spin_unlock_bh(lock);
		err = rhashtable_insert_rehash(ht);
		rcu_read_unlock();
		if (err)
			return err;

		goto restart;
	}

	err = -EEXIST;
	elasticity = ht->elasticity;
	rht_for_each(head, tbl, hash) {
		if (key &&
		    unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
		if (!--elasticity)
			goto slow_path;
	}

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows above 75%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_insert_fast(ht, NULL, obj, params);
}

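/* Sketch of an insertion (hypothetical test_obj/test_ht/test_params from
 * the examples above):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&test_ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 */
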
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows above 75%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows above 75%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (unlikely(ht->p.automatic_shrinking &&
		     rht_shrink_below_30(ht, tbl)))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}

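/* Sketch of removal (hypothetical test_obj/test_ht/test_params from the
 * examples above). The object is not freed by the table; the caller must
 * free it after concurrent RCU readers are done, e.g. (only valid in a
 * context that may sleep):
 *
 *	err = rhashtable_remove_fast(&test_ht, &obj->node, test_params);
 *	if (err == 0) {
 *		synchronize_rcu();
 *		kfree(obj);
 *	}
 */
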
#endif /* _LINUX_RHASHTABLE_H */