/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
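
/*
 * Example (illustrative sketch, not part of the API): with nulls_base
 * set to (1U << RHT_BASE_SHIFT) and a full hash of 0x5, the marker
 * terminating the chain would be built as
 *
 *	NULLS_MARKER((1UL << RHT_BASE_SHIFT) + 0x5)
 *
 * i.e. the sum shifted left by one with the low bit set, matching the
 * layout above. rht_is_a_nulls() below tests that low bit and
 * rht_get_nulls_value() recovers the sum.
 */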

struct rhash_head {
	struct rhash_head __rcu		*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
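
/*
 * Example (illustrative sketch, names hypothetical): embedding a
 * &struct rhash_head in an object and describing the layout to the
 * table:
 *
 *	struct test_obj {
 *		int			value;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.nelem_hint	= 192,
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.key_offset	= offsetof(struct test_obj, value),
 *		.key_len	= sizeof(int),
 *	};
 *
 * With key_len set and no hashfn/obj_cmpfn supplied, hashing and
 * comparison default to jhash()/jhash2() and memcmp().
 */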

/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @being_destroyed: True if table is set up for destruction
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	bool				being_destroyed;
	unsigned int			key_len;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}
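
/*
 * rht_obj() undoes the head_offset embedding: given a chain pointer it
 * returns the start of the containing object. Illustrative sketch with
 * the hypothetical test_obj above:
 *
 *	struct test_obj *obj = rht_obj(ht, he);
 *
 * after which &obj->node == he.
 */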

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}
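
/*
 * Note on the params argument: it is passed by value so that when the
 * caller supplies a compile-time constant &struct rhashtable_params,
 * __builtin_constant_p() lets the compiler discard the untaken branches
 * above and inline the chosen hash function. Illustrative sketch using
 * the hypothetical test_params above: a call such as
 *
 *	hash = rht_key_hashfn(ht, tbl, key, test_params);
 *
 * with a constant key_len of sizeof(int) should compile down to a
 * direct jhash2() call with no indirect branch.
 */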

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as taking the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *old_tbl);

int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
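
/*
 * Typical walk (illustrative sketch, hypothetical test_obj from above):
 * the iterator tolerates concurrent mutations and resizes;
 * rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when a resize forces
 * the walk to restart, in which case objects may be seen twice:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto exit;
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("value %d\n", obj->value);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 * exit:
 *	rhashtable_walk_exit(&iter);
 */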

void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
				    tbl, hash, member)
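
/*
 * Illustrative sketch (hypothetical test_obj from above): walking one
 * bucket on the writer side, i.e. with the bucket lock held:
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	spin_lock_bh(rht_bucket_lock(tbl, hash));
 *	rht_for_each_entry(obj, pos, tbl, hash, node)
 *		pr_info("value %d\n", obj->value);
 *	spin_unlock_bh(rht_bucket_lock(tbl, hash));
 */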

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ? \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = next, \
	     next = !rht_is_a_nulls(pos) ? \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert_fast() as long as
 * the traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert_fast() as long as
 * the traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert_fast() as long as
 * the traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert_fast() as long as
 * the traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)
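
/*
 * Illustrative sketch (hypothetical test_obj from above): a lockless
 * reader scanning one bucket, valid only inside an RCU read-side
 * critical section:
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("value %d\n", obj->value);
 *	rcu_read_unlock();
 */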

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry for which the compare function reported a match
 * (i.e. returned zero, following memcmp() semantics), or NULL if no entry
 * matches.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}
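
/*
 * Illustrative sketch (hypothetical test_obj/test_params from above):
 * callers pass the params as a compile-time constant so the lookup is
 * fully inlined:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(ht, &key, test_params);
 *	if (obj)
 *		pr_info("found %d\n", obj->value);
 *	rcu_read_unlock();
 *
 * The returned object is only guaranteed to stay alive for the duration
 * of the surrounding RCU read-side critical section.
 */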

static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	int err = -EEXIST;
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	/* Because we have already taken the bucket lock in tbl,
	 * if we find that future_tbl is not yet visible then
	 * that guarantees all other insertions of the same entry
	 * will also grab the bucket lock in tbl because until
	 * the rehash completes ht->tbl won't be changed.
	 */
	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
		goto out;
	}

	if (!key)
		goto skip_lookup;

	rht_for_each(head, tbl, hash) {
		if (unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
	}

skip_lookup:
	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark checked by rht_grow_above_75().
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_insert_fast(ht, NULL, obj, params);
}
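
/*
 * Illustrative sketch (hypothetical test_obj/test_params from above):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->value = 42;
 *
 *	err = rhashtable_insert_fast(ht, &obj->node, test_params);
 *
 * On failure the object is not linked in and remains owned by the
 * caller.
 */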

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark checked by rht_grow_above_75().
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}

/**
 * rhashtable_lookup_insert_key - search and insert object into hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark checked by rht_grow_above_75().
 *
 * Returns zero on success, -EEXIST if an entry with an identical key
 * already exists.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params);
}

static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * number of elements drops below the 30% watermark checked by
 * rht_shrink_below_30().
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (rht_shrink_below_30(ht, tbl))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}
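
/*
 * Illustrative sketch (hypothetical test_obj/test_params from above,
 * assuming test_obj also embeds a struct rcu_head named "rcu"): since
 * lockless readers may still be traversing the chain, the object must
 * not be freed until an RCU grace period has elapsed:
 *
 *	err = rhashtable_remove_fast(ht, &obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu);
 */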

#endif /* _LINUX_RHASHTABLE_H */