/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include <linux/rhashtable-types.h>
/*
 * The end of the chain is marked with a special nulls marker which has
 * the least significant bit set.
 */

/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

#define INIT_RHT_NULLS_HEAD(ptr) \
	((ptr) = (typeof(ptr)) NULLS_MARKER(0))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}

static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

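/* For example, with tbl->size == 1024 the check above requests an
 * expansion once nelems exceeds 1024 / 4 * 3 == 768 entries, i.e. at
 * 75% load, provided ht->p.max_size (if set) has not been reached yet.
 */
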
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

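/* Worked example of the lock striping described above: when expanding
 * from 512 to 1024 buckets with 512 bucket locks (locks_mask == 511),
 * the entries of old bucket h are relinked into new buckets h and
 * h + 512.  Since (h & 511) == ((h + 512) & 511), both new buckets map
 * to the same lock, which is what makes the two-table locking scheme
 * above sufficient.
 */
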
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

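/* A minimal walker sketch (illustrative only; "struct test_obj" and the
 * table "ht" are hypothetical, not part of this header):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	// resize occurred, entries may repeat
 *			break;
 *		}
 *		// process obj; sleeping is not allowed between start and stop
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */
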
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ? \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = next, \
	     next = !rht_is_a_nulls(pos) ? \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
					tbl, hash, member)

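/* Sketch of scanning a single bucket under RCU with the iterator above
 * (illustrative; "struct test_obj", its "key" field and the rhash_head
 * member "node" are hypothetical, and "hash" is a bucket index obtained
 * e.g. from rht_key_hashfn()):
 *
 *	struct bucket_table *tbl;
 *	struct rhash_head *pos;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("found key %d\n", obj->key);
 *	rcu_read_unlock();
 */
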

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list) \
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 * @member: name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
	for (pos = list; pos && rht_entry(tpos, pos, member); \
	     pos = rcu_dereference_raw(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		return he;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}

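/* A lookup sketch (illustrative; the object type, its key layout and the
 * params below are hypothetical, not defined by this header):
 *
 *	struct test_obj {
 *		int			id;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len     = sizeof(int),
 *		.key_offset  = offsetof(struct test_obj, id),
 *		.head_offset = offsetof(struct test_obj, node),
 *	};
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key, test_params);
 *	if (obj)
 *		use(obj);	// valid only inside this RCU read section
 *	rcu_read_unlock();
 */
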
/**
 * rhltable_lookup - search hash list table
 * @hlt: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}

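/* Duplicate-key lookup sketch (illustrative; reuses the hypothetical
 * test_obj/test_params from the previous example, but with an embedded
 * struct rhlist_head member named "list_node" instead of a rhash_head):
 *
 *	struct rhlist_head *head, *pos;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	head = rhltable_lookup(&hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, head, list_node)
 *		pr_info("match: %d\n", obj->id);
 *	rcu_read_unlock();
 */
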
/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if there
 * is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!pprev)
		goto out;

	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per-bucket spinlock to protect against concurrent mutations
 * of the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

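/* Insertion sketch (illustrative; reuses the hypothetical test_obj and
 * test_params from the lookup example further up):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->id = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);	// not linked, e.g. -E2BIG or -ENOMEM
 */
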
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt: hash list table
 * @key: the pointer to the key
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take a per-bucket spinlock to protect against concurrent mutations
 * of the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take a per-bucket spinlock to protect against concurrent mutations
 * of the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not exist and the insertion was
 * successful, and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}

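/* Insert-or-get sketch (illustrative, same hypothetical types as above):
 * either the new object is linked in, or the already-present duplicate is
 * returned and the caller keeps ownership of the new one:
 *
 *	struct test_obj *old;
 *
 *	old = rhashtable_lookup_get_insert_fast(&ht, &nobj->node,
 *						test_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// insertion failed
 *	if (old) {
 *		kfree(nobj);		// duplicate existed; use "old"
 *		nobj = old;
 *	}
 */
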
/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht: hash table
 * @key: key to use for the lookup and insertion
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it did not exist and the insertion was
 * successful, and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}

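/* Removal sketch (illustrative, same hypothetical types as above, plus an
 * assumed struct rcu_head member "rcu" in test_obj). Concurrent RCU
 * readers may still hold the object, so defer the free:
 *
 *	if (rhashtable_remove_fast(&ht, &obj->node, test_params) == 0)
 *		kfree_rcu(obj, rcu);
 */
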
/**
 * rhltable_remove - remove object from hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have the same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht: hash table
 * @obj_old: pointer to hash head inside object being replaced
 * @obj_new: pointer to hash head inside object which is new
 * @params: hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

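/* Replacement sketch (illustrative, same hypothetical types as above);
 * the two objects must carry the same key so that they hash identically,
 * otherwise -EINVAL is returned:
 *
 *	nobj->id = old->id;
 *	err = rhashtable_replace_fast(&ht, &old->node, &nobj->node,
 *				      test_params);
 *	if (!err)
 *		kfree_rcu(old, rcu);	// readers may briefly still see "old"
 */
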
/* Obsolete function, do not use in new code. */
static inline int rhashtable_walk_init(struct rhashtable *ht,
				       struct rhashtable_iter *iter, gfp_t gfp)
{
	rhashtable_walk_enter(ht, iter);
	return 0;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt: Table to walk over
 * @iter: Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptible context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt: the hash list table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */