lib/rhashtable.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Resizable, Scalable, Concurrent Hash Table
4 *
5 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
6 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
7 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
8 *
9 * Code partially derived from nft_hash
10 * Rewritten with rehash code from br_multicast plus single list
11 * pointer as suggested by Josh Triplett
12 */
13
14 #include <linux/atomic.h>
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/log2.h>
18 #include <linux/sched.h>
19 #include <linux/rculist.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26 #include <linux/err.h>
27 #include <linux/export.h>
28
29 #define HASH_DEFAULT_SIZE 64UL
30 #define HASH_MIN_SIZE 4U
31
32 union nested_table {
33 union nested_table __rcu *table;
34 struct rhash_lock_head *bucket;
35 };
36
37 static u32 head_hashfn(struct rhashtable *ht,
38 const struct bucket_table *tbl,
39 const struct rhash_head *he)
40 {
41 return rht_head_hashfn(ht, tbl, he, ht->p);
42 }
43
44 #ifdef CONFIG_PROVE_LOCKING
45 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
46
47 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
48 {
49 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
50 }
51 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
52
53 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
54 {
55 if (!debug_locks)
56 return 1;
57 if (unlikely(tbl->nest))
58 return 1;
59 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
60 }
61 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
62 #else
63 #define ASSERT_RHT_MUTEX(HT)
64 #endif
65
66 static inline union nested_table *nested_table_top(
67 const struct bucket_table *tbl)
68 {
69 /* The top-level bucket entry does not need RCU protection
70 * because it's set at the same time as tbl->nest.
71 */
72 return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
73 }
74
75 static void nested_table_free(union nested_table *ntbl, unsigned int size)
76 {
77 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
78 const unsigned int len = 1 << shift;
79 unsigned int i;
80
81 ntbl = rcu_dereference_protected(ntbl->table, 1);
82 if (!ntbl)
83 return;
84
85 if (size > len) {
86 size >>= shift;
87 for (i = 0; i < len; i++)
88 nested_table_free(ntbl + i, size);
89 }
90
91 kfree(ntbl);
92 }
93
94 static void nested_bucket_table_free(const struct bucket_table *tbl)
95 {
96 unsigned int size = tbl->size >> tbl->nest;
97 unsigned int len = 1 << tbl->nest;
98 union nested_table *ntbl;
99 unsigned int i;
100
101 ntbl = nested_table_top(tbl);
102
103 for (i = 0; i < len; i++)
104 nested_table_free(ntbl + i, size);
105
106 kfree(ntbl);
107 }
108
109 static void bucket_table_free(const struct bucket_table *tbl)
110 {
111 if (tbl->nest)
112 nested_bucket_table_free(tbl);
113
114 kvfree(tbl);
115 }
116
117 static void bucket_table_free_rcu(struct rcu_head *head)
118 {
119 bucket_table_free(container_of(head, struct bucket_table, rcu));
120 }
121
122 static union nested_table *nested_table_alloc(struct rhashtable *ht,
123 union nested_table __rcu **prev,
124 bool leaf)
125 {
126 union nested_table *ntbl;
127 int i;
128
129 ntbl = rcu_dereference(*prev);
130 if (ntbl)
131 return ntbl;
132
133 ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
134
135 if (ntbl && leaf) {
136 for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
137 INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
138 }
139
140 if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
141 return ntbl;
142 /* Raced with another thread. */
143 kfree(ntbl);
144 return rcu_dereference(*prev);
145 }
146
147 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
148 size_t nbuckets,
149 gfp_t gfp)
150 {
151 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
152 struct bucket_table *tbl;
153 size_t size;
154
155 if (nbuckets < (1 << (shift + 1)))
156 return NULL;
157
158 size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
159
160 tbl = kzalloc(size, gfp);
161 if (!tbl)
162 return NULL;
163
164 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
165 false)) {
166 kfree(tbl);
167 return NULL;
168 }
169
170 tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
171
172 return tbl;
173 }
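/* Worked example (illustrative only; assumes 4K pages and 8-byte pointers):
 * shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9, so one page holds
 * 512 entries.  For nbuckets = 8192, ilog2(8192) = 13 and
 * tbl->nest = (13 - 1) % 9 + 1 = 4: the low 4 hash bits index 16 used slots
 * in the top-level page, each pointing at a leaf page of
 * 8192 >> 4 = 512 buckets.
 */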
174
175 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
176 size_t nbuckets,
177 gfp_t gfp)
178 {
179 struct bucket_table *tbl = NULL;
180 size_t size;
181 int i;
182 static struct lock_class_key __key;
183
184 tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
185
186 size = nbuckets;
187
188 if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
189 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
190 nbuckets = 0;
191 }
192
193 if (tbl == NULL)
194 return NULL;
195
196 lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
197
198 tbl->size = size;
199
200 rcu_head_init(&tbl->rcu);
201 INIT_LIST_HEAD(&tbl->walkers);
202
203 tbl->hash_rnd = get_random_u32();
204
205 for (i = 0; i < nbuckets; i++)
206 INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
207
208 return tbl;
209 }
210
211 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
212 struct bucket_table *tbl)
213 {
214 struct bucket_table *new_tbl;
215
216 do {
217 new_tbl = tbl;
218 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
219 } while (tbl);
220
221 return new_tbl;
222 }
223
224 static int rhashtable_rehash_one(struct rhashtable *ht,
225 struct rhash_lock_head **bkt,
226 unsigned int old_hash)
227 {
228 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
229 struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
230 int err = -EAGAIN;
231 struct rhash_head *head, *next, *entry;
232 struct rhash_head __rcu **pprev = NULL;
233 unsigned int new_hash;
234
235 if (new_tbl->nest)
236 goto out;
237
238 err = -ENOENT;
239
240 rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
241 old_tbl, old_hash) {
242 err = 0;
243 next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
244
245 if (rht_is_a_nulls(next))
246 break;
247
248 pprev = &entry->next;
249 }
250
251 if (err)
252 goto out;
253
254 new_hash = head_hashfn(ht, new_tbl, entry);
255
256 rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
257
258 head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
259
260 RCU_INIT_POINTER(entry->next, head);
261
262 rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
263
264 if (pprev)
265 rcu_assign_pointer(*pprev, next);
266 else
267                         /* Need to preserve the bit lock. */
268 rht_assign_locked(bkt, next);
269
270 out:
271 return err;
272 }
273
274 static int rhashtable_rehash_chain(struct rhashtable *ht,
275 unsigned int old_hash)
276 {
277 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
278 struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
279 int err;
280
281 if (!bkt)
282 return 0;
283 rht_lock(old_tbl, bkt);
284
285 while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
286 ;
287
288 if (err == -ENOENT)
289 err = 0;
290 rht_unlock(old_tbl, bkt);
291
292 return err;
293 }
294
295 static int rhashtable_rehash_attach(struct rhashtable *ht,
296 struct bucket_table *old_tbl,
297 struct bucket_table *new_tbl)
298 {
299 /* Make insertions go into the new, empty table right away. Deletions
300 * and lookups will be attempted in both tables until we synchronize.
301 * As cmpxchg() provides strong barriers, we do not need
302 * rcu_assign_pointer().
303 */
304
305 if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
306 new_tbl) != NULL)
307 return -EEXIST;
308
309 return 0;
310 }
311
312 static int rhashtable_rehash_table(struct rhashtable *ht)
313 {
314 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
315 struct bucket_table *new_tbl;
316 struct rhashtable_walker *walker;
317 unsigned int old_hash;
318 int err;
319
320 new_tbl = rht_dereference(old_tbl->future_tbl, ht);
321 if (!new_tbl)
322 return 0;
323
324 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
325 err = rhashtable_rehash_chain(ht, old_hash);
326 if (err)
327 return err;
328 cond_resched();
329 }
330
331 /* Publish the new table pointer. */
332 rcu_assign_pointer(ht->tbl, new_tbl);
333
334 spin_lock(&ht->lock);
335 list_for_each_entry(walker, &old_tbl->walkers, list)
336 walker->tbl = NULL;
337
338 /* Wait for readers. All new readers will see the new
339 * table, and thus no references to the old table will
340 * remain.
341          * We do this inside the locked region so that
342          * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
343          * to check whether the table is being freed and must not be re-linked.
344 */
345 call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
346 spin_unlock(&ht->lock);
347
348 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
349 }
350
351 static int rhashtable_rehash_alloc(struct rhashtable *ht,
352 struct bucket_table *old_tbl,
353 unsigned int size)
354 {
355 struct bucket_table *new_tbl;
356 int err;
357
358 ASSERT_RHT_MUTEX(ht);
359
360 new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
361 if (new_tbl == NULL)
362 return -ENOMEM;
363
364 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
365 if (err)
366 bucket_table_free(new_tbl);
367
368 return err;
369 }
370
371 /**
372 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
373 * @ht: the hash table to shrink
374 *
375  * This function shrinks the hash table to fit, i.e., to the smallest
376  * size that would not cause it to expand right away automatically.
377 *
378 * The caller must ensure that no concurrent resizing occurs by holding
379 * ht->mutex.
380 *
381 * The caller must ensure that no concurrent table mutations take place.
382 * It is however valid to have concurrent lookups if they are RCU protected.
383 *
384 * It is valid to have concurrent insertions and deletions protected by per
385 * bucket locks or concurrent RCU protected lookups and traversals.
386 */
387 static int rhashtable_shrink(struct rhashtable *ht)
388 {
389 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
390 unsigned int nelems = atomic_read(&ht->nelems);
391 unsigned int size = 0;
392
393 if (nelems)
394 size = roundup_pow_of_two(nelems * 3 / 2);
395 if (size < ht->p.min_size)
396 size = ht->p.min_size;
397
398 if (old_tbl->size <= size)
399 return 0;
400
401 if (rht_dereference(old_tbl->future_tbl, ht))
402 return -EEXIST;
403
404 return rhashtable_rehash_alloc(ht, old_tbl, size);
405 }
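/* Worked example (illustrative numbers): with nelems = 100 the target is
 * roundup_pow_of_two(100 * 3 / 2) = roundup_pow_of_two(150) = 256 buckets,
 * raised to ht->p.min_size if that is larger.  A table currently sized 1024
 * is rehashed down to 256, while one already at 256 or below is left alone.
 */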
406
407 static void rht_deferred_worker(struct work_struct *work)
408 {
409 struct rhashtable *ht;
410 struct bucket_table *tbl;
411 int err = 0;
412
413 ht = container_of(work, struct rhashtable, run_work);
414 mutex_lock(&ht->mutex);
415
416 tbl = rht_dereference(ht->tbl, ht);
417 tbl = rhashtable_last_table(ht, tbl);
418
419 if (rht_grow_above_75(ht, tbl))
420 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
421 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
422 err = rhashtable_shrink(ht);
423 else if (tbl->nest)
424 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
425
426 if (!err || err == -EEXIST) {
427 int nerr;
428
429 nerr = rhashtable_rehash_table(ht);
430 err = err ?: nerr;
431 }
432
433 mutex_unlock(&ht->mutex);
434
435 if (err)
436 schedule_work(&ht->run_work);
437 }
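/* Resize policy sketch (illustrative; the precise thresholds are the
 * rht_grow_above_75()/rht_shrink_below_30() helpers in rhashtable.h):
 * a 64-bucket table is doubled to 128 once it holds more than ~48 entries
 * (75%) and, when automatic_shrinking is set, shrunk once it falls below
 * ~19 entries (30%).  A nested table is rehashed at the same size so it can
 * be reallocated as a flat bucket array.
 */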
438
439 static int rhashtable_insert_rehash(struct rhashtable *ht,
440 struct bucket_table *tbl)
441 {
442 struct bucket_table *old_tbl;
443 struct bucket_table *new_tbl;
444 unsigned int size;
445 int err;
446
447 old_tbl = rht_dereference_rcu(ht->tbl, ht);
448
449 size = tbl->size;
450
451 err = -EBUSY;
452
453 if (rht_grow_above_75(ht, tbl))
454 size *= 2;
455 /* Do not schedule more than one rehash */
456 else if (old_tbl != tbl)
457 goto fail;
458
459 err = -ENOMEM;
460
461 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
462 if (new_tbl == NULL)
463 goto fail;
464
465 err = rhashtable_rehash_attach(ht, tbl, new_tbl);
466 if (err) {
467 bucket_table_free(new_tbl);
468 if (err == -EEXIST)
469 err = 0;
470 } else
471 schedule_work(&ht->run_work);
472
473 return err;
474
475 fail:
476 /* Do not fail the insert if someone else did a rehash. */
477 if (likely(rcu_access_pointer(tbl->future_tbl)))
478 return 0;
479
480 /* Schedule async rehash to retry allocation in process context. */
481 if (err == -ENOMEM)
482 schedule_work(&ht->run_work);
483
484 return err;
485 }
486
487 static void *rhashtable_lookup_one(struct rhashtable *ht,
488 struct rhash_lock_head **bkt,
489 struct bucket_table *tbl, unsigned int hash,
490 const void *key, struct rhash_head *obj)
491 {
492 struct rhashtable_compare_arg arg = {
493 .ht = ht,
494 .key = key,
495 };
496 struct rhash_head __rcu **pprev = NULL;
497 struct rhash_head *head;
498 int elasticity;
499
500 elasticity = RHT_ELASTICITY;
501 rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
502 struct rhlist_head *list;
503 struct rhlist_head *plist;
504
505 elasticity--;
506 if (!key ||
507 (ht->p.obj_cmpfn ?
508 ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
509 rhashtable_compare(&arg, rht_obj(ht, head)))) {
510 pprev = &head->next;
511 continue;
512 }
513
514 if (!ht->rhlist)
515 return rht_obj(ht, head);
516
517 list = container_of(obj, struct rhlist_head, rhead);
518 plist = container_of(head, struct rhlist_head, rhead);
519
520 RCU_INIT_POINTER(list->next, plist);
521 head = rht_dereference_bucket(head->next, tbl, hash);
522 RCU_INIT_POINTER(list->rhead.next, head);
523 if (pprev)
524 rcu_assign_pointer(*pprev, obj);
525 else
526 /* Need to preserve the bit lock */
527 rht_assign_locked(bkt, obj);
528
529 return NULL;
530 }
531
532 if (elasticity <= 0)
533 return ERR_PTR(-EAGAIN);
534
535 return ERR_PTR(-ENOENT);
536 }
537
538 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
539 struct rhash_lock_head **bkt,
540 struct bucket_table *tbl,
541 unsigned int hash,
542 struct rhash_head *obj,
543 void *data)
544 {
545 struct bucket_table *new_tbl;
546 struct rhash_head *head;
547
548 if (!IS_ERR_OR_NULL(data))
549 return ERR_PTR(-EEXIST);
550
551 if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
552 return ERR_CAST(data);
553
554 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
555 if (new_tbl)
556 return new_tbl;
557
558 if (PTR_ERR(data) != -ENOENT)
559 return ERR_CAST(data);
560
561 if (unlikely(rht_grow_above_max(ht, tbl)))
562 return ERR_PTR(-E2BIG);
563
564 if (unlikely(rht_grow_above_100(ht, tbl)))
565 return ERR_PTR(-EAGAIN);
566
567 head = rht_ptr(bkt, tbl, hash);
568
569 RCU_INIT_POINTER(obj->next, head);
570 if (ht->rhlist) {
571 struct rhlist_head *list;
572
573 list = container_of(obj, struct rhlist_head, rhead);
574 RCU_INIT_POINTER(list->next, NULL);
575 }
576
577 /* bkt is always the head of the list, so it holds
578 * the lock, which we need to preserve
579 */
580 rht_assign_locked(bkt, obj);
581
582 atomic_inc(&ht->nelems);
583 if (rht_grow_above_75(ht, tbl))
584 schedule_work(&ht->run_work);
585
586 return NULL;
587 }
588
589 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
590 struct rhash_head *obj)
591 {
592 struct bucket_table *new_tbl;
593 struct bucket_table *tbl;
594 struct rhash_lock_head **bkt;
595 unsigned int hash;
596 void *data;
597
598 new_tbl = rcu_dereference(ht->tbl);
599
600 do {
601 tbl = new_tbl;
602 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
603 if (rcu_access_pointer(tbl->future_tbl))
604 /* Failure is OK */
605 bkt = rht_bucket_var(tbl, hash);
606 else
607 bkt = rht_bucket_insert(ht, tbl, hash);
608 if (bkt == NULL) {
609 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
610 data = ERR_PTR(-EAGAIN);
611 } else {
612 rht_lock(tbl, bkt);
613 data = rhashtable_lookup_one(ht, bkt, tbl,
614 hash, key, obj);
615 new_tbl = rhashtable_insert_one(ht, bkt, tbl,
616 hash, obj, data);
617 if (PTR_ERR(new_tbl) != -EEXIST)
618 data = ERR_CAST(new_tbl);
619
620 rht_unlock(tbl, bkt);
621 }
622 } while (!IS_ERR_OR_NULL(new_tbl));
623
624 if (PTR_ERR(data) == -EAGAIN)
625 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
626 -EAGAIN);
627
628 return data;
629 }
630
631 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
632 struct rhash_head *obj)
633 {
634 void *data;
635
636 do {
637 rcu_read_lock();
638 data = rhashtable_try_insert(ht, key, obj);
639 rcu_read_unlock();
640 } while (PTR_ERR(data) == -EAGAIN);
641
642 return data;
643 }
644 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
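/* Callers normally use the inline fast path in rhashtable.h, which falls
 * back to rhashtable_insert_slow() roughly when the table is resizing or a
 * chain grows too long.  A minimal usage sketch, assuming a hypothetical
 * caller-defined struct test_obj embedding a struct rhash_head "node" and a
 * matching rhashtable_params "params" (neither is part of this file):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *	if (err)
 *		kfree(obj);
 */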
645
646 /**
647 * rhashtable_walk_enter - Initialise an iterator
648 * @ht: Table to walk over
649 * @iter: Hash table Iterator
650 *
651 * This function prepares a hash table walk.
652 *
653 * Note that if you restart a walk after rhashtable_walk_stop you
654 * may see the same object twice. Also, you may miss objects if
655 * there are removals in between rhashtable_walk_stop and the next
656 * call to rhashtable_walk_start.
657 *
658 * For a completely stable walk you should construct your own data
659 * structure outside the hash table.
660 *
661 * This function may be called from any process context, including
662 * non-preemptable context, but cannot be called from softirq or
663 * hardirq context.
664 *
665 * You must call rhashtable_walk_exit after this function returns.
666 */
667 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
668 {
669 iter->ht = ht;
670 iter->p = NULL;
671 iter->slot = 0;
672 iter->skip = 0;
673 iter->end_of_table = 0;
674
675 spin_lock(&ht->lock);
676 iter->walker.tbl =
677 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
678 list_add(&iter->walker.list, &iter->walker.tbl->walkers);
679 spin_unlock(&ht->lock);
680 }
681 EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
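/* A minimal walk sketch (illustrative; struct test_obj is a hypothetical
 * caller-defined type embedding a struct rhash_head, and ht an already
 * initialised table).  An ERR_PTR(-EAGAIN) from rhashtable_walk_next()
 * means a resize was seen and the iterator rewound, so simply continuing
 * restarts the walk:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;
 *		... use obj, protected by RCU ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */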
682
683 /**
684 * rhashtable_walk_exit - Free an iterator
685 * @iter: Hash table Iterator
686 *
687 * This function frees resources allocated by rhashtable_walk_enter.
688 */
689 void rhashtable_walk_exit(struct rhashtable_iter *iter)
690 {
691 spin_lock(&iter->ht->lock);
692 if (iter->walker.tbl)
693 list_del(&iter->walker.list);
694 spin_unlock(&iter->ht->lock);
695 }
696 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
697
698 /**
699 * rhashtable_walk_start_check - Start a hash table walk
700 * @iter: Hash table iterator
701 *
702 * Start a hash table walk at the current iterator position. Note that we take
703 * the RCU lock in all cases including when we return an error. So you must
704 * always call rhashtable_walk_stop to clean up.
705 *
706 * Returns zero if successful.
707 *
708  * Returns -EAGAIN if a resize event occurred. Note that the iterator
709 * will rewind back to the beginning and you may use it immediately
710 * by calling rhashtable_walk_next.
711 *
712 * rhashtable_walk_start is defined as an inline variant that returns
713 * void. This is preferred in cases where the caller would ignore
714 * resize events and always continue.
715 */
716 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
717 __acquires(RCU)
718 {
719 struct rhashtable *ht = iter->ht;
720 bool rhlist = ht->rhlist;
721
722 rcu_read_lock();
723
724 spin_lock(&ht->lock);
725 if (iter->walker.tbl)
726 list_del(&iter->walker.list);
727 spin_unlock(&ht->lock);
728
729 if (iter->end_of_table)
730 return 0;
731 if (!iter->walker.tbl) {
732 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
733 iter->slot = 0;
734 iter->skip = 0;
735 return -EAGAIN;
736 }
737
738 if (iter->p && !rhlist) {
739 /*
740 * We need to validate that 'p' is still in the table, and
741 * if so, update 'skip'
742 */
743 struct rhash_head *p;
744 int skip = 0;
745 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
746 skip++;
747 if (p == iter->p) {
748 iter->skip = skip;
749 goto found;
750 }
751 }
752 iter->p = NULL;
753 } else if (iter->p && rhlist) {
754 /* Need to validate that 'list' is still in the table, and
755 * if so, update 'skip' and 'p'.
756 */
757 struct rhash_head *p;
758 struct rhlist_head *list;
759 int skip = 0;
760 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
761 for (list = container_of(p, struct rhlist_head, rhead);
762 list;
763 list = rcu_dereference(list->next)) {
764 skip++;
765 if (list == iter->list) {
766 iter->p = p;
767 iter->skip = skip;
768 goto found;
769 }
770 }
771 }
772 iter->p = NULL;
773 }
774 found:
775 return 0;
776 }
777 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
778
779 /**
780 * __rhashtable_walk_find_next - Find the next element in a table (or the first
781 * one in case of a new walk).
782 *
783 * @iter: Hash table iterator
784 *
785 * Returns the found object or NULL when the end of the table is reached.
786 *
787 * Returns -EAGAIN if resize event occurred.
788 */
789 static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
790 {
791 struct bucket_table *tbl = iter->walker.tbl;
792 struct rhlist_head *list = iter->list;
793 struct rhashtable *ht = iter->ht;
794 struct rhash_head *p = iter->p;
795 bool rhlist = ht->rhlist;
796
797 if (!tbl)
798 return NULL;
799
800 for (; iter->slot < tbl->size; iter->slot++) {
801 int skip = iter->skip;
802
803 rht_for_each_rcu(p, tbl, iter->slot) {
804 if (rhlist) {
805 list = container_of(p, struct rhlist_head,
806 rhead);
807 do {
808 if (!skip)
809 goto next;
810 skip--;
811 list = rcu_dereference(list->next);
812 } while (list);
813
814 continue;
815 }
816 if (!skip)
817 break;
818 skip--;
819 }
820
821 next:
822 if (!rht_is_a_nulls(p)) {
823 iter->skip++;
824 iter->p = p;
825 iter->list = list;
826 return rht_obj(ht, rhlist ? &list->rhead : p);
827 }
828
829 iter->skip = 0;
830 }
831
832 iter->p = NULL;
833
834 /* Ensure we see any new tables. */
835 smp_rmb();
836
837 iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
838 if (iter->walker.tbl) {
839 iter->slot = 0;
840 iter->skip = 0;
841 return ERR_PTR(-EAGAIN);
842 } else {
843 iter->end_of_table = true;
844 }
845
846 return NULL;
847 }
848
849 /**
850 * rhashtable_walk_next - Return the next object and advance the iterator
851 * @iter: Hash table iterator
852 *
853 * Note that you must call rhashtable_walk_stop when you are finished
854 * with the walk.
855 *
856 * Returns the next object or NULL when the end of the table is reached.
857 *
858 * Returns -EAGAIN if resize event occurred. Note that the iterator
859 * will rewind back to the beginning and you may continue to use it.
860 */
861 void *rhashtable_walk_next(struct rhashtable_iter *iter)
862 {
863 struct rhlist_head *list = iter->list;
864 struct rhashtable *ht = iter->ht;
865 struct rhash_head *p = iter->p;
866 bool rhlist = ht->rhlist;
867
868 if (p) {
869 if (!rhlist || !(list = rcu_dereference(list->next))) {
870 p = rcu_dereference(p->next);
871 list = container_of(p, struct rhlist_head, rhead);
872 }
873 if (!rht_is_a_nulls(p)) {
874 iter->skip++;
875 iter->p = p;
876 iter->list = list;
877 return rht_obj(ht, rhlist ? &list->rhead : p);
878 }
879
880                 /* At the end of this slot, switch to the next one and then
881                  * find the next entry from that point.
882 */
883 iter->skip = 0;
884 iter->slot++;
885 }
886
887 return __rhashtable_walk_find_next(iter);
888 }
889 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
890
891 /**
892 * rhashtable_walk_peek - Return the next object but don't advance the iterator
893 * @iter: Hash table iterator
894 *
895 * Returns the next object or NULL when the end of the table is reached.
896 *
897 * Returns -EAGAIN if resize event occurred. Note that the iterator
898 * will rewind back to the beginning and you may continue to use it.
899 */
900 void *rhashtable_walk_peek(struct rhashtable_iter *iter)
901 {
902 struct rhlist_head *list = iter->list;
903 struct rhashtable *ht = iter->ht;
904 struct rhash_head *p = iter->p;
905
906 if (p)
907 return rht_obj(ht, ht->rhlist ? &list->rhead : p);
908
909 /* No object found in current iter, find next one in the table. */
910
911 if (iter->skip) {
912                 /* A nonzero skip value points to the next entry in the table
913                  * beyond the last one that was found. Decrement skip so
914 * we find the current value. __rhashtable_walk_find_next
915 * will restore the original value of skip assuming that
916 * the table hasn't changed.
917 */
918 iter->skip--;
919 }
920
921 return __rhashtable_walk_find_next(iter);
922 }
923 EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
924
925 /**
926 * rhashtable_walk_stop - Finish a hash table walk
927 * @iter: Hash table iterator
928 *
929 * Finish a hash table walk. Does not reset the iterator to the start of the
930 * hash table.
931 */
932 void rhashtable_walk_stop(struct rhashtable_iter *iter)
933 __releases(RCU)
934 {
935 struct rhashtable *ht;
936 struct bucket_table *tbl = iter->walker.tbl;
937
938 if (!tbl)
939 goto out;
940
941 ht = iter->ht;
942
943 spin_lock(&ht->lock);
944 if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
945 /* This bucket table is being freed, don't re-link it. */
946 iter->walker.tbl = NULL;
947 else
948 list_add(&iter->walker.list, &tbl->walkers);
949 spin_unlock(&ht->lock);
950
951 out:
952 rcu_read_unlock();
953 }
954 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
955
956 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
957 {
958 size_t retsize;
959
960 if (params->nelem_hint)
961 retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
962 (unsigned long)params->min_size);
963 else
964 retsize = max(HASH_DEFAULT_SIZE,
965 (unsigned long)params->min_size);
966
967 return retsize;
968 }
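/* For example (illustrative): nelem_hint = 100 yields
 * max(roundup_pow_of_two(100 * 4 / 3), min_size) =
 * max(roundup_pow_of_two(133), min_size) = 256 initial buckets for the
 * default min_size, while no hint falls back to HASH_DEFAULT_SIZE (64).
 */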
969
970 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
971 {
972 return jhash2(key, length, seed);
973 }
974
975 /**
976 * rhashtable_init - initialize a new hash table
977 * @ht: hash table to be initialized
978 * @params: configuration parameters
979 *
980 * Initializes a new hash table based on the provided configuration
981 * parameters. A table can be configured either with a variable or
982 * fixed length key:
983 *
984 * Configuration Example 1: Fixed length keys
985 * struct test_obj {
986 * int key;
987 * void * my_member;
988 * struct rhash_head node;
989 * };
990 *
991 * struct rhashtable_params params = {
992 * .head_offset = offsetof(struct test_obj, node),
993 * .key_offset = offsetof(struct test_obj, key),
994 * .key_len = sizeof(int),
995 * .hashfn = jhash,
996 * };
997 *
998 * Configuration Example 2: Variable length keys
999 * struct test_obj {
1000 * [...]
1001 * struct rhash_head node;
1002 * };
1003 *
1004 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
1005 * {
1006 * struct test_obj *obj = data;
1007 *
1008 * return [... hash ...];
1009 * }
1010 *
1011 * struct rhashtable_params params = {
1012 * .head_offset = offsetof(struct test_obj, node),
1013 * .hashfn = jhash,
1014 * .obj_hashfn = my_hash_fn,
1015 * };
1016 */
1017 int rhashtable_init(struct rhashtable *ht,
1018 const struct rhashtable_params *params)
1019 {
1020 struct bucket_table *tbl;
1021 size_t size;
1022
1023 if ((!params->key_len && !params->obj_hashfn) ||
1024 (params->obj_hashfn && !params->obj_cmpfn))
1025 return -EINVAL;
1026
1027 memset(ht, 0, sizeof(*ht));
1028 mutex_init(&ht->mutex);
1029 spin_lock_init(&ht->lock);
1030 memcpy(&ht->p, params, sizeof(*params));
1031
1032 if (params->min_size)
1033 ht->p.min_size = roundup_pow_of_two(params->min_size);
1034
1035 /* Cap total entries at 2^31 to avoid nelems overflow. */
1036 ht->max_elems = 1u << 31;
1037
1038 if (params->max_size) {
1039 ht->p.max_size = rounddown_pow_of_two(params->max_size);
1040 if (ht->p.max_size < ht->max_elems / 2)
1041 ht->max_elems = ht->p.max_size * 2;
1042 }
1043
1044 ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1045
1046 size = rounded_hashtable_size(&ht->p);
1047
1048 ht->key_len = ht->p.key_len;
1049 if (!params->hashfn) {
1050 ht->p.hashfn = jhash;
1051
1052 if (!(ht->key_len & (sizeof(u32) - 1))) {
1053 ht->key_len /= sizeof(u32);
1054 ht->p.hashfn = rhashtable_jhash2;
1055 }
1056 }
1057
1058 /*
1059          * This is API initialization and thus we need to guarantee the
1060 * initial rhashtable allocation. Upon failure, retry with the
1061 * smallest possible size with __GFP_NOFAIL semantics.
1062 */
1063 tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1064 if (unlikely(tbl == NULL)) {
1065 size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1066 tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
1067 }
1068
1069 atomic_set(&ht->nelems, 0);
1070
1071 RCU_INIT_POINTER(ht->tbl, tbl);
1072
1073 INIT_WORK(&ht->run_work, rht_deferred_worker);
1074
1075 return 0;
1076 }
1077 EXPORT_SYMBOL_GPL(rhashtable_init);
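/* A minimal init/teardown sketch (illustrative; struct test_obj and its
 * fields are hypothetical, mirroring Configuration Example 1 above):
 *
 *	static const struct rhashtable_params params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 *	struct rhashtable ht;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *	...
 *	rhashtable_destroy(&ht);
 *
 * Leaving .hashfn unset selects jhash (or jhash2 for u32-aligned keys), as
 * set up at the end of rhashtable_init() above.
 */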
1078
1079 /**
1080 * rhltable_init - initialize a new hash list table
1081 * @hlt: hash list table to be initialized
1082 * @params: configuration parameters
1083 *
1084 * Initializes a new hash list table.
1085 *
1086 * See documentation for rhashtable_init.
1087 */
1088 int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
1089 {
1090 int err;
1091
1092 err = rhashtable_init(&hlt->ht, params);
1093 hlt->ht.rhlist = true;
1094 return err;
1095 }
1096 EXPORT_SYMBOL_GPL(rhltable_init);
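/* Unlike a plain rhashtable, an rhltable chains all objects sharing a key on
 * a single bucket entry.  A usage sketch (illustrative; the object type and
 * its "node" member are hypothetical, and the rhltable_* helpers live in
 * rhashtable.h):
 *
 *	struct test_obj {
 *		int key;
 *		struct rhlist_head node;
 *	};
 *	struct rhlist_head *h, *pos;
 *	struct test_obj *obj;
 *
 *	err = rhltable_insert(&hlt, &obj->node, params);
 *
 *	rcu_read_lock();
 *	h = rhltable_lookup(&hlt, &key, params);
 *	rhl_for_each_entry_rcu(obj, pos, h, node)
 *		... every object stored under this key ...
 *	rcu_read_unlock();
 */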
1097
1098 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1099 void (*free_fn)(void *ptr, void *arg),
1100 void *arg)
1101 {
1102 struct rhlist_head *list;
1103
1104 if (!ht->rhlist) {
1105 free_fn(rht_obj(ht, obj), arg);
1106 return;
1107 }
1108
1109 list = container_of(obj, struct rhlist_head, rhead);
1110 do {
1111 obj = &list->rhead;
1112 list = rht_dereference(list->next, ht);
1113 free_fn(rht_obj(ht, obj), arg);
1114 } while (list);
1115 }
1116
1117 /**
1118 * rhashtable_free_and_destroy - free elements and destroy hash table
1119 * @ht: the hash table to destroy
1120 * @free_fn: callback to release resources of element
1121 * @arg: pointer passed to free_fn
1122 *
1123  * Stops any pending async resize. If defined, invokes free_fn for each
1124  * element to release its resources. Please note that RCU protected
1125  * readers may still be accessing the elements. Releasing resources
1126  * must therefore occur in a compatible manner. Then frees the bucket array.
1127 *
1128 * This function will eventually sleep to wait for an async resize
1129  * to complete. The caller is responsible for ensuring that no further
1130  * write operations occur in parallel.
1131 */
1132 void rhashtable_free_and_destroy(struct rhashtable *ht,
1133 void (*free_fn)(void *ptr, void *arg),
1134 void *arg)
1135 {
1136 struct bucket_table *tbl, *next_tbl;
1137 unsigned int i;
1138
1139 cancel_work_sync(&ht->run_work);
1140
1141 mutex_lock(&ht->mutex);
1142 tbl = rht_dereference(ht->tbl, ht);
1143 restart:
1144 if (free_fn) {
1145 for (i = 0; i < tbl->size; i++) {
1146 struct rhash_head *pos, *next;
1147
1148 cond_resched();
1149 for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
1150 next = !rht_is_a_nulls(pos) ?
1151 rht_dereference(pos->next, ht) : NULL;
1152 !rht_is_a_nulls(pos);
1153 pos = next,
1154 next = !rht_is_a_nulls(pos) ?
1155 rht_dereference(pos->next, ht) : NULL)
1156 rhashtable_free_one(ht, pos, free_fn, arg);
1157 }
1158 }
1159
1160 next_tbl = rht_dereference(tbl->future_tbl, ht);
1161 bucket_table_free(tbl);
1162 if (next_tbl) {
1163 tbl = next_tbl;
1164 goto restart;
1165 }
1166 mutex_unlock(&ht->mutex);
1167 }
1168 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
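/* A teardown sketch (illustrative; assumes every element was allocated with
 * kmalloc() and ht is the table being torn down):
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */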
1169
1170 void rhashtable_destroy(struct rhashtable *ht)
1171 {
1172 return rhashtable_free_and_destroy(ht, NULL, NULL);
1173 }
1174 EXPORT_SYMBOL_GPL(rhashtable_destroy);
1175
1176 struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
1177 unsigned int hash)
1178 {
1179 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1180 unsigned int index = hash & ((1 << tbl->nest) - 1);
1181 unsigned int size = tbl->size >> tbl->nest;
1182 unsigned int subhash = hash;
1183 union nested_table *ntbl;
1184
1185 ntbl = nested_table_top(tbl);
1186 ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1187 subhash >>= tbl->nest;
1188
1189 while (ntbl && size > (1 << shift)) {
1190 index = subhash & ((1 << shift) - 1);
1191 ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
1192 tbl, hash);
1193 size >>= shift;
1194 subhash >>= shift;
1195 }
1196
1197 if (!ntbl)
1198 return NULL;
1199
1200 return &ntbl[subhash].bucket;
1201
1202 }
1203 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
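/* Index decomposition, worked through (illustrative; 4K pages and 8-byte
 * pointers, so shift = 9): with tbl->size = 8192 and tbl->nest = 4 the low
 * 4 bits of the hash pick one of 16 used slots in the top-level page, and
 * the remaining bits (hash >> 4, at most 512 distinct values) pick the
 * bucket inside that leaf page; larger tables simply repeat the ">>= shift"
 * step one level deeper.
 */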
1204
1205 struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
1206 unsigned int hash)
1207 {
1208 static struct rhash_lock_head *rhnull;
1209
1210 if (!rhnull)
1211 INIT_RHT_NULLS_HEAD(rhnull);
1212 return __rht_bucket_nested(tbl, hash) ?: &rhnull;
1213 }
1214 EXPORT_SYMBOL_GPL(rht_bucket_nested);
1215
1216 struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
1217 struct bucket_table *tbl,
1218 unsigned int hash)
1219 {
1220 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1221 unsigned int index = hash & ((1 << tbl->nest) - 1);
1222 unsigned int size = tbl->size >> tbl->nest;
1223 union nested_table *ntbl;
1224
1225 ntbl = nested_table_top(tbl);
1226 hash >>= tbl->nest;
1227 ntbl = nested_table_alloc(ht, &ntbl[index].table,
1228 size <= (1 << shift));
1229
1230 while (ntbl && size > (1 << shift)) {
1231 index = hash & ((1 << shift) - 1);
1232 size >>= shift;
1233 hash >>= shift;
1234 ntbl = nested_table_alloc(ht, &ntbl[index].table,
1235 size <= (1 << shift));
1236 }
1237
1238 if (!ntbl)
1239 return NULL;
1240
1241 return &ntbl[hash].bucket;
1242
1243 }
1244 EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);