/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (tbl->nest)
		size = min(size, 1U << tbl->nest);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size at which it would not immediately be expanded again automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = ht->elasticity;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
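
/*
 * Caller-side note with a hedged sketch (my_obj and my_params are
 * hypothetical): users normally call the inline fast path from
 * <linux/rhashtable.h>, and __rhashtable_insert_fast() branches to
 * rhashtable_insert_slow() above when it observes a pending resize
 * (a future_tbl) or exhausts its elasticity budget, e.g.
 *
 *	err = rhashtable_insert_fast(&ht, &my_obj->node, my_params);
 *
 * The retry loop in rhashtable_insert_slow() is what absorbs the
 * transient -EAGAIN produced while an insert races with a rehash.
 */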

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
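
/*
 * Illustrative usage sketch for the walk API above; not code used by
 * this file. ht, struct test_obj and visit() are hypothetical, the
 * rhashtable_walk_* calls are the functions documented above. -EAGAIN
 * from rhashtable_walk_start or rhashtable_walk_next only means that a
 * resize rewound the iterator, so the walk simply carries on:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) != -EAGAIN)
 *				break;
 *			continue;
 *		}
 *		visit(obj);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */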

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
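
/*
 * Illustrative end-to-end sketch for rhashtable_init() plus the inline
 * fast paths declared in <linux/rhashtable.h>. struct test_obj mirrors
 * Configuration Example 1 above; test_params, ht, obj and key are
 * hypothetical names. Omitting .hashfn is fine: the init code above
 * falls back to jhash/jhash2.
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *	struct rhashtable ht;
 *	struct test_obj *found;
 *	int err, key = 1;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (!err)
 *		err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *
 *	rcu_read_lock();
 *	found = rhashtable_lookup_fast(&ht, &key, test_params);
 *	rcu_read_unlock();
 */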

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
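
/*
 * Hedged sketch of the rhlist variant (hlt, key, my_obj, test_params
 * and visit() are hypothetical): objects embed a struct rhlist_head
 * instead of a struct rhash_head, duplicate keys chain behind a single
 * slot, and a lookup returns the per-key chain:
 *
 *	struct rhlist_head *list, *pos;
 *	struct test_obj *obj;
 *
 *	err = rhltable_insert(&hlt, &my_obj->list_node, test_params);
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		visit(obj);
 *	rcu_read_unlock();
 */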

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
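
/*
 * Illustrative teardown sketch: a minimal free_fn for
 * rhashtable_free_and_destroy() above usually just frees the enclosing
 * object, since free_fn receives the object pointer itself
 * (test_obj_free_fn and ht are hypothetical):
 *
 *	static void test_obj_free_fn(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, test_obj_free_fn, NULL);
 */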

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);