/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U

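/*
 * A bucket table is normally one flat array of buckets.  When that
 * allocation fails, a "nested" table is used instead: a small tree of
 * pages in which each node is an array of union nested_table slots,
 * pointing either at a deeper level or, at the leaves, at the buckets.
 */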
union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

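/*
 * Recursively free one level of a nested table.  Each level is one
 * page of pointers; deeper levels exist only while the remaining size
 * exceeds the number of slots per page.
 */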
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

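/*
 * Allocate one page of a nested table.  If another CPU installs the
 * same level first, the cmpxchg() below loses the race, the local
 * allocation is freed and the winner's page is used instead.
 */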
static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg(prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

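/*
 * Allocate a bucket table.  If the flat kvzalloc() fails and the
 * request is atomic (not plain GFP_KERNEL), fall back to a nested
 * table; nbuckets is then cleared so the flat-bucket initialisation
 * loop below is skipped, while tbl->size keeps the requested size.
 */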
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

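/* Walk the future_tbl chain and return the newest table. */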
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

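/*
 * Move the last entry of the old bucket chain at @old_hash to the
 * bucket it hashes to in the new table.  Returns -ENOENT once the
 * chain is empty, or -EAGAIN if the new table is nested and the
 * rehash must be redone from process context.
 */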
static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
	int err;

	if (!bkt)
		return 0;
	rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

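/*
 * Move every entry from the current table into the table linked at
 * future_tbl, publish that table, and free the old one after an RCU
 * grace period.  Returns -EAGAIN if yet another table is already
 * queued behind the newly published one.
 */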
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check whether it must avoid re-linking the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

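/*
 * Deferred worker: grows the table above 75% utilisation, shrinks it
 * below 30% when automatic_shrinking is set, and rehashes nested
 * tables at the same size to convert them back to flat ones.
 */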
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

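/*
 * Scan the locked bucket chain for @key.  Returns the matching object,
 * ERR_PTR(-ENOENT) if nothing matches, or ERR_PTR(-EAGAIN) once more
 * than RHT_ELASTICITY entries have been traversed, signalling that a
 * rehash is needed.  For rhlist tables a match links @obj into the
 * matching list and NULL is returned.
 */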
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

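/*
 * Insert @obj at the head of its bucket, given @data, the result of
 * the preceding rhashtable_lookup_one().  Returns NULL on success,
 * the future table if the insert must be retried there, or an
 * ERR_PTR() (-EEXIST, -E2BIG, -EAGAIN) on failure.
 */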
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct rhash_lock_head **bkt,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

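/*
 * One insertion attempt under the RCU read lock: follow the future_tbl
 * chain, taking the bucket lock in each table, until the lookup and
 * insert succeed in the newest table.  ERR_PTR(-EAGAIN) tells the
 * caller to kick off a rehash and retry.
 */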
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head **bkt;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

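/*
 * Choose the initial table size: at least min_size, and large enough
 * that nelem_hint entries fill it to roughly 75% (hint * 4 / 3,
 * rounded up to a power of two).
 */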
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

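/*
 * Nested-table lookup: tbl->nest bits of the hash select the top-level
 * page, and each further level consumes PAGE_SHIFT - ilog2(sizeof(void *))
 * bits.  Returns NULL if the page holding the bucket was never allocated.
 */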
struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
					     unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
					   unsigned int hash)
{
	static struct rhash_lock_head *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);