/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U

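/*
 * Editorial note on the structure below: when the bucket array cannot
 * be allocated in one piece, it is kept as a tree of page-sized chunks
 * instead.  Each nested_table slot is either a pointer to a deeper
 * chunk (interior node) or a bucket head (leaf); rht_bucket_nested()
 * at the bottom of this file walks the tree using successive chunks
 * of the hash bits as indices.
 */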
union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

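/*
 * Editorial note: the per-bucket "locks" are bit spinlocks stored in
 * bit 0 of the bucket pointer itself (see rht_lock()/rht_unlock(),
 * assumed here to live in <linux/rhashtable.h>), which is why
 * lockdep_rht_bucket_is_held() below simply tests that bit.  Buckets
 * of nested tables are conservatively reported as held.
 */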
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

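/*
 * Editorial note: each call moves exactly one entry -- the last one on
 * the old bucket's chain -- to the head of its new bucket.  Taking
 * entries from the tail means the remainder of the old chain stays
 * intact, so concurrent RCU readers can keep traversing it while the
 * rehash is in progress.
 */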
static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
	int err;

	if (!bkt)
		return 0;
	rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
		    new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

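/*
 * Editorial note: this is called from the insert slow path when a
 * chain exceeded its elasticity or the table grew past 100%
 * utilization.  It tries to attach a new table allocated with
 * GFP_ATOMIC, and falls back to the deferred worker (which can use
 * GFP_KERNEL in process context) if that allocation fails.
 */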
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

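/*
 * Editorial note: returns the matching object, ERR_PTR(-ENOENT) if the
 * key is absent, or ERR_PTR(-EAGAIN) once the scan exceeds
 * RHT_ELASTICITY entries and a rehash should be forced.  For rhlist
 * tables a match instead splices @obj into the matching entry's list
 * and returns NULL.
 */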
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

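/*
 * Editorial note: completes an insertion using the result of the
 * lookup above.  Returns NULL on success, the future table if the
 * insert must be retried there, or an ERR_PTR() -- -E2BIG above
 * max_elems, -EAGAIN above 100% utilization.
 */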
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct rhash_lock_head **bkt,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

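/*
 * Editorial note: a single insertion attempt under the RCU read lock.
 * The loop chases future_tbl pointers so that the entry always lands
 * in the newest table, no matter how many rehashes are pending.
 */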
ca26893f HX |
583 | static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, |
584 | struct rhash_head *obj) | |
585 | { | |
586 | struct bucket_table *new_tbl; | |
587 | struct bucket_table *tbl; | |
ba6306e3 | 588 | struct rhash_lock_head **bkt; |
ca26893f | 589 | unsigned int hash; |
ca26893f HX |
590 | void *data; |
591 | ||
4feb7c7a | 592 | new_tbl = rcu_dereference(ht->tbl); |
ca26893f | 593 | |
4feb7c7a | 594 | do { |
ca26893f HX |
595 | tbl = new_tbl; |
596 | hash = rht_head_hashfn(ht, tbl, obj, ht->p); | |
8f0db018 N |
597 | if (rcu_access_pointer(tbl->future_tbl)) |
598 | /* Failure is OK */ | |
599 | bkt = rht_bucket_var(tbl, hash); | |
600 | else | |
601 | bkt = rht_bucket_insert(ht, tbl, hash); | |
602 | if (bkt == NULL) { | |
603 | new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); | |
604 | data = ERR_PTR(-EAGAIN); | |
605 | } else { | |
149212f0 | 606 | rht_lock(tbl, bkt); |
8f0db018 N |
607 | data = rhashtable_lookup_one(ht, bkt, tbl, |
608 | hash, key, obj); | |
609 | new_tbl = rhashtable_insert_one(ht, bkt, tbl, | |
610 | hash, obj, data); | |
611 | if (PTR_ERR(new_tbl) != -EEXIST) | |
612 | data = ERR_CAST(new_tbl); | |
613 | ||
149212f0 | 614 | rht_unlock(tbl, bkt); |
8f0db018 | 615 | } |
4feb7c7a | 616 | } while (!IS_ERR_OR_NULL(new_tbl)); |
ca26893f HX |
617 | |
618 | if (PTR_ERR(data) == -EAGAIN) | |
619 | data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: | |
620 | -EAGAIN); | |
621 | ||
622 | return data; | |
623 | } | |
624 | ||
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

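/*
 * Editorial note -- a minimal sketch of how the walker API below fits
 * together (illustrative only; my_ht, my_obj and the error handling
 * are hypothetical, not part of this file):
 *
 *	struct rhashtable_iter iter;
 *	struct my_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	// resize: iterator rewound
 *			break;
 *		}
 *		// ... use obj ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */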
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

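/*
 * Editorial note -- a minimal usage sketch tying the pieces together
 * (illustrative only; "test_obj" and "params" refer to Configuration
 * Example 1 above, "key" is a hypothetical int, and error handling is
 * elided):
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj = ...;
 *
 *	rhashtable_init(&ht, &params);
 *	rhashtable_insert_fast(&ht, &obj->node, params);
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 *	rhashtable_remove_fast(&ht, &obj->node, params);
 *	rhashtable_destroy(&ht);
 */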
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller must ensure that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

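/*
 * Editorial note on nested-bucket address resolution: the low
 * tbl->nest bits of the hash index the top-level page, then each
 * deeper level consumes a further 'shift' bits until a leaf page of
 * bucket heads is reached.
 */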
struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
					     unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
					   unsigned int hash)
{
	static struct rhash_lock_head *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);