/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

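/*
 * Worked example (illustrative values, not defined in this header): with
 * nulls_base = (1U << RHT_BASE_SHIFT) and a bucket whose first element
 * hashed to 5, rht_marker() below produces
 * NULLS_MARKER((1 << 27) + 5) == ((((1UL << 27) + 5) << 1) | 1).
 * The low bit is always set, so rht_is_a_nulls() can distinguish this
 * end-of-chain marker from a real &struct rhash_head pointer.
 */
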
struct rhash_head {
	struct rhash_head __rcu		*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Function to hash key
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};

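/*
 * A minimal parameter block for a hypothetical object type. The struct,
 * its field names and the use of jhash() (from <linux/jhash.h>) are
 * illustrative, not part of this header:
 *
 *	struct test_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.key_len	= sizeof(u32),
 *		.hashfn		= jhash,
 *	};
 */
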
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @being_destroyed: True if table is set up for destruction
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	bool				being_destroyed;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	/* params must be equal to ht->p if it isn't constant. */
	unsigned int key_len = __builtin_constant_p(params.key_len) ?
			       (params.key_len ?: ht->p.key_len) :
			       params.key_len;

	return rht_bucket_index(tbl, params.hashfn(key, key_len,
						   tbl->hash_rnd));
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}
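
/*
 * Worked example: for tbl->size == 64, rht_grow_above_75() fires once
 * nelems exceeds 64 / 4 * 3 == 48 entries, and rht_shrink_below_30()
 * fires once nelems drops below 64 * 3 / 10 == 19 entries, subject to
 * the max_size/min_size bounds checked above.
 */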

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

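/*
 * Sketch of the resize-time lock ordering described above. old_tbl,
 * new_tbl and hash are hypothetical locals, and the relinking itself is
 * performed by the rhashtable implementation, not by users of this header:
 *
 *	spinlock_t *old_lock = rht_bucket_lock(old_tbl, hash);
 *	spinlock_t *new_lock = rht_bucket_lock(new_tbl, hash);
 *
 *	spin_lock_bh(old_lock);		(old table first)
 *	spin_lock_nested(new_lock, SINGLE_DEPTH_NESTING);
 *	... relink entries into new_tbl ...
 *	spin_unlock(new_lock);
 *	spin_unlock_bh(old_lock);
 */
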
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *old_tbl);

int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

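/*
 * Stable-walk sketch using the walker API above (illustrative; my_ht and
 * the test_obj type are hypothetical, error handling is abbreviated):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(my_ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto out;
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	(ERR_PTR(-EAGAIN): table resized)
 *		... use obj ...
 *	}
 *	rhashtable_walk_stop(&iter);
 * out:
 *	rhashtable_walk_exit(&iter);
 */
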
void rhashtable_destroy(struct rhashtable *ht);

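/*
 * Typical lifecycle, a sketch under the illustrative test_obj/test_params
 * definitions from the example near &struct rhashtable_params above (not a
 * definitive usage; error paths trimmed):
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	u32 key = 1;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	...
 *	obj = rhashtable_lookup_fast(&ht, &key, test_params);
 *	...
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 *	rhashtable_destroy(&ht);
 */
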
#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash);		 \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	 \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	    \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				    \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL;    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = next,						    \
	     next = !rht_is_a_nulls(pos) ?				    \
		    rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						     \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		     \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	     \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)

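/*
 * RCU-side iteration sketch for the macros above (illustrative; my_ht,
 * hash and the test_obj type with its "node" member are hypothetical):
 *
 *	struct bucket_table *tbl;
 *	struct rhash_head *pos;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	tbl = rht_dereference_rcu(my_ht->tbl, my_ht);
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("key %u\n", obj->key);
 *	rcu_read_unlock();
 */
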
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}

static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	int err = -EEXIST;
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	/* Because we have already taken the bucket lock in tbl,
	 * if we find that future_tbl is not yet visible then
	 * that guarantees all other insertions of the same entry
	 * will also grab the bucket lock in tbl because until
	 * the rehash completes ht->tbl won't be changed.
	 */
	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
		goto out;
	}

	if (!key)
		goto skip_lookup;

	rht_for_each(head, tbl, hash) {
		if (unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
	}

skip_lookup:
	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_insert_fast(ht, NULL, obj, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params);
}

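/*
 * Illustrative only: tables whose keys cannot be compared with memcmp()
 * (e.g. variable-length keys) supply obj_hashfn/obj_cmpfn in
 * &struct rhashtable_params and insert with an explicit key:
 *
 *	err = rhashtable_lookup_insert_key(&ht, key, &obj->node, params);
 */
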
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will trigger an automatic deferred table shrinking via
 * rhashtable_shrink() if the number of elements falls below the
 * 30% load watermark (see rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (rht_shrink_below_30(ht, tbl))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}

#endif /* _LINUX_RHASHTABLE_H */