// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_or_get()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as a key and provide a value that may represent
 * a block or inode number. That's why keys need not be unique (hashes of
 * different data may be the same). However, the user-provided value always
 * uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A
 * fixed-size hash table is used for fast key lookups.
 */

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head *c_hash;
	/* log2 of hash table size */
	int c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t c_list_lock;
	struct list_head c_list;
	/* Number of entries in cache */
	unsigned long c_entry_count;
	struct shrinker c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @value - value of the entry
 * @reusable - is the entry reusable by others?
 *
 * Creates an entry in @cache with key @key and value @value. The function
 * returns -EBUSY if an entry with the same key and value already exists in
 * the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* Initial hash list reference */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

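/*
 * Caller-side sketch (illustrative, not from the original source): because
 * duplicate (key, value) pairs are rejected with -EBUSY, a caller that only
 * wants the pair to be present can treat that error as success. "cache",
 * "hash" and "blocknr" are hypothetical:
 *
 *	err = mb_cache_entry_create(cache, GFP_NOFS, hash, blocknr, true);
 *	if (err == -EBUSY)
 *		err = 0;
 */
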
void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 *
 * @entry - entry to work on
 *
 * Wait to be the last user of the entry. A refcount of 3 means the entry is
 * held only by the hash table, the LRU list, and the caller itself.
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 3);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);

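/*
 * Lookup sketch (illustrative, not from the original source): the typical
 * pattern for scanning all reusable entries with a given key, as a
 * deduplication caller might do. "cache", "hash" and try_to_reuse() are
 * hypothetical:
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_find_first(cache, hash);
 *	while (ce) {
 *		if (try_to_reuse(ce)) {
 *			mb_cache_entry_put(cache, ce);
 *			break;
 *		}
 *		ce = mb_cache_entry_find_next(cache, ce);
 *	}
 *
 * Since mb_cache_entry_find_next() drops the reference to the entry it was
 * given, only an entry the loop exits with still needs mb_cache_entry_put().
 */
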
/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete - try to remove a cache entry
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value. The removal
 * happens only if the entry is unused. The function returns NULL in case the
 * entry was successfully removed or there's no entry in the cache. Otherwise
 * the function grabs a reference to the entry that we failed to delete
 * because it still has users, and returns it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
						    u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			if (atomic_read(&entry->e_refcnt) > 2) {
				atomic_inc(&entry->e_refcnt);
				hlist_bl_unlock(head);
				return entry;
			}
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return NULL;
		}
	}
	hlist_bl_unlock(head);

	return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it higher chances of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		/* Now a reliable check if the entry didn't get used... */
		if (atomic_read(&entry->e_refcnt) > 2) {
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			list_add_tail(&entry->e_list, &cache->c_list);
			cache->c_entry_count++;
			continue;
		}
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

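/*
 * Sketch (illustrative, not from the original source): a filesystem would
 * typically create one cache at init time. With bucket_bits = 10, the hash
 * table has 1024 buckets and, per the "bucket_count << 4" above,
 * c_max_entries is 16384:
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *
 *	if (!cache)
 *		return -ENOMEM;
 *	...
 *	mb_cache_destroy(cache);
 */
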
/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");