fs/mbcache.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_or_get()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as the key and provide a value that may
 * represent a block or inode number. That's why keys need not be unique
 * (hashes of different data may be the same). However, the user-provided
 * value always uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A
 * fixed-size hash table is used for fast key lookups.
 */

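/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * one way a filesystem could use this API for block deduplication, loosely
 * following the ext4 xattr usage described above. The helper
 * blocks_identical() and the variables hash, block_nr and new_data are
 * hypothetical.
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	struct mb_cache_entry *ce;
 *
 *	// Publish a candidate block under the hash of its contents.
 *	mb_cache_entry_create(cache, GFP_NOFS, hash, block_nr, true);
 *
 *	// Later: look for an existing block with the same content hash.
 *	for (ce = mb_cache_entry_find_first(cache, hash); ce;
 *	     ce = mb_cache_entry_find_next(cache, ce)) {
 *		if (blocks_identical(ce->e_value, new_data)) {
 *			mb_cache_entry_touch(cache, ce);
 *			mb_cache_entry_put(cache, ce);
 *			break;		// reuse the block at ce->e_value
 *		}
 *	}
 */
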
struct mb_cache {
        /* Hash table of entries */
        struct hlist_bl_head *c_hash;
        /* log2 of hash table size */
        int c_bucket_bits;
        /* Maximum entries in cache to avoid degrading hash too much */
        unsigned long c_max_entries;
        /* Protects c_list, c_entry_count */
        spinlock_t c_list_lock;
        struct list_head c_list;
        /* Number of entries in cache */
        unsigned long c_entry_count;
        struct shrinker c_shrink;
        /* Work for shrinking when the cache has too many entries */
        struct work_struct c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
                                                        u32 key)
{
        return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @value: value of the entry
 * @reusable: is the entry reusable by others?
 *
 * Creates an entry in @cache with key @key and value @value. The function
 * returns -EBUSY if an entry with the same key and value already exists in
 * the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
                          u64 value, bool reusable)
{
        struct mb_cache_entry *entry, *dup;
        struct hlist_bl_node *dup_node;
        struct hlist_bl_head *head;

        /* Schedule background reclaim if there are too many entries */
        if (cache->c_entry_count >= cache->c_max_entries)
                schedule_work(&cache->c_shrink_work);
        /* Do some sync reclaim if background reclaim cannot keep up */
        if (cache->c_entry_count >= 2*cache->c_max_entries)
                mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

        entry = kmem_cache_alloc(mb_entry_cache, mask);
        if (!entry)
                return -ENOMEM;

        INIT_LIST_HEAD(&entry->e_list);
        /* Initial reference for the hash list */
        atomic_set(&entry->e_refcnt, 1);
        entry->e_key = key;
        entry->e_value = value;
        entry->e_reusable = reusable;
        entry->e_referenced = 0;
        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
                if (dup->e_key == key && dup->e_value == value) {
                        hlist_bl_unlock(head);
                        kmem_cache_free(mb_entry_cache, entry);
                        return -EBUSY;
                }
        }
        hlist_bl_add_head(&entry->e_hash_list, head);
        hlist_bl_unlock(head);

        spin_lock(&cache->c_list_lock);
        list_add_tail(&entry->e_list, &cache->c_list);
        /* Grab ref for LRU list */
        atomic_inc(&entry->e_refcnt);
        cache->c_entry_count++;
        spin_unlock(&cache->c_list_lock);

        return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
        kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 *
 * @entry: entry to work on
 *
 * Wait to be the last user of the entry.
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
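        /*
         * The entry is created with one reference for the hash list and one
         * for the LRU list; together with the caller's own reference that
         * makes 3, so a refcount above 3 means some other user still holds
         * the entry.
         */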
        wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 3);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
                                           struct mb_cache_entry *entry,
                                           u32 key)
{
        struct mb_cache_entry *old_entry = entry;
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
                node = entry->e_hash_list.next;
        else
                node = hlist_bl_first(head);
        while (node) {
                entry = hlist_bl_entry(node, struct mb_cache_entry,
                                       e_hash_list);
                if (entry->e_key == key && entry->e_reusable) {
                        atomic_inc(&entry->e_refcnt);
                        goto out;
                }
                node = node->next;
        }
        entry = NULL;
out:
        hlist_bl_unlock(head);
        if (old_entry)
                mb_cache_entry_put(cache, old_entry);

        return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
                                                 u32 key)
{
        return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
                                                struct mb_cache_entry *entry)
{
        return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache: cache we work with
 * @key: key
 * @value: value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
                                          u64 value)
{
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_value == value) {
                        atomic_inc(&entry->e_refcnt);
                        goto out;
                }
        }
        entry = NULL;
out:
        hlist_bl_unlock(head);
        return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete - try to remove a cache entry
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Remove entry from cache @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_value == value) {
                        /* We keep hash list reference to keep entry alive */
                        hlist_bl_del_init(&entry->e_hash_list);
                        hlist_bl_unlock(head);
                        spin_lock(&cache->c_list_lock);
                        if (!list_empty(&entry->e_list)) {
                                list_del_init(&entry->e_list);
                                if (!WARN_ONCE(cache->c_entry_count == 0,
                "mbcache: attempt to decrement c_entry_count past zero"))
                                        cache->c_entry_count--;
                                atomic_dec(&entry->e_refcnt);
                        }
                        spin_unlock(&cache->c_list_lock);
                        mb_cache_entry_put(cache, entry);
                        return;
                }
        }
        hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Remove entry from cache @cache with key @key and value @value. The removal
 * happens only if the entry is unused. The function returns NULL in case the
 * entry was successfully removed or there's no such entry in the cache.
 * Otherwise the function grabs a reference to the entry that we failed to
 * delete because it still has users and returns it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
                                                    u32 key, u64 value)
{
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_value == value) {
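                        /*
                         * A refcount above 2 (hash list + LRU list) means
                         * some other user still holds the entry.
                         */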
                        if (atomic_read(&entry->e_refcnt) > 2) {
                                atomic_inc(&entry->e_refcnt);
                                hlist_bl_unlock(head);
                                return entry;
                        }
                        /* We keep hash list reference to keep entry alive */
                        hlist_bl_del_init(&entry->e_hash_list);
                        hlist_bl_unlock(head);
                        spin_lock(&cache->c_list_lock);
                        if (!list_empty(&entry->e_list)) {
                                list_del_init(&entry->e_list);
                                if (!WARN_ONCE(cache->c_entry_count == 0,
                "mbcache: attempt to decrement c_entry_count past zero"))
                                        cache->c_entry_count--;
                                atomic_dec(&entry->e_refcnt);
                        }
                        spin_unlock(&cache->c_list_lock);
                        mb_cache_entry_put(cache, entry);
                        return NULL;
                }
        }
        hlist_bl_unlock(head);

        return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);

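/*
 * Illustrative sketch (editor's addition, not taken from any particular
 * caller): how the return value of mb_cache_entry_delete_or_get() can be
 * combined with mb_cache_entry_wait_unused(). The variables cache, hash and
 * block_nr are hypothetical.
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_delete_or_get(cache, hash, block_nr);
 *	if (ce) {
 *		// The entry still had users and was left in the cache; wait
 *		// until we are the last user, then drop our reference.
 *		mb_cache_entry_wait_unused(ce);
 *		mb_cache_entry_put(cache, ce);
 *	}
 */
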
/* mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks the entry as used to give it a higher chance of surviving in the
 * cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
                          struct mb_cache_entry *entry)
{
        entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);

        return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned long nr_to_scan)
{
        struct mb_cache_entry *entry;
        struct hlist_bl_head *head;
        unsigned long shrunk = 0;

        spin_lock(&cache->c_list_lock);
        while (nr_to_scan-- && !list_empty(&cache->c_list)) {
                entry = list_first_entry(&cache->c_list,
                                         struct mb_cache_entry, e_list);
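                /*
                 * Give entries that were recently used, or that still have
                 * users, another pass through the LRU list.
                 */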
                if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
                        entry->e_referenced = 0;
                        list_move_tail(&entry->e_list, &cache->c_list);
                        continue;
                }
                list_del_init(&entry->e_list);
                cache->c_entry_count--;
                /*
                 * We keep LRU list reference so that entry doesn't go away
                 * from under us.
                 */
                spin_unlock(&cache->c_list_lock);
                head = mb_cache_entry_head(cache, entry->e_key);
                hlist_bl_lock(head);
                /* Now a reliable check if the entry didn't get used... */
                if (atomic_read(&entry->e_refcnt) > 2) {
                        hlist_bl_unlock(head);
                        spin_lock(&cache->c_list_lock);
                        list_add_tail(&entry->e_list, &cache->c_list);
                        cache->c_entry_count++;
                        continue;
                }
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
                        atomic_dec(&entry->e_refcnt);
                }
                hlist_bl_unlock(head);
                if (mb_cache_entry_put(cache, entry))
                        shrunk++;
                cond_resched();
                spin_lock(&cache->c_list_lock);
        }
        spin_unlock(&cache->c_list_lock);

        return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);
        return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
        struct mb_cache *cache = container_of(work, struct mb_cache,
                                              c_shrink_work);
        mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache for keys with a hash table of 2^bucket_bits buckets.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
        struct mb_cache *cache;
        unsigned long bucket_count = 1UL << bucket_bits;
        unsigned long i;

        cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
        if (!cache)
                goto err_out;
        cache->c_bucket_bits = bucket_bits;
        cache->c_max_entries = bucket_count << 4;
        INIT_LIST_HEAD(&cache->c_list);
        spin_lock_init(&cache->c_list_lock);
        cache->c_hash = kmalloc_array(bucket_count,
                                      sizeof(struct hlist_bl_head),
                                      GFP_KERNEL);
        if (!cache->c_hash) {
                kfree(cache);
                goto err_out;
        }
        for (i = 0; i < bucket_count; i++)
                INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

        cache->c_shrink.count_objects = mb_cache_count;
        cache->c_shrink.scan_objects = mb_cache_scan;
        cache->c_shrink.seeks = DEFAULT_SEEKS;
        if (register_shrinker(&cache->c_shrink)) {
                kfree(cache->c_hash);
                kfree(cache);
                goto err_out;
        }

        INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

        return cache;

err_out:
        return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in the cache and the cache itself. The caller must make
 * sure nobody (except the shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
        struct mb_cache_entry *entry, *next;

        unregister_shrinker(&cache->c_shrink);

        /*
         * We don't bother with any locking. Cache must not be used at this
         * point.
         */
        list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
                        atomic_dec(&entry->e_refcnt);
                } else
                        WARN_ON(1);
                list_del(&entry->e_list);
                WARN_ON(atomic_read(&entry->e_refcnt) != 1);
                mb_cache_entry_put(cache, entry);
        }
        kfree(cache->c_hash);
        kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
        mb_entry_cache = kmem_cache_create("mbcache",
                                sizeof(struct mb_cache_entry), 0,
                                SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
        if (!mb_entry_cache)
                return -ENOMEM;
        return 0;
}

static void __exit mbcache_exit(void)
{
        kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");