#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as the key and provide a value that may
 * represent a block or inode number. That is why keys need not be unique
 * (hashes of different data may collide). However, a user-provided value
 * always uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
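
/*
 * Typical use, as an illustrative sketch only (the caller-side variables
 * below are hypothetical, not lifted from ext2/ext4):
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	u32 hash = ...;		hash of the candidate xattr block contents
 *	u64 block = ...;	number of the block holding those contents
 *
 * Remember the block under its content hash; a duplicate of the same
 * (hash, block) pair is rejected with -EBUSY:
 *
 *	mb_cache_entry_create(cache, GFP_NOFS, hash, block, true);
 *
 * Later, look for an already cached block with the same content hash:
 *
 *	struct mb_cache_entry *ce;
 *
 *	for (ce = mb_cache_entry_find_first(cache, hash); ce;
 *	     ce = mb_cache_entry_find_next(cache, ce)) {
 *		compare block contents; on a match, reuse the block, drop
 *		the reference with mb_cache_entry_put(cache, ce) and stop
 *	}
 *
 * When the block is freed, remove its entry:
 *
 *	mb_cache_entry_delete(cache, hash, block);
 */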

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

f9a61eb4 61/*
7a2508e1 62 * mb_cache_entry_create - create entry in cache
f9a61eb4
JK
63 * @cache - cache where the entry should be created
64 * @mask - gfp mask with which the entry should be allocated
65 * @key - key of the entry
c07dfcb4
TE
66 * @value - value of the entry
67 * @reusable - is the entry reusable by others?
f9a61eb4 68 *
c07dfcb4
TE
69 * Creates entry in @cache with key @key and value @value. The function returns
70 * -EBUSY if entry with the same key and value already exists in cache.
71 * Otherwise 0 is returned.
f9a61eb4 72 */
7a2508e1 73int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
c07dfcb4 74 u64 value, bool reusable)
f9a61eb4 75{
7a2508e1 76 struct mb_cache_entry *entry, *dup;
f9a61eb4
JK
77 struct hlist_bl_node *dup_node;
78 struct hlist_bl_head *head;
79
c2f3140f
JK
80 /* Schedule background reclaim if there are too many entries */
81 if (cache->c_entry_count >= cache->c_max_entries)
82 schedule_work(&cache->c_shrink_work);
83 /* Do some sync reclaim if background reclaim cannot keep up */
84 if (cache->c_entry_count >= 2*cache->c_max_entries)
7a2508e1 85 mb_cache_shrink(cache, SYNC_SHRINK_BATCH);
c2f3140f 86
7a2508e1 87 entry = kmem_cache_alloc(mb_entry_cache, mask);
f9a61eb4
JK
88 if (!entry)
89 return -ENOMEM;
90
f0c8b462 91 INIT_LIST_HEAD(&entry->e_list);
f9a61eb4
JK
92 /* One ref for hash, one ref returned */
93 atomic_set(&entry->e_refcnt, 1);
94 entry->e_key = key;
c07dfcb4 95 entry->e_value = value;
6048c64b 96 entry->e_reusable = reusable;
dc8d5e56 97 head = mb_cache_entry_head(cache, key);
f9a61eb4
JK
98 hlist_bl_lock(head);
99 hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
c07dfcb4 100 if (dup->e_key == key && dup->e_value == value) {
f9a61eb4 101 hlist_bl_unlock(head);
7a2508e1 102 kmem_cache_free(mb_entry_cache, entry);
f9a61eb4
JK
103 return -EBUSY;
104 }
105 }
106 hlist_bl_add_head(&entry->e_hash_list, head);
107 hlist_bl_unlock(head);
108
f0c8b462
JK
109 spin_lock(&cache->c_list_lock);
110 list_add_tail(&entry->e_list, &cache->c_list);
f9a61eb4
JK
111 /* Grab ref for LRU list */
112 atomic_inc(&entry->e_refcnt);
113 cache->c_entry_count++;
f0c8b462 114 spin_unlock(&cache->c_list_lock);
f9a61eb4
JK
115
116 return 0;
117}
7a2508e1 118EXPORT_SYMBOL(mb_cache_entry_create);

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

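/*
 * __entry_find - common helper for mb_cache_entry_find_first/find_next()
 *
 * Walks the hash chain for @key, starting just after @entry (or from the
 * chain head when @entry is NULL or already unhashed), and returns the first
 * reusable entry found with an extra reference taken. The reference held on
 * @entry, if any, is dropped. Returns NULL when no further reusable entry
 * with @key exists.
 */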
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Search for an entry with the given @key and @value. If found, the entry is
 * returned with its refcount raised; otherwise NULL is returned.
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete - remove a cache entry
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it higher chances of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

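/* Shrinker callback: report how many entries the cache currently holds */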
static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}
static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

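/*
 * Background reclaim, scheduled from mb_cache_entry_create() once the cache
 * grows past c_max_entries.
 */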
static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
				GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");