#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache2.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb2_cache_entry_delete_block()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * They use the hash of a block's contents as a key and the block number as a
 * value. That's why keys need not be unique (different xattr blocks may end up
 * having the same hash). However a block number always uniquely identifies a
 * cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
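
/*
 * A minimal usage sketch (illustrative only; error handling is elided and
 * hash_of_xattr_block() is a hypothetical helper, not part of this API):
 *
 *	struct mb2_cache *cache = mb2_cache_create(10);
 *	u32 key = hash_of_xattr_block(bh->b_data);
 *
 *	// Remember that a block with this content hash lives at bh->b_blocknr
 *	mb2_cache_entry_create(cache, GFP_NOFS, key, bh->b_blocknr);
 *	...
 *	// Forget the block again, e.g. when it gets freed or modified
 *	mb2_cache_entry_delete_block(cache, key, bh->b_blocknr);
 *	...
 *	mb2_cache_destroy(cache);
 */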

struct mb2_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	int			c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb2_entry_cache;

static unsigned long mb2_cache_shrink(struct mb2_cache *cache,
				      unsigned int nr_to_scan);

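/*
 * Each entry packs two things into _e_hash_list_head: the pointer to its
 * hash chain head and, in bit 0, a "referenced" flag. This works because
 * struct hlist_bl_head is at least pointer-aligned, so bit 0 of a valid
 * head pointer is always zero. The helpers below hide this encoding.
 */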
static inline bool mb2_cache_entry_referenced(struct mb2_cache_entry *entry)
{
	return entry->_e_hash_list_head & 1;
}

static inline void mb2_cache_entry_set_referenced(struct mb2_cache_entry *entry)
{
	entry->_e_hash_list_head |= 1;
}

static inline void mb2_cache_entry_clear_referenced(
					struct mb2_cache_entry *entry)
{
	entry->_e_hash_list_head &= ~1;
}

static inline struct hlist_bl_head *mb2_cache_entry_head(
					struct mb2_cache_entry *entry)
{
	return (struct hlist_bl_head *)
			(entry->_e_hash_list_head & ~1);
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb2_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @block - block that contains data
 *
 * Creates entry in @cache with key @key and records that data is stored in
 * block @block. The function returns -EBUSY if entry with the same key
 * and for the same block already exists in cache. Otherwise 0 is returned.
 */
int mb2_cache_entry_create(struct mb2_cache *cache, gfp_t mask, u32 key,
			   sector_t block)
{
	struct mb2_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb2_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb2_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for the hash list; another is grabbed below for the LRU list */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_block = block;
	head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
	entry->_e_hash_list_head = (unsigned long)head;
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_block == block) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb2_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb2_cache_entry_create);

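/*
 * Sketch of how a caller might consume the return value of
 * mb2_cache_entry_create() (cache, key and block are hypothetical
 * variables of the caller):
 *
 *	int err = mb2_cache_entry_create(cache, GFP_NOFS, key, block);
 *
 *	if (err == -EBUSY)
 *		err = 0;	// already cached - nothing to do
 *	if (err)
 *		goto out;	// e.g. -ENOMEM
 */
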
void __mb2_cache_entry_free(struct mb2_cache_entry *entry)
{
	kmem_cache_free(mb2_entry_cache, entry);
}
EXPORT_SYMBOL(__mb2_cache_entry_free);

static struct mb2_cache_entry *__entry_find(struct mb2_cache *cache,
					    struct mb2_cache_entry *entry,
					    u32 key)
{
	struct mb2_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	if (entry)
		head = mb2_cache_entry_head(entry);
	else
		head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb2_cache_entry,
				       e_hash_list);
		if (entry->e_key == key) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb2_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb2_cache_entry_find_first - find the first entry in cache with given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for entry with key @key. Grabs reference to the first
 * entry found and returns the entry.
 */
struct mb2_cache_entry *mb2_cache_entry_find_first(struct mb2_cache *cache,
						   u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb2_cache_entry_find_first);

/*
 * mb2_cache_entry_find_next - find next entry in cache with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next entry in the hash chain which has the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of entry races
 * with the search), finds the first entry in the hash chain. The function
 * drops reference to @entry and returns with a reference to the found entry.
 */
struct mb2_cache_entry *mb2_cache_entry_find_next(struct mb2_cache *cache,
						  struct mb2_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb2_cache_entry_find_next);

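/*
 * Typical scan over all entries sharing one key (a sketch; note that a
 * caller breaking out of the loop early still holds a reference and must
 * drop it with mb2_cache_entry_put(); candidate_matches() is a
 * hypothetical check):
 *
 *	entry = mb2_cache_entry_find_first(cache, key);
 *	while (entry) {
 *		if (candidate_matches(entry->e_block))
 *			break;
 *		entry = mb2_cache_entry_find_next(cache, entry);
 *	}
 */
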
/* mb2_cache_entry_delete_block - remove information about block from cache
 * @cache - cache we work with
 * @key - key of the entry to remove
 * @block - block containing data for @key
 *
 * Remove entry from cache @cache with key @key and data stored in @block.
 */
void mb2_cache_entry_delete_block(struct mb2_cache *cache, u32 key,
				  sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb2_cache_entry *entry;

	head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb2_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb2_cache_entry_delete_block);

/* mb2_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it a higher chance of surviving in the cache.
 */
void mb2_cache_entry_touch(struct mb2_cache *cache,
			   struct mb2_cache_entry *entry)
{
	mb2_cache_entry_set_referenced(entry);
}
EXPORT_SYMBOL(mb2_cache_entry_touch);

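/*
 * Shrinker callbacks: mb2_cache_count() reports the number of cached
 * entries to the VM and mb2_cache_scan() reclaims some of them under
 * memory pressure. Both are wired up via cache->c_shrink in
 * mb2_cache_create().
 */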
static unsigned long mb2_cache_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	struct mb2_cache *cache = container_of(shrink, struct mb2_cache,
					       c_shrink);

	return cache->c_entry_count;
}

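/*
 * Reclaim takes entries from the head of c_list but gives each entry whose
 * referenced bit is set a second chance: the bit is cleared and the entry
 * is moved to the tail instead of being freed. This approximates LRU in a
 * clock-like fashion without touching the list on every cache hit.
 */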
/* Shrink number of entries in cache */
static unsigned long mb2_cache_shrink(struct mb2_cache *cache,
				      unsigned int nr_to_scan)
{
	struct mb2_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned int shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb2_cache_entry, e_list);
		if (mb2_cache_entry_referenced(entry)) {
			mb2_cache_entry_clear_referenced(entry);
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb2_cache_entry_head(entry);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb2_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb2_cache_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	int nr_to_scan = sc->nr_to_scan;
	struct mb2_cache *cache = container_of(shrink, struct mb2_cache,
					       c_shrink);
	return mb2_cache_shrink(cache, nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb2_cache_shrink_worker(struct work_struct *work)
{
	struct mb2_cache *cache = container_of(work, struct mb2_cache,
					       c_shrink_work);
	mb2_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb2_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb2_cache *mb2_cache_create(int bucket_bits)
{
	struct mb2_cache *cache;
	int bucket_count = 1 << bucket_bits;
	int i;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	cache = kzalloc(sizeof(struct mb2_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
				GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb2_cache_count;
	cache->c_shrink.scan_objects = mb2_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&cache->c_shrink);

	INIT_WORK(&cache->c_shrink_work, mb2_cache_shrink_worker);

	return cache;

err_out:
	module_put(THIS_MODULE);
	return NULL;
}
EXPORT_SYMBOL(mb2_cache_create);

/*
 * mb2_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb2_cache_destroy(struct mb2_cache *cache)
{
	struct mb2_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb2_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(mb2_cache_destroy);

static int __init mb2cache_init(void)
{
	mb2_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb2_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	BUG_ON(!mb2_entry_cache);
	return 0;
}

static void __exit mb2cache_exit(void)
{
	kmem_cache_destroy(mb2_entry_cache);
}

module_init(mb2cache_init)
module_exit(mb2cache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");