// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * User extended attribute client side cache functions.
 *
 * Author: Frank van der Linden <fllinden@amazon.com>
 */
#include <linux/errno.h>
#include <linux/nfs_fs.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>
#include <uapi/linux/xattr.h>

#include "nfs4_fs.h"
#include "internal.h"

/*
 * User extended attributes client side caching is implemented by having
 * a cache structure attached to NFS inodes. This structure is allocated
 * when needed, and freed when the cache is zapped.
 *
 * The cache structure contains a hash table of entries, and a pointer
 * to a special-cased entry for the listxattr cache.
 *
 * Accessing and allocating / freeing the caches is done via reference
 * counting. The cache entries use a similar refcounting scheme.
 *
 * This makes freeing a cache, both from the shrinker and from the
 * zap cache path, easy. It also means that, in current use cases,
 * the large majority of inodes will not waste any memory, as they
 * will never have any user extended attributes assigned to them.
 *
 * Attribute entries are hashed into a simple hash table. They are
 * also part of an LRU.
 *
 * There are three shrinkers.
 *
 * Two shrinkers deal with the cache entries themselves: one for
 * large entries (> PAGE_SIZE), and one for smaller entries. The
 * shrinker for the larger entries works more aggressively than
 * the one for the smaller entries.
 *
 * The other shrinker frees the cache structures themselves.
 */

/*
 * 64 buckets is a good default. There is likely no reasonable
 * workload that uses more than even 64 user extended attributes.
 * You can certainly add a lot more - but you get what you ask for
 * in those circumstances.
 */
#define NFS4_XATTR_HASH_SIZE	64

#define NFSDBG_FACILITY	NFSDBG_XATTRCACHE

struct nfs4_xattr_cache;
struct nfs4_xattr_entry;

struct nfs4_xattr_bucket {
	spinlock_t lock;
	struct hlist_head hlist;
	struct nfs4_xattr_cache *cache;
	bool draining;
};

struct nfs4_xattr_cache {
	struct kref ref;
	spinlock_t hash_lock; /* protects hashtable and lru */
	struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
	struct list_head lru;
	struct list_head dispose;
	atomic_long_t nent;
	spinlock_t listxattr_lock;
	struct inode *inode;
	struct nfs4_xattr_entry *listxattr;
	struct work_struct work;
};

struct nfs4_xattr_entry {
	struct kref ref;
	struct hlist_node hnode;
	struct list_head lru;
	struct list_head dispose;
	char *xattr_name;
	void *xattr_value;
	size_t xattr_size;
	struct nfs4_xattr_bucket *bucket;
	uint32_t flags;
};

#define NFS4_XATTR_ENTRY_EXTVAL	0x0001

/*
 * LRU list of NFS inodes that have xattr caches.
 */
static struct list_lru nfs4_xattr_cache_lru;
static struct list_lru nfs4_xattr_entry_lru;
static struct list_lru nfs4_xattr_large_entry_lru;

static struct kmem_cache *nfs4_xattr_cache_cachep;

static struct workqueue_struct *nfs4_xattr_cache_wq;

/*
 * Hashing helper functions.
 */
static void
nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&cache->buckets[i].hlist);
		spin_lock_init(&cache->buckets[i].lock);
		cache->buckets[i].cache = cache;
		cache->buckets[i].draining = false;
	}
}

/*
 * Locking order:
 * 1. inode i_lock or bucket lock
 * 2. list_lru lock (taken by list_lru_* functions)
 */

/*
 * Wrapper functions to add a cache entry to the right LRU.
 */
static bool
nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_add(lru, &entry->lru);
}

static bool
nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_del(lru, &entry->lru);
}

/*
 * This function allocates cache entries. They are the normal
 * extended attribute name/value pairs, but may also be a listxattr
 * cache. Those allocations use the same entry so that they can be
 * treated as one by the memory shrinker.
 *
 * xattr cache entries are allocated together with names. If the
 * value fits into one page with the entry structure and the name,
 * it will also be part of the same allocation (kmalloc). This is
 * expected to be the vast majority of cases. Larger allocations
 * have a value pointer that is allocated separately by kvmalloc.
 *
 * Parameters:
 *
 * @name:  Name of the extended attribute. NULL for listxattr cache
 *         entry.
 * @value: Value of attribute, or listxattr cache. NULL if the
 *         value is to be copied from pages instead.
 * @pages: Pages to copy the value from, if not NULL. Passed in to
 *         make it easier to copy the value after an RPC, even if
 *         the value will not be passed up to application (e.g.
 *         for a 'query' getxattr with NULL buffer).
 * @len:   Length of the value. Can be 0 for zero-length attributes.
 *         @value and @pages will be NULL if @len is 0.
 */
static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value,
		       struct page **pages, size_t len)
{
	struct nfs4_xattr_entry *entry;
	void *valp;
	char *namep;
	size_t alloclen, slen;
	char *buf;
	uint32_t flags;

	BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) +
	    XATTR_NAME_MAX + 1 > PAGE_SIZE);

	alloclen = sizeof(struct nfs4_xattr_entry);
	if (name != NULL) {
		slen = strlen(name) + 1;
		alloclen += slen;
	} else
		slen = 0;

	if (alloclen + len <= PAGE_SIZE) {
		alloclen += len;
		flags = 0;
	} else {
		flags = NFS4_XATTR_ENTRY_EXTVAL;
	}

	buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (buf == NULL)
		return NULL;
	entry = (struct nfs4_xattr_entry *)buf;

	if (name != NULL) {
		namep = buf + sizeof(struct nfs4_xattr_entry);
		memcpy(namep, name, slen);
	} else {
		namep = NULL;
	}

	if (flags & NFS4_XATTR_ENTRY_EXTVAL) {
		valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS);
		if (valp == NULL) {
			kfree(buf);
			return NULL;
		}
	} else if (len != 0) {
		valp = buf + sizeof(struct nfs4_xattr_entry) + slen;
	} else
		valp = NULL;

	if (valp != NULL) {
		if (value != NULL)
			memcpy(valp, value, len);
		else
			_copy_from_pages(valp, pages, 0, len);
	}

	entry->flags = flags;
	entry->xattr_value = valp;
	kref_init(&entry->ref);
	entry->xattr_name = namep;
	entry->xattr_size = len;
	entry->bucket = NULL;
	INIT_LIST_HEAD(&entry->lru);
	INIT_LIST_HEAD(&entry->dispose);
	INIT_HLIST_NODE(&entry->hnode);

	return entry;
}

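/*
 * Free an entry. The value buffer is freed separately if it was
 * allocated outside of the entry itself (the EXTVAL case).
 */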
static void
nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
{
	if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
		kvfree(entry->xattr_value);
	kfree(entry);
}

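/*
 * kref release callback for an entry. An entry that is still on an
 * LRU list must not be freed, so warn and bail out in that case.
 */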
static void
nfs4_xattr_free_entry_cb(struct kref *kref)
{
	struct nfs4_xattr_entry *entry;

	entry = container_of(kref, struct nfs4_xattr_entry, ref);

	if (WARN_ON(!list_empty(&entry->lru)))
		return;

	nfs4_xattr_free_entry(entry);
}

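/*
 * kref release callback for a cache structure. By this time, all
 * buckets are expected to be empty.
 */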
static void
nfs4_xattr_free_cache_cb(struct kref *kref)
{
	struct nfs4_xattr_cache *cache;
	int i;

	cache = container_of(kref, struct nfs4_xattr_cache, ref);

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
			return;
		cache->buckets[i].draining = false;
	}

	cache->listxattr = NULL;

	kmem_cache_free(nfs4_xattr_cache_cachep, cache);
}

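/*
 * Allocate an empty cache structure with a single reference. Most
 * fields are set up by the slab constructor, nfs4_xattr_cache_init_once().
 */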
static struct nfs4_xattr_cache *
nfs4_xattr_alloc_cache(void)
{
	struct nfs4_xattr_cache *cache;

	cache = kmem_cache_alloc(nfs4_xattr_cache_cachep,
	    GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (cache == NULL)
		return NULL;

	kref_init(&cache->ref);
	atomic_long_set(&cache->nent, 0);

	return cache;
}

/*
 * Set the listxattr cache, which is a special-cased cache entry.
 * The special value ERR_PTR(-ESTALE) is used to indicate that
 * the cache is being drained - this prevents a new listxattr
 * cache from being added to what is now a stale cache.
 */
static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
			 struct nfs4_xattr_entry *new)
{
	struct nfs4_xattr_entry *old;
	int ret = 1;

	spin_lock(&cache->listxattr_lock);

	old = cache->listxattr;

	if (old == ERR_PTR(-ESTALE)) {
		ret = 0;
		goto out;
	}

	cache->listxattr = new;
	if (new != NULL && new != ERR_PTR(-ESTALE))
		nfs4_xattr_entry_lru_add(new);

	if (old != NULL) {
		nfs4_xattr_entry_lru_del(old);
		kref_put(&old->ref, nfs4_xattr_free_entry_cb);
	}
out:
	spin_unlock(&cache->listxattr_lock);

	return ret;
}

/*
 * Unlink a cache from its parent inode, clearing out an invalid
 * cache. Must be called with i_lock held.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_cache_unlink(struct inode *inode)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *oldcache;

	nfsi = NFS_I(inode);

	oldcache = nfsi->xattr_cache;
	if (oldcache != NULL) {
		list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
		oldcache->inode = NULL;
	}
	nfsi->xattr_cache = NULL;
	nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;

	return oldcache;
}

/*
 * Discard a cache. Usually called by a worker, since walking all
 * the entries can take up some cycles that we don't want to waste
 * in the I/O path. Can also be called from the shrinker callback.
 *
 * The cache is dead; it has already been unlinked from its inode,
 * and no longer appears on the cache LRU list.
 *
 * Mark all buckets as draining, so that no new entries are added. This
 * could still happen in the unlikely but possible case that another
 * thread had grabbed a reference before it was unlinked from the inode,
 * and is still holding it for an add operation.
 *
 * Remove all entries from the LRU lists, so that there is no longer
 * any way to 'find' this cache. Then, remove the entries from the hash
 * table.
 *
 * At that point, the cache will remain empty and can be freed when the final
 * reference drops, which is very likely the kref_put at the end of
 * this function, or the one called immediately afterwards in the
 * shrinker callback.
 */
static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
	unsigned int i;
	struct nfs4_xattr_entry *entry;
	struct nfs4_xattr_bucket *bucket;
	struct hlist_node *n;

	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		bucket = &cache->buckets[i];

		spin_lock(&bucket->lock);
		bucket->draining = true;
		hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
			nfs4_xattr_entry_lru_del(entry);
			hlist_del_init(&entry->hnode);
			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		}
		spin_unlock(&bucket->lock);
	}

	atomic_long_set(&cache->nent, 0);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

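/* Workqueue callback: discard a dead cache outside of the I/O path. */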
static void
nfs4_xattr_discard_cache_worker(struct work_struct *work)
{
	struct nfs4_xattr_cache *cache = container_of(work,
	    struct nfs4_xattr_cache, work);

	nfs4_xattr_discard_cache(cache);
}

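/* Hand an unlinked cache off to the workqueue to be discarded. */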
static void
nfs4_xattr_reap_cache(struct nfs4_xattr_cache *cache)
{
	queue_work(nfs4_xattr_cache_wq, &cache->work);
}

/*
 * Get a referenced copy of the cache structure. Avoid doing allocs
 * while holding i_lock, which means that we do some optimistic allocation,
 * and might have to free the result in rare cases.
 *
 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
 * and acts accordingly, replacing the cache when needed. For the read case
 * (!add), this means that the caller must make sure that the cache
 * is valid before calling this function. getxattr and listxattr call
 * revalidate_inode to do this. The attribute cache timeout (for the
 * non-delegated case) is expected to be dealt with in the revalidate
 * call.
 */

static struct nfs4_xattr_cache *
nfs4_xattr_get_cache(struct inode *inode, int add)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *cache, *oldcache, *newcache;

	nfsi = NFS_I(inode);

	cache = oldcache = NULL;

	spin_lock(&inode->i_lock);

	if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
		oldcache = nfs4_xattr_cache_unlink(inode);
	else
		cache = nfsi->xattr_cache;

	if (cache != NULL)
		kref_get(&cache->ref);

	spin_unlock(&inode->i_lock);

	if (add && cache == NULL) {
		newcache = NULL;

		cache = nfs4_xattr_alloc_cache();
		if (cache == NULL)
			goto out;

		spin_lock(&inode->i_lock);
		if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
			/*
			 * The cache was invalidated again. Give up,
			 * since what we want to enter is now likely
			 * outdated anyway.
			 */
			spin_unlock(&inode->i_lock);
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = NULL;
			goto out;
		}

		/*
		 * Check if someone beat us to it.
		 */
		if (nfsi->xattr_cache != NULL) {
			newcache = nfsi->xattr_cache;
			kref_get(&newcache->ref);
		} else {
			kref_get(&cache->ref);
			nfsi->xattr_cache = cache;
			cache->inode = inode;
			list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
		}

		spin_unlock(&inode->i_lock);

		/*
		 * If there was a race, throw away the cache we just
		 * allocated, and use the new one allocated by someone
		 * else.
		 */
		if (newcache != NULL) {
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = newcache;
		}
	}

out:
	/*
	 * Discarding an old cache is done via a workqueue.
	 */
	if (oldcache != NULL)
		nfs4_xattr_reap_cache(oldcache);

	return cache;
}

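/* Map an attribute name to its hash bucket. */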
static inline struct nfs4_xattr_bucket *
nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
{
	return &cache->buckets[jhash(name, strlen(name), 0) &
	    (ARRAY_SIZE(cache->buckets) - 1)];
}

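/*
 * Look up an entry by name in a bucket. The caller must hold the
 * bucket lock; no reference is taken on the returned entry.
 */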
static struct nfs4_xattr_entry *
nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
{
	struct nfs4_xattr_entry *entry;

	entry = NULL;

	hlist_for_each_entry(entry, &bucket->hlist, hnode) {
		if (!strcmp(entry->xattr_name, name))
			break;
	}

	return entry;
}

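/*
 * Insert an entry into the hash table, replacing any older entry
 * with the same name. Returns 0 if the bucket is draining (the
 * cache is being discarded), 1 on success.
 */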
static int
nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
		    struct nfs4_xattr_entry *entry)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *oldentry = NULL;
	int ret = 1;

	bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
	entry->bucket = bucket;

	spin_lock(&bucket->lock);

	if (bucket->draining) {
		ret = 0;
		goto out;
	}

	oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
	if (oldentry != NULL) {
		hlist_del_init(&oldentry->hnode);
		nfs4_xattr_entry_lru_del(oldentry);
	} else {
		atomic_long_inc(&cache->nent);
	}

	hlist_add_head(&entry->hnode, &bucket->hlist);
	nfs4_xattr_entry_lru_add(entry);

out:
	spin_unlock(&bucket->lock);

	if (oldentry != NULL)
		kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);

	return ret;
}

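/* Remove the entry with the given name from the hash table, if present. */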
static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL) {
		hlist_del_init(&entry->hnode);
		nfs4_xattr_entry_lru_del(entry);
		atomic_long_dec(&cache->nent);
	}

	spin_unlock(&bucket->lock);

	if (entry != NULL)
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}

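/* Find an entry by name and return it with an extra reference, or NULL. */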
static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL)
		kref_get(&entry->ref);

	spin_unlock(&bucket->lock);

	return entry;
}

/*
 * Entry point to retrieve an entry from the cache.
 */
ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
			     ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	ret = 0;
	entry = nfs4_xattr_hash_find(cache, name);

	if (entry != NULL) {
		dprintk("%s: cache hit '%s', len %lu\n", __func__,
		    entry->xattr_name, (unsigned long)entry->xattr_size);
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (buflen < entry->xattr_size)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	} else {
		dprintk("%s: cache miss '%s'\n", __func__, name);
		ret = -ENOENT;
	}

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Retrieve a cached list of xattrs from the cache.
 */
ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	spin_lock(&cache->listxattr_lock);

	entry = cache->listxattr;

	if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (entry->xattr_size > buflen)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
	} else {
		ret = -ENOENT;
	}

	spin_unlock(&cache->listxattr_lock);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Add an xattr to the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_add(struct inode *inode, const char *name,
			  const char *buf, struct page **pages, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	dprintk("%s: add '%s' len %lu\n", __func__,
	    name, (unsigned long)buflen);

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
	if (entry == NULL)
		goto out;

	(void)nfs4_xattr_set_listcache(cache, NULL);

	if (!nfs4_xattr_hash_add(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Remove an xattr from the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
	struct nfs4_xattr_cache *cache;

	dprintk("%s: remove '%s'\n", __func__, name);

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return;

	(void)nfs4_xattr_set_listcache(cache, NULL);
	nfs4_xattr_hash_remove(cache, name);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Cache listxattr output, replacing any possible old one.
 */
void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
			       ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
	if (entry == NULL)
		goto out;

	/*
	 * This is just there to be able to get to bucket->cache,
	 * which is obviously the same for all buckets, so just
	 * use bucket 0.
	 */
	entry->bucket = &cache->buckets[0];

	if (!nfs4_xattr_set_listcache(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Zap the entire cache. Called when an inode is evicted.
 */
void nfs4_xattr_cache_zap(struct inode *inode)
{
	struct nfs4_xattr_cache *oldcache;

	spin_lock(&inode->i_lock);
	oldcache = nfs4_xattr_cache_unlink(inode);
	spin_unlock(&inode->i_lock);

	if (oldcache)
		nfs4_xattr_discard_cache(oldcache);
}

/*
 * The large-entry LRU is shrunk more aggressively than the cache LRU,
 * by setting @seeks to 1 for its shrinker.
 *
 * Cache structures are freed only when they've become empty, after
 * pruning all but one entry.
 */

static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfs4_xattr_cache_shrinker = {
	.count_objects = nfs4_xattr_cache_count,
	.scan_objects = nfs4_xattr_cache_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_entry_shrinker = {
	.count_objects = nfs4_xattr_entry_count,
	.scan_objects = nfs4_xattr_entry_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 512,
	.flags = SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_large_entry_shrinker = {
	.count_objects = nfs4_xattr_entry_count,
	.scan_objects = nfs4_xattr_entry_scan,
	.seeks = 1,
	.batch = 512,
	.flags = SHRINKER_MEMCG_AWARE,
};

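/*
 * LRU walk callback for the cache shrinker: unlink a (nearly) empty
 * cache from its inode and move it to the dispose list.
 */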
static enum lru_status
cache_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct inode *inode;
	struct nfs4_xattr_cache *cache = container_of(item,
	    struct nfs4_xattr_cache, lru);

	if (atomic_long_read(&cache->nent) > 1)
		return LRU_SKIP;

	/*
	 * If a cache structure is on the LRU list, we know that
	 * its inode is valid. Try to lock it to break the link.
	 * Since we're inverting the lock order here, only try.
	 */
	inode = cache->inode;

	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	kref_get(&cache->ref);

	cache->inode = NULL;
	NFS_I(inode)->xattr_cache = NULL;
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
	list_lru_isolate(lru, &cache->lru);

	spin_unlock(&inode->i_lock);

	list_add_tail(&cache->dispose, dispose);
	return LRU_REMOVED;
}

static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_cache *cache;

	freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
	    cache_lru_isolate, &dispose);
	while (!list_empty(&dispose)) {
		cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
		    dispose);
		list_del_init(&cache->dispose);
		nfs4_xattr_discard_cache(cache);
		kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;

	count = list_lru_count(&nfs4_xattr_cache_lru);
	return vfs_pressure_ratio(count);
}

static enum lru_status
entry_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry = container_of(item,
	    struct nfs4_xattr_entry, lru);

	bucket = entry->bucket;
	cache = bucket->cache;

	/*
	 * Unhook the entry from its parent (either a cache bucket
	 * or a cache structure if it's a listxattr buf), so that
	 * it's no longer found. Then add it to the isolate list,
	 * to be freed later.
	 *
	 * In both cases, we're inverting the lock order, so use
	 * trylock and skip the entry if we can't get the lock.
	 */
	if (entry->xattr_name != NULL) {
		/* Regular cache entry */
		if (!spin_trylock(&bucket->lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		hlist_del_init(&entry->hnode);
		atomic_long_dec(&cache->nent);
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&bucket->lock);
	} else {
		/* Listxattr cache entry */
		if (!spin_trylock(&cache->listxattr_lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		cache->listxattr = NULL;
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&cache->listxattr_lock);
	}

	list_add_tail(&entry->dispose, dispose);
	return LRU_REMOVED;
}

static unsigned long
nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_entry *entry;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);

	while (!list_empty(&dispose)) {
		entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
		    dispose);
		list_del_init(&entry->dispose);

		/*
		 * Drop two references: the one that we just grabbed
		 * in entry_lru_isolate, and the one that was set
		 * when the entry was first allocated.
		 */
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	count = list_lru_count(lru);
	return vfs_pressure_ratio(count);
}

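/*
 * Slab constructor: initialize a cache structure when its slab
 * object is first created.
 */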
static void nfs4_xattr_cache_init_once(void *p)
{
	struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;

	spin_lock_init(&cache->listxattr_lock);
	atomic_long_set(&cache->nent, 0);
	nfs4_xattr_hash_init(cache);
	cache->listxattr = NULL;
	INIT_WORK(&cache->work, nfs4_xattr_discard_cache_worker);
	INIT_LIST_HEAD(&cache->lru);
	INIT_LIST_HEAD(&cache->dispose);
}

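/*
 * Module init: create the cache slab, the three LRU lists and the
 * workqueue, then register the shrinkers. Unwind everything on failure.
 */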
int __init nfs4_xattr_cache_init(void)
{
	int ret = 0;

	nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
	    sizeof(struct nfs4_xattr_cache), 0,
	    (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
	    nfs4_xattr_cache_init_once);
	if (nfs4_xattr_cache_cachep == NULL)
		return -ENOMEM;

	ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
	    &nfs4_xattr_large_entry_shrinker);
	if (ret)
		goto out4;

	ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
	    &nfs4_xattr_entry_shrinker);
	if (ret)
		goto out3;

	ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
	    &nfs4_xattr_cache_shrinker);
	if (ret)
		goto out2;

	nfs4_xattr_cache_wq = alloc_workqueue("nfs4_xattr", WQ_MEM_RECLAIM, 0);
	if (nfs4_xattr_cache_wq == NULL) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = register_shrinker(&nfs4_xattr_cache_shrinker);
	if (ret)
		goto out0;

	ret = register_shrinker(&nfs4_xattr_entry_shrinker);
	if (ret)
		goto out;

	ret = register_shrinker(&nfs4_xattr_large_entry_shrinker);
	if (!ret)
		return 0;

	unregister_shrinker(&nfs4_xattr_entry_shrinker);
out:
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
out0:
	destroy_workqueue(nfs4_xattr_cache_wq);
out1:
	list_lru_destroy(&nfs4_xattr_cache_lru);
out2:
	list_lru_destroy(&nfs4_xattr_entry_lru);
out3:
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
out4:
	kmem_cache_destroy(nfs4_xattr_cache_cachep);

	return ret;
}

void nfs4_xattr_cache_exit(void)
{
	unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
	list_lru_destroy(&nfs4_xattr_entry_lru);
	list_lru_destroy(&nfs4_xattr_cache_lru);
	kmem_cache_destroy(nfs4_xattr_cache_cachep);
	destroy_workqueue(nfs4_xattr_cache_wq);
}