/*
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>

#include "filecache.h"
#define NFSDDBG_FACILITY	NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS		12
#define NFSD_FILE_HASH_SIZE		(1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY		(2 * HZ)

#define NFSD_FILE_LRU_RESCAN		(0)
#define NFSD_FILE_SHUTDOWN		(1)
#define NFSD_FILE_LRU_THRESHOLD	(4096UL)
#define NFSD_FILE_LRU_LIMIT		(NFSD_FILE_LRU_THRESHOLD << 2)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)
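/*
 * Cache entries are hashed by inode number: the lookup and close paths
 * below all derive the bucket index with
 * hash_long(inode->i_ino, NFSD_FILE_HASH_BITS).
 */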
struct nfsd_fcache_bucket {
        struct hlist_head	nfb_head;
        spinlock_t		nfb_lock;
        unsigned int		nfb_count;
        unsigned int		nfb_maxcount;
};
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

static struct kmem_cache		*nfsd_file_slab;
static struct kmem_cache		*nfsd_file_mark_slab;
static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
static struct list_lru			nfsd_file_lru;
static long				nfsd_file_lru_flags;
static struct fsnotify_group		*nfsd_file_fsnotify_group;
static atomic_long_t			nfsd_filecache_count;
static struct delayed_work		nfsd_filecache_laundrette;
enum nfsd_file_laundrette_ctl {
        NFSD_FILE_LAUNDRETTE_NOFLUSH = 0,
        NFSD_FILE_LAUNDRETTE_MAY_FLUSH
};
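/*
 * Scheduling summary: below NFSD_FILE_LRU_THRESHOLD the laundrette runs
 * after NFSD_LAUNDRETTE_DELAY; above the threshold it is kicked to run
 * immediately, and once the cache exceeds NFSD_FILE_LRU_LIMIT a
 * MAY_FLUSH caller also waits for the work to complete.
 */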
static void
nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl)
{
        long count = atomic_long_read(&nfsd_filecache_count);

        if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
                return;

        /* Be more aggressive about scanning if over the threshold */
        if (count > NFSD_FILE_LRU_THRESHOLD)
                mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0);
        else
                schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY);

        if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH)
                return;

        /* ...and don't delay flushing if we're out of control */
        if (count >= NFSD_FILE_LRU_LIMIT)
                flush_delayed_work(&nfsd_filecache_laundrette);
}
static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
        struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

        put_cred(nf->nf_cred);
        kmem_cache_free(nfsd_file_slab, nf);
}
static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
        struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
                                                   nfm_mark);

        kmem_cache_free(nfsd_file_mark_slab, nfm);
}
static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
        if (!atomic_inc_not_zero(&nfm->nfm_ref))
                return NULL;
        return nfm;
}
static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
        if (atomic_dec_and_test(&nfm->nfm_ref)) {
                fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
                fsnotify_put_mark(&nfm->nfm_mark);
        }
}
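/*
 * Each inode gets at most one nfsd_file_mark. The helper below first looks
 * for an existing fsnotify mark on the inode and tries to take a reference;
 * failing that it allocates a new one, and retries if
 * fsnotify_add_inode_mark() races with another adder and returns -EEXIST.
 */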
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
        int			err;
        struct fsnotify_mark	*mark;
        struct nfsd_file_mark	*nfm = NULL, *new;
        struct inode		*inode = nf->nf_inode;

        do {
                mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
                mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
                                          nfsd_file_fsnotify_group);
                if (mark) {
                        nfm = nfsd_file_mark_get(container_of(mark,
                                                 struct nfsd_file_mark,
                                                 nfm_mark));
                        mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
                        if (nfm) {
                                fsnotify_put_mark(mark);
                                break;
                        }
                        /* Avoid soft lockup race with nfsd_file_mark_put() */
                        fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
                        fsnotify_put_mark(mark);
                } else
                        mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

                /* allocate a new nfm */
                new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
                if (!new)
                        return NULL;
                fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
                new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
                atomic_set(&new->nfm_ref, 1);

                err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

                /*
                 * If the add was successful, then return the object.
                 * Otherwise, we need to put the reference we hold on the
                 * nfm_mark. The fsnotify code will take a reference and put
                 * it on failure, so we can't just free it directly. It's also
                 * not safe to call fsnotify_destroy_mark on it as the
                 * mark->group will be NULL. Thus, we can't let the nfm_ref
                 * counter drive the destruction at this point.
                 */
                if (likely(!err))
                        nfm = new;
                else
                        fsnotify_put_mark(&new->nfm_mark);
        } while (unlikely(err == -EEXIST));

        return nfm;
}
static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
                struct net *net)
{
        struct nfsd_file *nf;

        nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
        if (nf) {
                INIT_HLIST_NODE(&nf->nf_node);
                INIT_LIST_HEAD(&nf->nf_lru);
                nf->nf_file = NULL;
                nf->nf_cred = get_current_cred();
                nf->nf_net = net;
                nf->nf_flags = 0;
                nf->nf_inode = inode;
                nf->nf_hashval = hashval;
                atomic_set(&nf->nf_ref, 1);
                nf->nf_may = may & NFSD_FILE_MAY_MASK;
                if (may & NFSD_MAY_NOT_BREAK_LEASE) {
                        if (may & NFSD_MAY_WRITE)
                                __set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
                        if (may & NFSD_MAY_READ)
                                __set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
                }
                nf->nf_mark = NULL;
                trace_nfsd_file_alloc(nf);
        }
        return nf;
}
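/*
 * NFSD_FILE_BREAK_READ/WRITE record that the entry was created by a caller
 * that asked not to break leases at open time. nfsd_file_acquire() breaks
 * the lease later, on the first access that actually needs it, and then
 * clears these bits.
 */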
static bool
nfsd_file_free(struct nfsd_file *nf)
{
        bool flush = false;

        trace_nfsd_file_put_final(nf);
        if (nf->nf_mark)
                nfsd_file_mark_put(nf->nf_mark);
        if (nf->nf_file) {
                get_file(nf->nf_file);
                filp_close(nf->nf_file, NULL);
                fput(nf->nf_file);
                flush = true;
        }
        call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
        return flush;
}
static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
        struct file *file = nf->nf_file;
        struct address_space *mapping;

        if (!file || !(file->f_mode & FMODE_WRITE))
                return false;
        mapping = file->f_mapping;
        return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
                mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}
static int
nfsd_file_check_write_error(struct nfsd_file *nf)
{
        struct file *file = nf->nf_file;

        if (!file || !(file->f_mode & FMODE_WRITE))
                return 0;
        return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
}
static bool
nfsd_file_in_use(struct nfsd_file *nf)
{
        return nfsd_file_check_writeback(nf) ||
                nfsd_file_check_write_error(nf);
}
static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
        lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

        trace_nfsd_file_unhash(nf);

        if (nfsd_file_check_write_error(nf))
                nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
        --nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
        hlist_del_rcu(&nf->nf_node);
        atomic_long_dec(&nfsd_filecache_count);
}
static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
        if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
                nfsd_file_do_unhash(nf);
                if (!list_empty(&nf->nf_lru))
                        list_lru_del(&nfsd_file_lru, &nf->nf_lru);
                return true;
        }
        return false;
}
/*
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
        lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

        trace_nfsd_file_unhash_and_release_locked(nf);
        if (!nfsd_file_unhash(nf))
                return false;
        /* keep final reference for nfsd_file_lru_dispose */
        if (atomic_add_unless(&nf->nf_ref, -1, 1))
                return true;

        list_add(&nf->nf_lru, dispose);
        return true;
}
static int
nfsd_file_put_noref(struct nfsd_file *nf)
{
        int count;

        trace_nfsd_file_put(nf);

        count = atomic_dec_return(&nf->nf_ref);
        if (!count) {
                WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
                nfsd_file_free(nf);
        }
        return count;
}
void
nfsd_file_put(struct nfsd_file *nf)
{
        bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
        bool unused = !nfsd_file_in_use(nf);

        set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
        if (nfsd_file_put_noref(nf) == 1 && is_hashed && unused)
                nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH);
}
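/*
 * Reference counting sketch: the hash table itself owns one reference, so
 * an entry whose nf_ref drops to 1 in nfsd_file_put() is only held by the
 * cache. Such entries stay on the LRU with NFSD_FILE_REFERENCED set, and
 * the laundrette is scheduled to reap them if they remain unused.
 */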
struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
        if (likely(atomic_inc_not_zero(&nf->nf_ref)))
                return nf;
        return NULL;
}
static void
nfsd_file_dispose_list(struct list_head *dispose)
{
        struct nfsd_file *nf;

        while (!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
                nfsd_file_put_noref(nf);
        }
}
static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
        bool flush = false;
        struct nfsd_file *nf;

        while (!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
                if (!atomic_dec_and_test(&nf->nf_ref))
                        continue;
                if (nfsd_file_free(nf))
                        flush = true;
        }
        if (flush)
                flush_delayed_fput();
}
/*
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
                 spinlock_t *lock, void *arg)
{
        struct list_head *head = arg;
        struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

        /*
         * Do a lockless refcount check. The hashtable holds one reference, so
         * we look to see if anything else has a reference, or if any have
         * been put since the shrinker last ran. Those don't get unhashed and
         * released.
         *
         * Note that in the put path, we set the flag and then decrement the
         * counter. Here we check the counter and then test and clear the flag.
         * That order is deliberate to ensure that we can do this locklessly.
         */
        if (atomic_read(&nf->nf_ref) > 1)
                goto out_skip;

        /*
         * Don't throw out files that are still undergoing I/O or
         * that have uncleared errors pending.
         */
        if (nfsd_file_check_writeback(nf))
                goto out_skip;

        if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
                goto out_rescan;

        if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
                goto out_skip;

        list_lru_isolate_move(lru, &nf->nf_lru, head);
        return LRU_REMOVED;
out_rescan:
        set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
out_skip:
        return LRU_SKIP;
}
static void
nfsd_file_lru_dispose(struct list_head *head)
{
        struct nfsd_file *nf;

        list_for_each_entry(nf, head, nf_lru) {
                spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
                nfsd_file_do_unhash(nf);
                spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
        }
        nfsd_file_dispose_list(head);
}
static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
        return list_lru_count(&nfsd_file_lru);
}
static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
        LIST_HEAD(head);
        unsigned long ret;

        ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head);
        nfsd_file_lru_dispose(&head);
        return ret;
}
static struct shrinker	nfsd_file_shrinker = {
        .scan_objects = nfsd_file_lru_scan,
        .count_objects = nfsd_file_lru_count,
};
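/*
 * The shrinker is registered in nfsd_file_cache_init(); under memory
 * pressure the VM calls nfsd_file_lru_scan(), which walks the LRU with
 * nfsd_file_lru_cb() and then disposes of whatever entries were isolated.
 */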
static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
                        struct list_head *dispose)
{
        struct nfsd_file	*nf;
        struct hlist_node	*tmp;

        spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
        hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
                if (inode == nf->nf_inode)
                        nfsd_file_unhash_and_release_locked(nf, dispose);
        }
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}
/**
 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
        unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
                                                NFSD_FILE_HASH_BITS);
        LIST_HEAD(dispose);

        __nfsd_file_close_inode(inode, hashval, &dispose);
        trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
        nfsd_file_dispose_list_sync(&dispose);
}
/**
 * nfsd_file_close_inode - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
        unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
                                                NFSD_FILE_HASH_BITS);
        LIST_HEAD(dispose);

        __nfsd_file_close_inode(inode, hashval, &dispose);
        trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
        nfsd_file_dispose_list(&dispose);
}
/**
 * nfsd_file_delayed_close - close unused nfsd_files
 *
 * Walk the LRU list and close any entries that have not been used since
 * the last scan.
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
        LIST_HEAD(head);

        list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX);

        if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags))
                nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH);

        if (!list_empty(&head)) {
                nfsd_file_lru_dispose(&head);
                flush_delayed_fput();
        }
}
static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
                              void *data)
{
        struct file_lock *fl = data;

        /* Only close files for F_SETLEASE leases */
        if (fl->fl_flags & FL_LEASE)
                nfsd_file_close_inode_sync(file_inode(fl->fl_file));
        return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
        .notifier_call = nfsd_file_lease_notifier_call,
};
static int
nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
                                struct inode *inode,
                                u32 mask, const void *data, int data_type,
                                const struct qstr *file_name, u32 cookie,
                                struct fsnotify_iter_info *iter_info)
{
        trace_nfsd_file_fsnotify_handle_event(inode, mask);

        /* Should be no marks on non-regular files */
        if (!S_ISREG(inode->i_mode)) {
                WARN_ON_ONCE(1);
                return 0;
        }

        /* don't close files if this was not the last link */
        if (mask & FS_ATTRIB) {
                if (inode->i_nlink)
                        return 0;
        }

        nfsd_file_close_inode(inode);
        return 0;
}

static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
        .handle_event = nfsd_file_fsnotify_handle_event,
        .free_mark = nfsd_file_mark_free,
};
int
nfsd_file_cache_init(void)
{
        int		ret = -ENOMEM;
        unsigned int	i;

        clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

        if (nfsd_file_hashtbl)
                return 0;

        nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
                                sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
        if (!nfsd_file_hashtbl) {
                pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
                goto out_err;
        }

        nfsd_file_slab = kmem_cache_create("nfsd_file",
                                sizeof(struct nfsd_file), 0, 0, NULL);
        if (!nfsd_file_slab) {
                pr_err("nfsd: unable to create nfsd_file_slab\n");
                goto out_err;
        }

        nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
                                sizeof(struct nfsd_file_mark), 0, 0, NULL);
        if (!nfsd_file_mark_slab) {
                pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
                goto out_err;
        }

        ret = list_lru_init(&nfsd_file_lru);
        if (ret) {
                pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
                goto out_err;
        }

        ret = register_shrinker(&nfsd_file_shrinker);
        if (ret) {
                pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
                goto out_lru;
        }

        ret = lease_register_notifier(&nfsd_file_lease_notifier);
        if (ret) {
                pr_err("nfsd: unable to register lease notifier: %d\n", ret);
                goto out_shrinker;
        }

        nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
        if (IS_ERR(nfsd_file_fsnotify_group)) {
                pr_err("nfsd: unable to create fsnotify group: %ld\n",
                        PTR_ERR(nfsd_file_fsnotify_group));
                nfsd_file_fsnotify_group = NULL;
                goto out_notifier;
        }

        for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
                spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
        }

        INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close);
out:
        return ret;
out_notifier:
        lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
        unregister_shrinker(&nfsd_file_shrinker);
out_lru:
        list_lru_destroy(&nfsd_file_lru);
out_err:
        kmem_cache_destroy(nfsd_file_slab);
        nfsd_file_slab = NULL;
        kmem_cache_destroy(nfsd_file_mark_slab);
        nfsd_file_mark_slab = NULL;
        kfree(nfsd_file_hashtbl);
        nfsd_file_hashtbl = NULL;
        goto out;
}
/*
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(struct net *net)
{
        unsigned int		i;
        struct nfsd_file	*nf;
        struct hlist_node	*next;
        LIST_HEAD(dispose);
        bool del;

        if (!nfsd_file_hashtbl)
                return;

        for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];

                spin_lock(&nfb->nfb_lock);
                hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
                        if (net && nf->nf_net != net)
                                continue;
                        del = nfsd_file_unhash_and_release_locked(nf, &dispose);

                        /*
                         * Deadlock detected! Something marked this entry as
                         * unhashed, but hasn't removed it from the hash list.
                         */
                        WARN_ON_ONCE(!del);
                }
                spin_unlock(&nfb->nfb_lock);
                nfsd_file_dispose_list(&dispose);
        }
}
void
nfsd_file_cache_shutdown(void)
{
        set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

        lease_unregister_notifier(&nfsd_file_lease_notifier);
        unregister_shrinker(&nfsd_file_shrinker);
        /*
         * make sure all callers of nfsd_file_lru_cb are done before
         * calling nfsd_file_cache_purge
         */
        cancel_delayed_work_sync(&nfsd_filecache_laundrette);
        nfsd_file_cache_purge(NULL);
        list_lru_destroy(&nfsd_file_lru);
        rcu_barrier();
        fsnotify_put_group(nfsd_file_fsnotify_group);
        nfsd_file_fsnotify_group = NULL;
        kmem_cache_destroy(nfsd_file_slab);
        nfsd_file_slab = NULL;
        fsnotify_wait_marks_destroyed();
        kmem_cache_destroy(nfsd_file_mark_slab);
        nfsd_file_mark_slab = NULL;
        kfree(nfsd_file_hashtbl);
        nfsd_file_hashtbl = NULL;
}
static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
        int i;

        if (!uid_eq(c1->fsuid, c2->fsuid))
                return false;
        if (!gid_eq(c1->fsgid, c2->fsgid))
                return false;
        if (c1->group_info == NULL || c2->group_info == NULL)
                return c1->group_info == c2->group_info;
        if (c1->group_info->ngroups != c2->group_info->ngroups)
                return false;
        for (i = 0; i < c1->group_info->ngroups; i++) {
                if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
                        return false;
        }
        return true;
}
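/*
 * Cache entries are effectively per-credential: nfsd_file_find_locked()
 * only reuses an open file if the fsuid, fsgid and supplementary group
 * list of the current thread match those recorded in nf_cred.
 */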
static struct nfsd_file *
nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
                        unsigned int hashval, struct net *net)
{
        struct nfsd_file *nf;
        unsigned char need = may_flags & NFSD_FILE_MAY_MASK;

        hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
                                 nf_node) {
                if ((need & nf->nf_may) != need)
                        continue;
                if (nf->nf_inode != inode)
                        continue;
                if (nf->nf_net != net)
                        continue;
                if (!nfsd_match_cred(nf->nf_cred, current_cred()))
                        continue;
                if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
                        continue;
                if (nfsd_file_get(nf) != NULL)
                        return nf;
        }
        return NULL;
}
/**
 * nfsd_file_is_cached - are there any cached open files for this fh?
 * @inode: inode of the file to check
 *
 * Scan the hashtable for open files that match this fh. Returns true if there
 * are any, and false if not.
 */
bool
nfsd_file_is_cached(struct inode *inode)
{
        bool			ret = false;
        struct nfsd_file	*nf;
        unsigned int		hashval;

        hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);

        rcu_read_lock();
        hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
                                 nf_node) {
                if (inode == nf->nf_inode) {
                        ret = true;
                        break;
                }
        }
        rcu_read_unlock();
        trace_nfsd_file_is_cached(inode, hashval, (int)ret);
        return ret;
}
__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **pnf)
{
        __be32	status;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_file *nf, *new;
        struct inode *inode;
        unsigned int hashval;
        bool retry = true;

        /* FIXME: skip this if fh_dentry is already set? */
        status = fh_verify(rqstp, fhp, S_IFREG,
                                may_flags|NFSD_MAY_OWNER_OVERRIDE);
        if (status != nfs_ok)
                return status;

        inode = d_inode(fhp->fh_dentry);
        hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
        rcu_read_lock();
        nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
        rcu_read_unlock();
        if (nf)
                goto wait_for_construction;

        new = nfsd_file_alloc(inode, may_flags, hashval, net);
        if (!new) {
                trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
                                        NULL, nfserr_jukebox);
                return nfserr_jukebox;
        }

        spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
        nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
        if (nf == NULL)
                goto open_file;
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
        nfsd_file_slab_free(&new->nf_rcu);

wait_for_construction:
        wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

        /* Did construction of this file fail? */
        if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
                if (!retry) {
                        status = nfserr_jukebox;
                        goto out;
                }
                retry = false;
                nfsd_file_put_noref(nf);
                goto retry;
        }

        this_cpu_inc(nfsd_file_cache_hits);

        if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
                bool write = (may_flags & NFSD_MAY_WRITE);

                if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
                    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
                        status = nfserrno(nfsd_open_break_lease(
                                        file_inode(nf->nf_file), may_flags));
                        if (status == nfs_ok) {
                                clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
                                if (write)
                                        clear_bit(NFSD_FILE_BREAK_WRITE,
                                                  &nf->nf_flags);
                        }
                }
        }
out:
        if (status == nfs_ok) {
                *pnf = nf;
        } else {
                nfsd_file_put(nf);
                nf = NULL;
        }

        trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
        return status;
open_file:
        nf = new;
        /* Take reference for the hashtable */
        atomic_inc(&nf->nf_ref);
        __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
        __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
        list_lru_add(&nfsd_file_lru, &nf->nf_lru);
        hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
        ++nfsd_file_hashtbl[hashval].nfb_count;
        nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
                        nfsd_file_hashtbl[hashval].nfb_count);
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
        atomic_long_inc(&nfsd_filecache_count);

        nf->nf_mark = nfsd_file_mark_find_or_create(nf);
        if (nf->nf_mark)
                status = nfsd_open_verified(rqstp, fhp, S_IFREG,
                                may_flags, &nf->nf_file);
        else
                status = nfserr_jukebox;
        /*
         * If construction failed, or we raced with a call to unlink()
         * then unhash.
         */
        if (status != nfs_ok || inode->i_nlink == 0) {
                bool do_free;

                spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
                do_free = nfsd_file_unhash(nf);
                spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
                if (do_free)
                        nfsd_file_put_noref(nf);
        }
        clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
        smp_mb__after_atomic();
        wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
        goto out;
}
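/*
 * Typical caller pattern (sketch only, not an actual call site in this
 * file):
 *
 *	struct nfsd_file *nf;
 *	__be32 status;
 *
 *	status = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
 *	if (status != nfs_ok)
 *		return status;
 *	... do I/O through nf->nf_file ...
 *	nfsd_file_put(nf);
 */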
/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
        unsigned int i, count = 0, longest = 0;
        unsigned long hits = 0;

        /*
         * No need for spinlocks here since we're not terribly interested in
         * accuracy. We do take the nfsd_mutex simply to ensure that we
         * don't end up racing with server shutdown.
         */
        mutex_lock(&nfsd_mutex);
        if (nfsd_file_hashtbl) {
                for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                        count += nfsd_file_hashtbl[i].nfb_count;
                        longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
                }
        }
        mutex_unlock(&nfsd_mutex);

        for_each_possible_cpu(i)
                hits += per_cpu(nfsd_file_cache_hits, i);

        seq_printf(m, "total entries: %u\n", count);
        seq_printf(m, "longest chain: %u\n", longest);
        seq_printf(m, "cache hits: %lu\n", hits);
        return 0;
}

int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, nfsd_file_cache_stats_show, NULL);
}
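/*
 * Example of the stats output produced by nfsd_file_cache_stats_show()
 * (illustrative values only):
 *
 *	total entries: 1024
 *	longest chain: 3
 *	cache hits: 1048576
 */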