/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "nfs4trace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120*HZ)
/*
 * pnfs_spinlock protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 *	  mount option? something else?
 */
static const u32 ld_prefs[] = {
	LAYOUT_NFSV4_1_FILES,
	0
};

static int
ld_cmp(const void *e1, const void *e2)
{
	u32 ld1 = *((u32 *)e1);
	u32 ld2 = *((u32 *)e2);
	int i;

	for (i = 0; ld_prefs[i] != 0; i++) {
		if (ld1 == ld_prefs[i])
			return -1;

		if (ld2 == ld_prefs[i])
			return 1;
	}
	return 0;
}
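/*
 * Illustrative sketch (not part of the driver table handling; the array
 * below is hypothetical): sorting a server-provided list with ld_cmp()
 * moves types found in ld_prefs[] to the front, in ld_prefs[] order, while
 * unknown types keep their relative position at the tail:
 *
 *	u32 types[] = { 0x1234, LAYOUT_NFSV4_1_FILES };
 *	sort(types, ARRAY_SIZE(types), sizeof(types[0]), ld_cmp, NULL);
 *	// now types[0] == LAYOUT_NFSV4_1_FILES; the unknown 0x1234 sinks last
 */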
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @ids array of layout types supported by MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      struct nfs_fsinfo *fsinfo)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;
	u32 id;
	int i;

	if (fsinfo->nlayouttypes == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
			__func__, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}

	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
	     sizeof(*fsinfo->layouttype), ld_cmp, NULL);

	for (i = 0; i < fsinfo->nlayouttypes; i++) {
		id = fsinfo->layouttype[i];
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
					id);
			ld_type = find_pnfs_driver(id);
		}
		if (ld_type)
			break;
	}

	if (!ld_type) {
		dprintk("%s: No pNFS module found!\n", __func__);
		goto out_no_driver;
	}

	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	refcount_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;

	return ld->alloc_layout_hdr(ino, gfp_flags);
}
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode;

	if (!lo)
		return;
	inode = lo->plh_inode;
	pnfs_layoutreturn_before_put_layout_hdr(lo);
	if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}
312 pnfs_set_plh_return_info(struct pnfs_layout_hdr
*lo
, enum pnfs_iomode iomode
,
315 if (lo
->plh_return_iomode
!= 0 && lo
->plh_return_iomode
!= iomode
)
317 lo
->plh_return_iomode
= iomode
;
318 set_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
);
320 WARN_ON_ONCE(lo
->plh_return_seq
!= 0 && lo
->plh_return_seq
!= seq
);
321 lo
->plh_return_seq
= seq
;
326 pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr
*lo
)
328 struct pnfs_layout_segment
*lseg
;
329 lo
->plh_return_iomode
= 0;
330 lo
->plh_return_seq
= 0;
331 clear_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
);
332 list_for_each_entry(lseg
, &lo
->plh_segs
, pls_list
) {
333 if (!test_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
))
335 pnfs_set_plh_return_info(lo
, lseg
->pls_range
.iomode
, 0);
339 static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr
*lo
)
341 clear_bit_unlock(NFS_LAYOUT_RETURN
, &lo
->plh_flags
);
342 clear_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
);
343 smp_mb__after_atomic();
344 wake_up_bit(&lo
->plh_flags
, NFS_LAYOUT_RETURN
);
345 rpc_wake_up(&NFS_SERVER(lo
->plh_inode
)->roc_rpcwaitq
);
349 pnfs_clear_lseg_state(struct pnfs_layout_segment
*lseg
,
350 struct list_head
*free_me
)
352 clear_bit(NFS_LSEG_ROC
, &lseg
->pls_flags
);
353 clear_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
);
354 if (test_and_clear_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
))
355 pnfs_lseg_dec_and_remove_zero(lseg
, free_me
);
356 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT
, &lseg
->pls_flags
))
357 pnfs_lseg_dec_and_remove_zero(lseg
, free_me
);
361 * Update the seqid of a layout stateid
363 bool nfs4_refresh_layout_stateid(nfs4_stateid
*dst
, struct inode
*inode
)
365 struct pnfs_layout_hdr
*lo
;
368 spin_lock(&inode
->i_lock
);
369 lo
= NFS_I(inode
)->layout
;
370 if (lo
&& nfs4_stateid_match_other(dst
, &lo
->plh_stateid
)) {
371 dst
->seqid
= lo
->plh_stateid
.seqid
;
374 spin_unlock(&inode
->i_lock
);
379 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
381 * In order to continue using the pnfs_layout_hdr, a full recovery
383 * Note that caller must hold inode->i_lock.
386 pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr
*lo
,
387 struct list_head
*lseg_list
)
389 struct pnfs_layout_range range
= {
390 .iomode
= IOMODE_ANY
,
392 .length
= NFS4_MAX_UINT64
,
394 struct pnfs_layout_segment
*lseg
, *next
;
396 set_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
);
397 list_for_each_entry_safe(lseg
, next
, &lo
->plh_segs
, pls_list
)
398 pnfs_clear_lseg_state(lseg
, lseg_list
);
399 pnfs_clear_layoutreturn_info(lo
);
400 pnfs_free_returned_lsegs(lo
, lseg_list
, &range
, 0);
401 if (test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
) &&
402 !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
))
403 pnfs_clear_layoutreturn_waitbit(lo
);
404 return !list_empty(&lo
->plh_segs
);
408 pnfs_iomode_to_fail_bit(u32 iomode
)
410 return iomode
== IOMODE_RW
?
411 NFS_LAYOUT_RW_FAILED
: NFS_LAYOUT_RO_FAILED
;
415 pnfs_layout_set_fail_bit(struct pnfs_layout_hdr
*lo
, int fail_bit
)
417 lo
->plh_retry_timestamp
= jiffies
;
418 if (!test_and_set_bit(fail_bit
, &lo
->plh_flags
))
419 refcount_inc(&lo
->plh_refcount
);
423 pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr
*lo
, int fail_bit
)
425 if (test_and_clear_bit(fail_bit
, &lo
->plh_flags
))
426 refcount_dec(&lo
->plh_refcount
);
430 pnfs_layout_io_set_failed(struct pnfs_layout_hdr
*lo
, u32 iomode
)
432 struct inode
*inode
= lo
->plh_inode
;
433 struct pnfs_layout_range range
= {
436 .length
= NFS4_MAX_UINT64
,
440 spin_lock(&inode
->i_lock
);
441 pnfs_layout_set_fail_bit(lo
, pnfs_iomode_to_fail_bit(iomode
));
442 pnfs_mark_matching_lsegs_invalid(lo
, &head
, &range
, 0);
443 spin_unlock(&inode
->i_lock
);
444 pnfs_free_lseg_list(&head
);
445 dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__
,
446 iomode
== IOMODE_RW
? "RW" : "READ");
450 pnfs_layout_io_test_failed(struct pnfs_layout_hdr
*lo
, u32 iomode
)
452 unsigned long start
, end
;
453 int fail_bit
= pnfs_iomode_to_fail_bit(iomode
);
455 if (test_bit(fail_bit
, &lo
->plh_flags
) == 0)
458 start
= end
- PNFS_LAYOUTGET_RETRY_TIMEOUT
;
459 if (!time_in_range(lo
->plh_retry_timestamp
, start
, end
)) {
460 /* It is time to retry the failed layoutgets */
461 pnfs_layout_clear_fail_bit(lo
, fail_bit
);
468 pnfs_init_lseg(struct pnfs_layout_hdr
*lo
, struct pnfs_layout_segment
*lseg
,
469 const struct pnfs_layout_range
*range
,
470 const nfs4_stateid
*stateid
)
472 INIT_LIST_HEAD(&lseg
->pls_list
);
473 INIT_LIST_HEAD(&lseg
->pls_lc_list
);
474 refcount_set(&lseg
->pls_refcount
, 1);
475 set_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
);
476 lseg
->pls_layout
= lo
;
477 lseg
->pls_range
= *range
;
478 lseg
->pls_seq
= be32_to_cpu(stateid
->seqid
);
481 static void pnfs_free_lseg(struct pnfs_layout_segment
*lseg
)
484 struct inode
*inode
= lseg
->pls_layout
->plh_inode
;
485 NFS_SERVER(inode
)->pnfs_curr_ld
->free_lseg(lseg
);
490 pnfs_layout_remove_lseg(struct pnfs_layout_hdr
*lo
,
491 struct pnfs_layout_segment
*lseg
)
493 WARN_ON(test_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
));
494 list_del_init(&lseg
->pls_list
);
495 /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
496 refcount_dec(&lo
->plh_refcount
);
497 if (test_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
))
499 if (list_empty(&lo
->plh_segs
) &&
500 !test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
) &&
501 !test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
)) {
502 if (atomic_read(&lo
->plh_outstanding
) == 0)
503 set_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
);
504 clear_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
);
509 pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr
*lo
,
510 struct pnfs_layout_segment
*lseg
)
512 if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
) &&
513 pnfs_layout_is_valid(lo
)) {
514 pnfs_set_plh_return_info(lo
, lseg
->pls_range
.iomode
, 0);
515 list_move_tail(&lseg
->pls_list
, &lo
->plh_return_segs
);
522 pnfs_put_lseg(struct pnfs_layout_segment
*lseg
)
524 struct pnfs_layout_hdr
*lo
;
530 dprintk("%s: lseg %p ref %d valid %d\n", __func__
, lseg
,
531 refcount_read(&lseg
->pls_refcount
),
532 test_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
));
534 lo
= lseg
->pls_layout
;
535 inode
= lo
->plh_inode
;
537 if (refcount_dec_and_lock(&lseg
->pls_refcount
, &inode
->i_lock
)) {
538 if (test_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
)) {
539 spin_unlock(&inode
->i_lock
);
542 pnfs_get_layout_hdr(lo
);
543 pnfs_layout_remove_lseg(lo
, lseg
);
544 if (pnfs_cache_lseg_for_layoutreturn(lo
, lseg
))
546 spin_unlock(&inode
->i_lock
);
547 pnfs_free_lseg(lseg
);
548 pnfs_put_layout_hdr(lo
);
551 EXPORT_SYMBOL_GPL(pnfs_put_lseg
);
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = pnfs_end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = pnfs_end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
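/*
 * Worked example (illustrative values): l1 = { offset 0, length 8192 }
 * contains l2 = { offset 4096, length 4096 }, since 0 <= 4096 and
 * pnfs_end_offset(0, 8192) == 8192 >= pnfs_end_offset(4096, 4096) == 8192.
 * A length of NFS4_MAX_UINT64 makes pnfs_end_offset() saturate, so a
 * whole-file range contains any range starting at or after its offset.
 */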
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!refcount_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			refcount_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}
/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}
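/*
 * Worked example: sequence ids are compared modulo 2^32, so shortly after
 * the server's seqid wraps, 0x00000001 still counts as newer than
 * 0xffffffff because (s32)(0x00000001 - 0xffffffff) == 2 > 0.  Equal ids
 * are not "newer": pnfs_seqid_is_newer(5, 5) is false.
 */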
611 pnfs_should_free_range(const struct pnfs_layout_range
*lseg_range
,
612 const struct pnfs_layout_range
*recall_range
)
614 return (recall_range
->iomode
== IOMODE_ANY
||
615 lseg_range
->iomode
== recall_range
->iomode
) &&
616 pnfs_lseg_range_intersecting(lseg_range
, recall_range
);
620 pnfs_match_lseg_recall(const struct pnfs_layout_segment
*lseg
,
621 const struct pnfs_layout_range
*recall_range
,
624 if (seq
!= 0 && pnfs_seqid_is_newer(lseg
->pls_seq
, seq
))
626 if (recall_range
== NULL
)
628 return pnfs_should_free_range(&lseg
->pls_range
, recall_range
);
632 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
633 * @lo: layout header containing the lsegs
634 * @tmp_list: list head where doomed lsegs should go
635 * @recall_range: optional recall range argument to match (may be NULL)
636 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
638 * Walk the list of lsegs in the layout header, and tear down any that should
639 * be destroyed. If "recall_range" is specified then the segment must match
640 * that range. If "seq" is non-zero, then only match segments that were handed
641 * out at or before that sequence.
643 * Returns number of matching invalid lsegs remaining in list after scanning
644 * it and purging them.
647 pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr
*lo
,
648 struct list_head
*tmp_list
,
649 const struct pnfs_layout_range
*recall_range
,
652 struct pnfs_layout_segment
*lseg
, *next
;
655 dprintk("%s:Begin lo %p\n", __func__
, lo
);
657 if (list_empty(&lo
->plh_segs
))
659 list_for_each_entry_safe(lseg
, next
, &lo
->plh_segs
, pls_list
)
660 if (pnfs_match_lseg_recall(lseg
, recall_range
, seq
)) {
661 dprintk("%s: freeing lseg %p iomode %d seq %u"
662 "offset %llu length %llu\n", __func__
,
663 lseg
, lseg
->pls_range
.iomode
, lseg
->pls_seq
,
664 lseg
->pls_range
.offset
, lseg
->pls_range
.length
);
665 if (!mark_lseg_invalid(lseg
, tmp_list
))
668 dprintk("%s:Return %i\n", __func__
, remaining
);
673 pnfs_free_returned_lsegs(struct pnfs_layout_hdr
*lo
,
674 struct list_head
*free_me
,
675 const struct pnfs_layout_range
*range
,
678 struct pnfs_layout_segment
*lseg
, *next
;
680 list_for_each_entry_safe(lseg
, next
, &lo
->plh_return_segs
, pls_list
) {
681 if (pnfs_match_lseg_recall(lseg
, range
, seq
))
682 list_move_tail(&lseg
->pls_list
, free_me
);
686 /* note free_me must contain lsegs from a single layout_hdr */
688 pnfs_free_lseg_list(struct list_head
*free_me
)
690 struct pnfs_layout_segment
*lseg
, *tmp
;
692 if (list_empty(free_me
))
695 list_for_each_entry_safe(lseg
, tmp
, free_me
, pls_list
) {
696 list_del(&lseg
->pls_list
);
697 pnfs_free_lseg(lseg
);
702 pnfs_destroy_layout(struct nfs_inode
*nfsi
)
704 struct pnfs_layout_hdr
*lo
;
707 spin_lock(&nfsi
->vfs_inode
.i_lock
);
710 pnfs_get_layout_hdr(lo
);
711 pnfs_mark_layout_stateid_invalid(lo
, &tmp_list
);
712 pnfs_layout_clear_fail_bit(lo
, NFS_LAYOUT_RO_FAILED
);
713 pnfs_layout_clear_fail_bit(lo
, NFS_LAYOUT_RW_FAILED
);
714 spin_unlock(&nfsi
->vfs_inode
.i_lock
);
715 pnfs_free_lseg_list(&tmp_list
);
716 nfs_commit_inode(&nfsi
->vfs_inode
, 0);
717 pnfs_put_layout_hdr(lo
);
719 spin_unlock(&nfsi
->vfs_inode
.i_lock
);
721 EXPORT_SYMBOL_GPL(pnfs_destroy_layout
);
724 pnfs_layout_add_bulk_destroy_list(struct inode
*inode
,
725 struct list_head
*layout_list
)
727 struct pnfs_layout_hdr
*lo
;
730 spin_lock(&inode
->i_lock
);
731 lo
= NFS_I(inode
)->layout
;
732 if (lo
!= NULL
&& list_empty(&lo
->plh_bulk_destroy
)) {
733 pnfs_get_layout_hdr(lo
);
734 list_add(&lo
->plh_bulk_destroy
, layout_list
);
737 spin_unlock(&inode
->i_lock
);
741 /* Caller must hold rcu_read_lock and clp->cl_lock */
743 pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client
*clp
,
744 struct nfs_server
*server
,
745 struct list_head
*layout_list
)
746 __must_hold(&clp
->cl_lock
)
749 struct pnfs_layout_hdr
*lo
, *next
;
752 list_for_each_entry_safe(lo
, next
, &server
->layouts
, plh_layouts
) {
753 if (test_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
) ||
754 test_bit(NFS_LAYOUT_INODE_FREEING
, &lo
->plh_flags
) ||
755 !list_empty(&lo
->plh_bulk_destroy
))
757 /* If the sb is being destroyed, just bail */
758 if (!nfs_sb_active(server
->super
))
760 inode
= igrab(lo
->plh_inode
);
762 list_del_init(&lo
->plh_layouts
);
763 if (pnfs_layout_add_bulk_destroy_list(inode
,
767 spin_unlock(&clp
->cl_lock
);
771 spin_unlock(&clp
->cl_lock
);
772 set_bit(NFS_LAYOUT_INODE_FREEING
, &lo
->plh_flags
);
774 nfs_sb_deactive(server
->super
);
775 spin_lock(&clp
->cl_lock
);
783 pnfs_layout_free_bulk_destroy_list(struct list_head
*layout_list
,
786 struct pnfs_layout_hdr
*lo
;
788 LIST_HEAD(lseg_list
);
791 while (!list_empty(layout_list
)) {
792 lo
= list_entry(layout_list
->next
, struct pnfs_layout_hdr
,
794 dprintk("%s freeing layout for inode %lu\n", __func__
,
795 lo
->plh_inode
->i_ino
);
796 inode
= lo
->plh_inode
;
798 pnfs_layoutcommit_inode(inode
, false);
800 spin_lock(&inode
->i_lock
);
801 list_del_init(&lo
->plh_bulk_destroy
);
802 if (pnfs_mark_layout_stateid_invalid(lo
, &lseg_list
)) {
804 set_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
);
807 spin_unlock(&inode
->i_lock
);
808 pnfs_free_lseg_list(&lseg_list
);
809 /* Free all lsegs that are attached to commit buckets */
810 nfs_commit_inode(inode
, 0);
811 pnfs_put_layout_hdr(lo
);
812 nfs_iput_and_deactive(inode
);
818 pnfs_destroy_layouts_byfsid(struct nfs_client
*clp
,
819 struct nfs_fsid
*fsid
,
822 struct nfs_server
*server
;
823 LIST_HEAD(layout_list
);
825 spin_lock(&clp
->cl_lock
);
828 list_for_each_entry_rcu(server
, &clp
->cl_superblocks
, client_link
) {
829 if (memcmp(&server
->fsid
, fsid
, sizeof(*fsid
)) != 0)
831 if (pnfs_layout_bulk_destroy_byserver_locked(clp
,
837 spin_unlock(&clp
->cl_lock
);
839 if (list_empty(&layout_list
))
841 return pnfs_layout_free_bulk_destroy_list(&layout_list
, is_recall
);
845 pnfs_destroy_layouts_byclid(struct nfs_client
*clp
,
848 struct nfs_server
*server
;
849 LIST_HEAD(layout_list
);
851 spin_lock(&clp
->cl_lock
);
854 list_for_each_entry_rcu(server
, &clp
->cl_superblocks
, client_link
) {
855 if (pnfs_layout_bulk_destroy_byserver_locked(clp
,
861 spin_unlock(&clp
->cl_lock
);
863 if (list_empty(&layout_list
))
865 return pnfs_layout_free_bulk_destroy_list(&layout_list
, is_recall
);
 * Called by the state manager to remove all layouts established under an
873 pnfs_destroy_all_layouts(struct nfs_client
*clp
)
875 nfs4_deviceid_mark_client_invalid(clp
);
876 nfs4_deviceid_purge_client(clp
);
878 pnfs_destroy_layouts_byclid(clp
, false);
881 /* update lo->plh_stateid with new if is more recent */
883 pnfs_set_layout_stateid(struct pnfs_layout_hdr
*lo
, const nfs4_stateid
*new,
886 u32 oldseq
, newseq
, new_barrier
= 0;
888 oldseq
= be32_to_cpu(lo
->plh_stateid
.seqid
);
889 newseq
= be32_to_cpu(new->seqid
);
891 if (!pnfs_layout_is_valid(lo
)) {
892 nfs4_stateid_copy(&lo
->plh_stateid
, new);
893 lo
->plh_barrier
= newseq
;
894 pnfs_clear_layoutreturn_info(lo
);
895 clear_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
);
898 if (pnfs_seqid_is_newer(newseq
, oldseq
)) {
899 nfs4_stateid_copy(&lo
->plh_stateid
, new);
901 * Because of wraparound, we want to keep the barrier
902 * "close" to the current seqids.
904 new_barrier
= newseq
- atomic_read(&lo
->plh_outstanding
);
907 new_barrier
= be32_to_cpu(new->seqid
);
908 else if (new_barrier
== 0)
910 if (pnfs_seqid_is_newer(new_barrier
, lo
->plh_barrier
))
911 lo
->plh_barrier
= new_barrier
;
static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}
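/*
 * Illustrative example: with lo->plh_barrier == 7, a LAYOUTGET reply whose
 * stateid carries seqid 7 or lower is considered blocked (and is forgotten
 * by pnfs_layout_process()), while a reply with seqid 8 or higher is
 * accepted.  The barrier itself is advanced in pnfs_set_layout_stateid().
 */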
923 /* lget is set to 1 if called from inside send_layoutget call chain */
925 pnfs_layoutgets_blocked(const struct pnfs_layout_hdr
*lo
)
927 return lo
->plh_block_lgets
||
928 test_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
);
932 * Get layout from server.
933 * for now, assume that whole file layouts are requested.
935 * arg->length: all ones
937 static struct pnfs_layout_segment
*
938 send_layoutget(struct pnfs_layout_hdr
*lo
,
939 struct nfs_open_context
*ctx
,
940 nfs4_stateid
*stateid
,
941 const struct pnfs_layout_range
*range
,
942 long *timeout
, gfp_t gfp_flags
)
944 struct inode
*ino
= lo
->plh_inode
;
945 struct nfs_server
*server
= NFS_SERVER(ino
);
946 struct nfs4_layoutget
*lgp
;
949 dprintk("--> %s\n", __func__
);
952 * Synchronously retrieve layout information from server and
953 * store in lseg. If we race with a concurrent seqid morphing
954 * op, then re-send the LAYOUTGET.
956 lgp
= kzalloc(sizeof(*lgp
), gfp_flags
);
958 return ERR_PTR(-ENOMEM
);
960 i_size
= i_size_read(ino
);
962 lgp
->args
.minlength
= PAGE_SIZE
;
963 if (lgp
->args
.minlength
> range
->length
)
964 lgp
->args
.minlength
= range
->length
;
965 if (range
->iomode
== IOMODE_READ
) {
966 if (range
->offset
>= i_size
)
967 lgp
->args
.minlength
= 0;
968 else if (i_size
- range
->offset
< lgp
->args
.minlength
)
969 lgp
->args
.minlength
= i_size
- range
->offset
;
971 lgp
->args
.maxcount
= PNFS_LAYOUT_MAXSIZE
;
972 pnfs_copy_range(&lgp
->args
.range
, range
);
973 lgp
->args
.type
= server
->pnfs_curr_ld
->id
;
974 lgp
->args
.inode
= ino
;
975 lgp
->args
.ctx
= get_nfs_open_context(ctx
);
976 nfs4_stateid_copy(&lgp
->args
.stateid
, stateid
);
977 lgp
->gfp_flags
= gfp_flags
;
978 lgp
->cred
= lo
->plh_lc_cred
;
980 return nfs4_proc_layoutget(lgp
, timeout
, gfp_flags
);
983 static void pnfs_clear_layoutcommit(struct inode
*inode
,
984 struct list_head
*head
)
986 struct nfs_inode
*nfsi
= NFS_I(inode
);
987 struct pnfs_layout_segment
*lseg
, *tmp
;
989 if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT
, &nfsi
->flags
))
991 list_for_each_entry_safe(lseg
, tmp
, &nfsi
->layout
->plh_segs
, pls_list
) {
992 if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT
, &lseg
->pls_flags
))
994 pnfs_lseg_dec_and_remove_zero(lseg
, head
);
998 void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr
*lo
,
999 const nfs4_stateid
*arg_stateid
,
1000 const struct pnfs_layout_range
*range
,
1001 const nfs4_stateid
*stateid
)
1003 struct inode
*inode
= lo
->plh_inode
;
1006 spin_lock(&inode
->i_lock
);
1007 if (!pnfs_layout_is_valid(lo
) || !arg_stateid
||
1008 !nfs4_stateid_match_other(&lo
->plh_stateid
, arg_stateid
))
1011 u32 seq
= be32_to_cpu(arg_stateid
->seqid
);
1013 pnfs_mark_matching_lsegs_invalid(lo
, &freeme
, range
, seq
);
1014 pnfs_free_returned_lsegs(lo
, &freeme
, range
, seq
);
1015 pnfs_set_layout_stateid(lo
, stateid
, true);
1017 pnfs_mark_layout_stateid_invalid(lo
, &freeme
);
1019 pnfs_clear_layoutreturn_waitbit(lo
);
1020 spin_unlock(&inode
->i_lock
);
1021 pnfs_free_lseg_list(&freeme
);
1026 pnfs_prepare_layoutreturn(struct pnfs_layout_hdr
*lo
,
1027 nfs4_stateid
*stateid
,
1028 enum pnfs_iomode
*iomode
)
1030 /* Serialise LAYOUTGET/LAYOUTRETURN */
1031 if (atomic_read(&lo
->plh_outstanding
) != 0)
1033 if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
))
1035 set_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
);
1036 pnfs_get_layout_hdr(lo
);
1037 if (test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
)) {
1038 if (stateid
!= NULL
) {
1039 nfs4_stateid_copy(stateid
, &lo
->plh_stateid
);
1040 if (lo
->plh_return_seq
!= 0)
1041 stateid
->seqid
= cpu_to_be32(lo
->plh_return_seq
);
1044 *iomode
= lo
->plh_return_iomode
;
1045 pnfs_clear_layoutreturn_info(lo
);
1048 if (stateid
!= NULL
)
1049 nfs4_stateid_copy(stateid
, &lo
->plh_stateid
);
1051 *iomode
= IOMODE_ANY
;
1056 pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args
*args
,
1057 struct pnfs_layout_hdr
*lo
,
1058 const nfs4_stateid
*stateid
,
1059 enum pnfs_iomode iomode
)
1061 struct inode
*inode
= lo
->plh_inode
;
1063 args
->layout_type
= NFS_SERVER(inode
)->pnfs_curr_ld
->id
;
1064 args
->inode
= inode
;
1065 args
->range
.iomode
= iomode
;
1066 args
->range
.offset
= 0;
1067 args
->range
.length
= NFS4_MAX_UINT64
;
1069 nfs4_stateid_copy(&args
->stateid
, stateid
);
1073 pnfs_send_layoutreturn(struct pnfs_layout_hdr
*lo
, const nfs4_stateid
*stateid
,
1074 enum pnfs_iomode iomode
, bool sync
)
1076 struct inode
*ino
= lo
->plh_inode
;
1077 struct pnfs_layoutdriver_type
*ld
= NFS_SERVER(ino
)->pnfs_curr_ld
;
1078 struct nfs4_layoutreturn
*lrp
;
1081 lrp
= kzalloc(sizeof(*lrp
), GFP_NOFS
);
1082 if (unlikely(lrp
== NULL
)) {
1084 spin_lock(&ino
->i_lock
);
1085 pnfs_clear_layoutreturn_waitbit(lo
);
1086 spin_unlock(&ino
->i_lock
);
1087 pnfs_put_layout_hdr(lo
);
1091 pnfs_init_layoutreturn_args(&lrp
->args
, lo
, stateid
, iomode
);
1092 lrp
->args
.ld_private
= &lrp
->ld_private
;
1093 lrp
->clp
= NFS_SERVER(ino
)->nfs_client
;
1094 lrp
->cred
= lo
->plh_lc_cred
;
1095 if (ld
->prepare_layoutreturn
)
1096 ld
->prepare_layoutreturn(&lrp
->args
);
1098 status
= nfs4_proc_layoutreturn(lrp
, sync
);
1100 dprintk("<-- %s status: %d\n", __func__
, status
);
1104 /* Return true if layoutreturn is needed */
1106 pnfs_layout_need_return(struct pnfs_layout_hdr
*lo
)
1108 struct pnfs_layout_segment
*s
;
1110 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
))
1113 /* Defer layoutreturn until all lsegs are done */
1114 list_for_each_entry(s
, &lo
->plh_segs
, pls_list
) {
1115 if (test_bit(NFS_LSEG_LAYOUTRETURN
, &s
->pls_flags
))
1122 static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr
*lo
)
1124 struct inode
*inode
= lo
->plh_inode
;
1126 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
))
1128 spin_lock(&inode
->i_lock
);
1129 if (pnfs_layout_need_return(lo
)) {
1130 nfs4_stateid stateid
;
1131 enum pnfs_iomode iomode
;
1134 send
= pnfs_prepare_layoutreturn(lo
, &stateid
, &iomode
);
1135 spin_unlock(&inode
->i_lock
);
	/* Send an async layoutreturn so we don't deadlock */
1138 pnfs_send_layoutreturn(lo
, &stateid
, iomode
, false);
1141 spin_unlock(&inode
->i_lock
);
1145 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
1146 * when the layout segment list is empty.
1148 * Note that a pnfs_layout_hdr can exist with an empty layout segment
1149 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
1150 * deviceid is marked invalid.
1153 _pnfs_return_layout(struct inode
*ino
)
1155 struct pnfs_layout_hdr
*lo
= NULL
;
1156 struct nfs_inode
*nfsi
= NFS_I(ino
);
1157 LIST_HEAD(tmp_list
);
1158 nfs4_stateid stateid
;
1160 bool send
, valid_layout
;
1162 dprintk("NFS: %s for inode %lu\n", __func__
, ino
->i_ino
);
1164 spin_lock(&ino
->i_lock
);
1167 spin_unlock(&ino
->i_lock
);
1168 dprintk("NFS: %s no layout to return\n", __func__
);
1171 /* Reference matched in nfs4_layoutreturn_release */
1172 pnfs_get_layout_hdr(lo
);
1173 /* Is there an outstanding layoutreturn ? */
1174 if (test_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
)) {
1175 spin_unlock(&ino
->i_lock
);
1176 if (wait_on_bit(&lo
->plh_flags
, NFS_LAYOUT_RETURN
,
1177 TASK_UNINTERRUPTIBLE
))
1178 goto out_put_layout_hdr
;
1179 spin_lock(&ino
->i_lock
);
1181 valid_layout
= pnfs_layout_is_valid(lo
);
1182 pnfs_clear_layoutcommit(ino
, &tmp_list
);
1183 pnfs_mark_matching_lsegs_invalid(lo
, &tmp_list
, NULL
, 0);
1185 if (NFS_SERVER(ino
)->pnfs_curr_ld
->return_range
) {
1186 struct pnfs_layout_range range
= {
1187 .iomode
= IOMODE_ANY
,
1189 .length
= NFS4_MAX_UINT64
,
1191 NFS_SERVER(ino
)->pnfs_curr_ld
->return_range(lo
, &range
);
1194 /* Don't send a LAYOUTRETURN if list was initially empty */
1195 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
) ||
1197 spin_unlock(&ino
->i_lock
);
1198 dprintk("NFS: %s no layout segments to return\n", __func__
);
1199 goto out_put_layout_hdr
;
1202 send
= pnfs_prepare_layoutreturn(lo
, &stateid
, NULL
);
1203 spin_unlock(&ino
->i_lock
);
1205 status
= pnfs_send_layoutreturn(lo
, &stateid
, IOMODE_ANY
, true);
1207 pnfs_free_lseg_list(&tmp_list
);
1208 pnfs_put_layout_hdr(lo
);
1210 dprintk("<-- %s status: %d\n", __func__
, status
);
1215 pnfs_commit_and_return_layout(struct inode
*inode
)
1217 struct pnfs_layout_hdr
*lo
;
1220 spin_lock(&inode
->i_lock
);
1221 lo
= NFS_I(inode
)->layout
;
1223 spin_unlock(&inode
->i_lock
);
1226 pnfs_get_layout_hdr(lo
);
1227 /* Block new layoutgets and read/write to ds */
1228 lo
->plh_block_lgets
++;
1229 spin_unlock(&inode
->i_lock
);
1230 filemap_fdatawait(inode
->i_mapping
);
1231 ret
= pnfs_layoutcommit_inode(inode
, true);
1233 ret
= _pnfs_return_layout(inode
);
1234 spin_lock(&inode
->i_lock
);
1235 lo
->plh_block_lgets
--;
1236 spin_unlock(&inode
->i_lock
);
1237 pnfs_put_layout_hdr(lo
);
1241 bool pnfs_roc(struct inode
*ino
,
1242 struct nfs4_layoutreturn_args
*args
,
1243 struct nfs4_layoutreturn_res
*res
,
1244 const struct rpc_cred
*cred
)
1246 struct nfs_inode
*nfsi
= NFS_I(ino
);
1247 struct nfs_open_context
*ctx
;
1248 struct nfs4_state
*state
;
1249 struct pnfs_layout_hdr
*lo
;
1250 struct pnfs_layout_segment
*lseg
, *next
;
1251 nfs4_stateid stateid
;
1252 enum pnfs_iomode iomode
= 0;
1253 bool layoutreturn
= false, roc
= false;
1254 bool skip_read
= false;
1256 if (!nfs_have_layout(ino
))
1259 spin_lock(&ino
->i_lock
);
1261 if (!lo
|| !pnfs_layout_is_valid(lo
) ||
1262 test_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
)) {
1266 pnfs_get_layout_hdr(lo
);
1267 if (test_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
)) {
1268 spin_unlock(&ino
->i_lock
);
1269 wait_on_bit(&lo
->plh_flags
, NFS_LAYOUT_RETURN
,
1270 TASK_UNINTERRUPTIBLE
);
1271 pnfs_put_layout_hdr(lo
);
1275 /* no roc if we hold a delegation */
1276 if (nfs4_check_delegation(ino
, FMODE_READ
)) {
1277 if (nfs4_check_delegation(ino
, FMODE_WRITE
))
1282 list_for_each_entry(ctx
, &nfsi
->open_files
, list
) {
1286 /* Don't return layout if there is open file state */
1287 if (state
->state
& FMODE_WRITE
)
1289 if (state
->state
& FMODE_READ
)
1294 list_for_each_entry_safe(lseg
, next
, &lo
->plh_segs
, pls_list
) {
1295 if (skip_read
&& lseg
->pls_range
.iomode
== IOMODE_READ
)
1297 /* If we are sending layoutreturn, invalidate all valid lsegs */
1298 if (!test_and_clear_bit(NFS_LSEG_ROC
, &lseg
->pls_flags
))
1301 * Note: mark lseg for return so pnfs_layout_remove_lseg
1302 * doesn't invalidate the layout for us.
1304 set_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
);
1305 if (!mark_lseg_invalid(lseg
, &lo
->plh_return_segs
))
1307 pnfs_set_plh_return_info(lo
, lseg
->pls_range
.iomode
, 0);
1310 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
))
1313 /* ROC in two conditions:
1314 * 1. there are ROC lsegs
1315 * 2. we don't send layoutreturn
1317 /* lo ref dropped in pnfs_roc_release() */
1318 layoutreturn
= pnfs_prepare_layoutreturn(lo
, &stateid
, &iomode
);
1319 /* If the creds don't match, we can't compound the layoutreturn */
1320 if (!layoutreturn
|| cred
!= lo
->plh_lc_cred
)
1324 pnfs_init_layoutreturn_args(args
, lo
, &stateid
, iomode
);
1325 res
->lrs_present
= 0;
1326 layoutreturn
= false;
1329 spin_unlock(&ino
->i_lock
);
1330 pnfs_layoutcommit_inode(ino
, true);
1332 struct pnfs_layoutdriver_type
*ld
= NFS_SERVER(ino
)->pnfs_curr_ld
;
1333 if (ld
->prepare_layoutreturn
)
1334 ld
->prepare_layoutreturn(args
);
1335 pnfs_put_layout_hdr(lo
);
1339 pnfs_send_layoutreturn(lo
, &stateid
, iomode
, true);
1340 pnfs_put_layout_hdr(lo
);
1344 void pnfs_roc_release(struct nfs4_layoutreturn_args
*args
,
1345 struct nfs4_layoutreturn_res
*res
,
1348 struct pnfs_layout_hdr
*lo
= args
->layout
;
1349 const nfs4_stateid
*arg_stateid
= NULL
;
1350 const nfs4_stateid
*res_stateid
= NULL
;
1351 struct nfs4_xdr_opaque_data
*ld_private
= args
->ld_private
;
1354 case -NFS4ERR_NOMATCHING_LAYOUT
:
1357 if (res
->lrs_present
)
1358 res_stateid
= &res
->stateid
;
1361 arg_stateid
= &args
->stateid
;
1363 pnfs_layoutreturn_free_lsegs(lo
, arg_stateid
, &args
->range
,
1365 if (ld_private
&& ld_private
->ops
&& ld_private
->ops
->free
)
1366 ld_private
->ops
->free(ld_private
);
1367 pnfs_put_layout_hdr(lo
);
1368 trace_nfs4_layoutreturn_on_close(args
->inode
, 0);
1371 bool pnfs_wait_on_layoutreturn(struct inode
*ino
, struct rpc_task
*task
)
1373 struct nfs_inode
*nfsi
= NFS_I(ino
);
1374 struct pnfs_layout_hdr
*lo
;
1377 /* we might not have grabbed lo reference. so need to check under
1379 spin_lock(&ino
->i_lock
);
1381 if (lo
&& test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
)) {
1382 rpc_sleep_on(&NFS_SERVER(ino
)->roc_rpcwaitq
, task
, NULL
);
1385 spin_unlock(&ino
->i_lock
);
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are at the head of the list.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
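/*
 * Example ordering (illustrative values): with this comparator, an RW
 * segment at { offset 0, length NFS4_MAX_UINT64 } sorts before an
 * otherwise identical IOMODE_READ segment, and { offset 0, length 4096 }
 * sorts after { offset 0, length 8192 } because shorter lengths compare
 * greater.  pnfs_lseg_range_is_after() below reduces this three-way result
 * to the boolean used when inserting into the layout cache.
 */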
static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}
1422 pnfs_lseg_no_merge(struct pnfs_layout_segment
*lseg
,
1423 struct pnfs_layout_segment
*old
)
1429 pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr
*lo
,
1430 struct pnfs_layout_segment
*lseg
,
1431 bool (*is_after
)(const struct pnfs_layout_range
*,
1432 const struct pnfs_layout_range
*),
1433 bool (*do_merge
)(struct pnfs_layout_segment
*,
1434 struct pnfs_layout_segment
*),
1435 struct list_head
*free_me
)
1437 struct pnfs_layout_segment
*lp
, *tmp
;
1439 dprintk("%s:Begin\n", __func__
);
1441 list_for_each_entry_safe(lp
, tmp
, &lo
->plh_segs
, pls_list
) {
1442 if (test_bit(NFS_LSEG_VALID
, &lp
->pls_flags
) == 0)
1444 if (do_merge(lseg
, lp
)) {
1445 mark_lseg_invalid(lp
, free_me
);
1448 if (is_after(&lseg
->pls_range
, &lp
->pls_range
))
1450 list_add_tail(&lseg
->pls_list
, &lp
->pls_list
);
1451 dprintk("%s: inserted lseg %p "
1452 "iomode %d offset %llu length %llu before "
1453 "lp %p iomode %d offset %llu length %llu\n",
1454 __func__
, lseg
, lseg
->pls_range
.iomode
,
1455 lseg
->pls_range
.offset
, lseg
->pls_range
.length
,
1456 lp
, lp
->pls_range
.iomode
, lp
->pls_range
.offset
,
1457 lp
->pls_range
.length
);
1460 list_add_tail(&lseg
->pls_list
, &lo
->plh_segs
);
1461 dprintk("%s: inserted lseg %p "
1462 "iomode %d offset %llu length %llu at tail\n",
1463 __func__
, lseg
, lseg
->pls_range
.iomode
,
1464 lseg
->pls_range
.offset
, lseg
->pls_range
.length
);
1466 pnfs_get_layout_hdr(lo
);
1468 dprintk("%s:Return\n", __func__
);
1470 EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg
);
1473 pnfs_layout_insert_lseg(struct pnfs_layout_hdr
*lo
,
1474 struct pnfs_layout_segment
*lseg
,
1475 struct list_head
*free_me
)
1477 struct inode
*inode
= lo
->plh_inode
;
1478 struct pnfs_layoutdriver_type
*ld
= NFS_SERVER(inode
)->pnfs_curr_ld
;
1480 if (ld
->add_lseg
!= NULL
)
1481 ld
->add_lseg(lo
, lseg
, free_me
);
1483 pnfs_generic_layout_insert_lseg(lo
, lseg
,
1484 pnfs_lseg_range_is_after
,
1489 static struct pnfs_layout_hdr
*
1490 alloc_init_layout_hdr(struct inode
*ino
,
1491 struct nfs_open_context
*ctx
,
1494 struct pnfs_layout_hdr
*lo
;
1496 lo
= pnfs_alloc_layout_hdr(ino
, gfp_flags
);
1499 refcount_set(&lo
->plh_refcount
, 1);
1500 INIT_LIST_HEAD(&lo
->plh_layouts
);
1501 INIT_LIST_HEAD(&lo
->plh_segs
);
1502 INIT_LIST_HEAD(&lo
->plh_return_segs
);
1503 INIT_LIST_HEAD(&lo
->plh_bulk_destroy
);
1504 lo
->plh_inode
= ino
;
1505 lo
->plh_lc_cred
= get_rpccred(ctx
->cred
);
1506 lo
->plh_flags
|= 1 << NFS_LAYOUT_INVALID_STID
;
1510 static struct pnfs_layout_hdr
*
1511 pnfs_find_alloc_layout(struct inode
*ino
,
1512 struct nfs_open_context
*ctx
,
1514 __releases(&ino
->i_lock
)
1515 __acquires(&ino
->i_lock
)
1517 struct nfs_inode
*nfsi
= NFS_I(ino
);
1518 struct pnfs_layout_hdr
*new = NULL
;
1520 dprintk("%s Begin ino=%p layout=%p\n", __func__
, ino
, nfsi
->layout
);
1522 if (nfsi
->layout
!= NULL
)
1524 spin_unlock(&ino
->i_lock
);
1525 new = alloc_init_layout_hdr(ino
, ctx
, gfp_flags
);
1526 spin_lock(&ino
->i_lock
);
1528 if (likely(nfsi
->layout
== NULL
)) { /* Won the race? */
1531 } else if (new != NULL
)
1532 pnfs_free_layout_hdr(new);
1534 pnfs_get_layout_hdr(nfsi
->layout
);
1535 return nfsi
->layout
;
/*
 * iomode matching rules:
 * iomode	lseg	strict	match
 * -----	-----	------	-----
 * READ		READ	N/A	true
 * READ		RW	true	false
 * READ		RW	false	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range,
		 bool strict_iomode)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    (range->iomode != ls_range->iomode &&
	     strict_iomode) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return false;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}
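/*
 * Example (illustrative): a cached IOMODE_READ lseg covering bytes
 * [0, 4096) matches a non-strict IOMODE_READ lookup whose range starts in
 * [0, 4096), but never matches an IOMODE_RW lookup, so a writer still has
 * to issue LAYOUTGET even though a read layout is cached.
 */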
1572 * lookup range in layout
1574 static struct pnfs_layout_segment
*
1575 pnfs_find_lseg(struct pnfs_layout_hdr
*lo
,
1576 struct pnfs_layout_range
*range
,
1579 struct pnfs_layout_segment
*lseg
, *ret
= NULL
;
1581 dprintk("%s:Begin\n", __func__
);
1583 list_for_each_entry(lseg
, &lo
->plh_segs
, pls_list
) {
1584 if (test_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
) &&
1585 !test_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
) &&
1586 pnfs_lseg_range_match(&lseg
->pls_range
, range
,
1588 ret
= pnfs_get_lseg(lseg
);
1593 dprintk("%s:Return lseg %p ref %d\n",
1594 __func__
, ret
, ret
? refcount_read(&ret
->pls_refcount
) : 0);
/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
1617 static bool pnfs_within_mdsthreshold(struct nfs_open_context
*ctx
,
1618 struct inode
*ino
, int iomode
)
1620 struct nfs4_threshold
*t
= ctx
->mdsthreshold
;
1621 struct nfs_inode
*nfsi
= NFS_I(ino
);
1622 loff_t fsize
= i_size_read(ino
);
1623 bool size
= false, size_set
= false, io
= false, io_set
= false, ret
= false;
1628 dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1629 __func__
, t
->bm
, t
->rd_sz
, t
->wr_sz
, t
->rd_io_sz
, t
->wr_io_sz
);
1633 if (t
->bm
& THRESHOLD_RD
) {
1634 dprintk("%s fsize %llu\n", __func__
, fsize
);
1636 if (fsize
< t
->rd_sz
)
1639 if (t
->bm
& THRESHOLD_RD_IO
) {
1640 dprintk("%s nfsi->read_io %llu\n", __func__
,
1643 if (nfsi
->read_io
< t
->rd_io_sz
)
1648 if (t
->bm
& THRESHOLD_WR
) {
1649 dprintk("%s fsize %llu\n", __func__
, fsize
);
1651 if (fsize
< t
->wr_sz
)
1654 if (t
->bm
& THRESHOLD_WR_IO
) {
1655 dprintk("%s nfsi->write_io %llu\n", __func__
,
1658 if (nfsi
->write_io
< t
->wr_io_sz
)
1663 if (size_set
&& io_set
) {
1666 } else if (size
|| io
)
1669 dprintk("<-- %s size %d io %d ret %d\n", __func__
, size
, io
, ret
);
1673 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr
*lo
)
1676 * send layoutcommit as it can hold up layoutreturn due to lseg
1679 pnfs_layoutcommit_inode(lo
->plh_inode
, false);
1680 return !wait_on_bit_action(&lo
->plh_flags
, NFS_LAYOUT_RETURN
,
1681 nfs_wait_bit_killable
,
1682 TASK_UNINTERRUPTIBLE
);
1685 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr
*lo
)
1687 unsigned long *bitlock
= &lo
->plh_flags
;
1689 clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET
, bitlock
);
1690 smp_mb__after_atomic();
1691 wake_up_bit(bitlock
, NFS_LAYOUT_FIRST_LAYOUTGET
);
 * Layout segment is retrieved from the server if not cached.
1696 * The appropriate layout segment is referenced and returned to the caller.
1698 struct pnfs_layout_segment
*
1699 pnfs_update_layout(struct inode
*ino
,
1700 struct nfs_open_context
*ctx
,
1703 enum pnfs_iomode iomode
,
1707 struct pnfs_layout_range arg
= {
1713 struct nfs_server
*server
= NFS_SERVER(ino
);
1714 struct nfs_client
*clp
= server
->nfs_client
;
1715 struct pnfs_layout_hdr
*lo
= NULL
;
1716 struct pnfs_layout_segment
*lseg
= NULL
;
1717 nfs4_stateid stateid
;
1719 unsigned long giveup
= jiffies
+ (clp
->cl_lease_time
<< 1);
1722 if (!pnfs_enabled_sb(NFS_SERVER(ino
))) {
1723 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1724 PNFS_UPDATE_LAYOUT_NO_PNFS
);
1728 if (iomode
== IOMODE_READ
&& i_size_read(ino
) == 0) {
1729 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1730 PNFS_UPDATE_LAYOUT_RD_ZEROLEN
);
1734 if (pnfs_within_mdsthreshold(ctx
, ino
, iomode
)) {
1735 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1736 PNFS_UPDATE_LAYOUT_MDSTHRESH
);
1741 nfs4_client_recover_expired_lease(clp
);
1743 spin_lock(&ino
->i_lock
);
1744 lo
= pnfs_find_alloc_layout(ino
, ctx
, gfp_flags
);
1746 spin_unlock(&ino
->i_lock
);
1747 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1748 PNFS_UPDATE_LAYOUT_NOMEM
);
1752 /* Do we even need to bother with this? */
1753 if (test_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
)) {
1754 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1755 PNFS_UPDATE_LAYOUT_BULK_RECALL
);
1756 dprintk("%s matches recall, use MDS\n", __func__
);
1760 /* if LAYOUTGET already failed once we don't try again */
1761 if (pnfs_layout_io_test_failed(lo
, iomode
)) {
1762 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1763 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL
);
1767 lseg
= pnfs_find_lseg(lo
, &arg
, strict_iomode
);
1769 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1770 PNFS_UPDATE_LAYOUT_FOUND_CACHED
);
1774 if (!nfs4_valid_open_stateid(ctx
->state
)) {
1775 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1776 PNFS_UPDATE_LAYOUT_INVALID_OPEN
);
1781 * Choose a stateid for the LAYOUTGET. If we don't have a layout
1782 * stateid, or it has been invalidated, then we must use the open
1785 if (test_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
)) {
1788 * The first layoutget for the file. Need to serialize per
1789 * RFC 5661 Errata 3208.
1791 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET
,
1793 spin_unlock(&ino
->i_lock
);
1794 wait_on_bit(&lo
->plh_flags
, NFS_LAYOUT_FIRST_LAYOUTGET
,
1795 TASK_UNINTERRUPTIBLE
);
1796 pnfs_put_layout_hdr(lo
);
1797 dprintk("%s retrying\n", __func__
);
1802 if (nfs4_select_rw_stateid(ctx
->state
,
1803 iomode
== IOMODE_RW
? FMODE_WRITE
: FMODE_READ
,
1804 NULL
, &stateid
, NULL
) != 0) {
1805 trace_pnfs_update_layout(ino
, pos
, count
,
1807 PNFS_UPDATE_LAYOUT_INVALID_OPEN
);
1811 nfs4_stateid_copy(&stateid
, &lo
->plh_stateid
);
1815 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
1816 * for LAYOUTRETURN even if first is true.
1818 if (test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
)) {
1819 spin_unlock(&ino
->i_lock
);
1820 dprintk("%s wait for layoutreturn\n", __func__
);
1821 if (pnfs_prepare_to_retry_layoutget(lo
)) {
1823 pnfs_clear_first_layoutget(lo
);
1824 pnfs_put_layout_hdr(lo
);
1825 dprintk("%s retrying\n", __func__
);
1826 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
,
1827 lseg
, PNFS_UPDATE_LAYOUT_RETRY
);
1830 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1831 PNFS_UPDATE_LAYOUT_RETURN
);
1832 goto out_put_layout_hdr
;
1835 if (pnfs_layoutgets_blocked(lo
)) {
1836 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1837 PNFS_UPDATE_LAYOUT_BLOCKED
);
1840 atomic_inc(&lo
->plh_outstanding
);
1841 spin_unlock(&ino
->i_lock
);
1843 if (list_empty(&lo
->plh_layouts
)) {
1844 /* The lo must be on the clp list if there is any
1845 * chance of a CB_LAYOUTRECALL(FILE) coming in.
1847 spin_lock(&clp
->cl_lock
);
1848 if (list_empty(&lo
->plh_layouts
))
1849 list_add_tail(&lo
->plh_layouts
, &server
->layouts
);
1850 spin_unlock(&clp
->cl_lock
);
1853 pg_offset
= arg
.offset
& ~PAGE_MASK
;
1855 arg
.offset
-= pg_offset
;
1856 arg
.length
+= pg_offset
;
1858 if (arg
.length
!= NFS4_MAX_UINT64
)
1859 arg
.length
= PAGE_ALIGN(arg
.length
);
1861 lseg
= send_layoutget(lo
, ctx
, &stateid
, &arg
, &timeout
, gfp_flags
);
1862 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1863 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET
);
1864 atomic_dec(&lo
->plh_outstanding
);
1866 switch(PTR_ERR(lseg
)) {
1868 if (time_after(jiffies
, giveup
))
1871 case -ERECALLCONFLICT
:
1872 /* Huh? We hold no layouts, how is there a recall? */
1877 /* Destroy the existing layout and start over */
1878 if (time_after(jiffies
, giveup
))
1879 pnfs_destroy_layout(NFS_I(ino
));
1884 if (!nfs_error_is_fatal(PTR_ERR(lseg
))) {
1885 pnfs_layout_clear_fail_bit(lo
, pnfs_iomode_to_fail_bit(iomode
));
1888 goto out_put_layout_hdr
;
1892 pnfs_clear_first_layoutget(lo
);
1893 trace_pnfs_update_layout(ino
, pos
, count
,
1894 iomode
, lo
, lseg
, PNFS_UPDATE_LAYOUT_RETRY
);
1895 pnfs_put_layout_hdr(lo
);
1899 pnfs_layout_clear_fail_bit(lo
, pnfs_iomode_to_fail_bit(iomode
));
1904 pnfs_clear_first_layoutget(lo
);
1905 pnfs_put_layout_hdr(lo
);
1907 dprintk("%s: inode %s/%llu pNFS layout segment %s for "
1908 "(%s, offset: %llu, length: %llu)\n",
1909 __func__
, ino
->i_sb
->s_id
,
1910 (unsigned long long)NFS_FILEID(ino
),
1911 IS_ERR_OR_NULL(lseg
) ? "not found" : "found",
1912 iomode
==IOMODE_RW
? "read/write" : "read-only",
1913 (unsigned long long)pos
,
1914 (unsigned long long)count
);
1917 spin_unlock(&ino
->i_lock
);
1918 goto out_put_layout_hdr
;
1920 EXPORT_SYMBOL_GPL(pnfs_update_layout
);
1923 pnfs_sanity_check_layout_range(struct pnfs_layout_range
*range
)
1925 switch (range
->iomode
) {
1932 if (range
->offset
== NFS4_MAX_UINT64
)
1934 if (range
->length
== 0)
1936 if (range
->length
!= NFS4_MAX_UINT64
&&
1937 range
->length
> NFS4_MAX_UINT64
- range
->offset
)
1942 struct pnfs_layout_segment
*
1943 pnfs_layout_process(struct nfs4_layoutget
*lgp
)
1945 struct pnfs_layout_hdr
*lo
= NFS_I(lgp
->args
.inode
)->layout
;
1946 struct nfs4_layoutget_res
*res
= &lgp
->res
;
1947 struct pnfs_layout_segment
*lseg
;
1948 struct inode
*ino
= lo
->plh_inode
;
1951 if (!pnfs_sanity_check_layout_range(&res
->range
))
1952 return ERR_PTR(-EINVAL
);
1954 /* Inject layout blob into I/O device driver */
1955 lseg
= NFS_SERVER(ino
)->pnfs_curr_ld
->alloc_lseg(lo
, res
, lgp
->gfp_flags
);
1956 if (IS_ERR_OR_NULL(lseg
)) {
1958 lseg
= ERR_PTR(-ENOMEM
);
1960 dprintk("%s: Could not allocate layout: error %ld\n",
1961 __func__
, PTR_ERR(lseg
));
1965 pnfs_init_lseg(lo
, lseg
, &res
->range
, &res
->stateid
);
1967 spin_lock(&ino
->i_lock
);
1968 if (pnfs_layoutgets_blocked(lo
)) {
1969 dprintk("%s forget reply due to state\n", __func__
);
1973 if (!pnfs_layout_is_valid(lo
)) {
1974 /* We have a completely new layout */
1975 pnfs_set_layout_stateid(lo
, &res
->stateid
, true);
1976 } else if (nfs4_stateid_match_other(&lo
->plh_stateid
, &res
->stateid
)) {
1977 /* existing state ID, make sure the sequence number matches. */
1978 if (pnfs_layout_stateid_blocked(lo
, &res
->stateid
)) {
1979 dprintk("%s forget reply due to sequence\n", __func__
);
1982 pnfs_set_layout_stateid(lo
, &res
->stateid
, false);
1985 * We got an entirely new state ID. Mark all segments for the
1986 * inode invalid, and retry the layoutget
1988 pnfs_mark_layout_stateid_invalid(lo
, &free_me
);
1992 pnfs_get_lseg(lseg
);
1993 pnfs_layout_insert_lseg(lo
, lseg
, &free_me
);
1996 if (res
->return_on_close
)
1997 set_bit(NFS_LSEG_ROC
, &lseg
->pls_flags
);
1999 spin_unlock(&ino
->i_lock
);
2000 pnfs_free_lseg_list(&free_me
);
2004 spin_unlock(&ino
->i_lock
);
2005 lseg
->pls_layout
= lo
;
2006 NFS_SERVER(ino
)->pnfs_curr_ld
->free_lseg(lseg
);
2007 if (!pnfs_layout_is_valid(lo
))
2008 nfs_commit_inode(ino
, 0);
2009 return ERR_PTR(-EAGAIN
);
2013 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
2014 * @lo: pointer to layout header
2015 * @tmp_list: list header to be used with pnfs_free_lseg_list()
2016 * @return_range: describe layout segment ranges to be returned
2018 * This function is mainly intended for use by layoutrecall. It attempts
2019 * to free the layout segment immediately, or else to mark it for return
2020 * as soon as its reference count drops to zero.
2023 pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr
*lo
,
2024 struct list_head
*tmp_list
,
2025 const struct pnfs_layout_range
*return_range
,
2028 struct pnfs_layout_segment
*lseg
, *next
;
2031 dprintk("%s:Begin lo %p\n", __func__
, lo
);
2033 if (list_empty(&lo
->plh_segs
))
2036 assert_spin_locked(&lo
->plh_inode
->i_lock
);
2038 list_for_each_entry_safe(lseg
, next
, &lo
->plh_segs
, pls_list
)
2039 if (pnfs_match_lseg_recall(lseg
, return_range
, seq
)) {
2040 dprintk("%s: marking lseg %p iomode %d "
2041 "offset %llu length %llu\n", __func__
,
2042 lseg
, lseg
->pls_range
.iomode
,
2043 lseg
->pls_range
.offset
,
2044 lseg
->pls_range
.length
);
2045 if (mark_lseg_invalid(lseg
, tmp_list
))
2048 set_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
);
2052 pnfs_set_plh_return_info(lo
, return_range
->iomode
, seq
);
2057 void pnfs_error_mark_layout_for_return(struct inode
*inode
,
2058 struct pnfs_layout_segment
*lseg
)
2060 struct pnfs_layout_hdr
*lo
= NFS_I(inode
)->layout
;
2061 struct pnfs_layout_range range
= {
2062 .iomode
= lseg
->pls_range
.iomode
,
2064 .length
= NFS4_MAX_UINT64
,
2066 bool return_now
= false;
2068 spin_lock(&inode
->i_lock
);
2069 if (!pnfs_layout_is_valid(lo
)) {
2070 spin_unlock(&inode
->i_lock
);
2073 pnfs_set_plh_return_info(lo
, range
.iomode
, 0);
2075 * mark all matching lsegs so that we are sure to have no live
2076 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
2079 if (!pnfs_mark_matching_lsegs_return(lo
, &lo
->plh_return_segs
, &range
, 0)) {
2080 nfs4_stateid stateid
;
2081 enum pnfs_iomode iomode
;
2083 return_now
= pnfs_prepare_layoutreturn(lo
, &stateid
, &iomode
);
2084 spin_unlock(&inode
->i_lock
);
2086 pnfs_send_layoutreturn(lo
, &stateid
, iomode
, false);
2088 spin_unlock(&inode
->i_lock
);
2089 nfs_commit_inode(inode
, 0);
2092 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return
);
2095 pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor
*pgio
)
2097 if (pgio
->pg_lseg
== NULL
||
2098 test_bit(NFS_LSEG_VALID
, &pgio
->pg_lseg
->pls_flags
))
2100 pnfs_put_lseg(pgio
->pg_lseg
);
2101 pgio
->pg_lseg
= NULL
;
2103 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout
);
2106 * Check for any intersection between the request and the pgio->pg_lseg,
2107 * and if none, put this pgio->pg_lseg away.
2110 pnfs_generic_pg_check_range(struct nfs_pageio_descriptor
*pgio
, struct nfs_page
*req
)
2112 if (pgio
->pg_lseg
&& !pnfs_lseg_request_intersecting(pgio
->pg_lseg
, req
)) {
2113 pnfs_put_lseg(pgio
->pg_lseg
);
2114 pgio
->pg_lseg
= NULL
;
2119 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor
*pgio
, struct nfs_page
*req
)
2121 u64 rd_size
= req
->wb_bytes
;
2123 pnfs_generic_pg_check_layout(pgio
);
2124 pnfs_generic_pg_check_range(pgio
, req
);
2125 if (pgio
->pg_lseg
== NULL
) {
2126 if (pgio
->pg_dreq
== NULL
)
2127 rd_size
= i_size_read(pgio
->pg_inode
) - req_offset(req
);
2129 rd_size
= nfs_dreq_bytes_left(pgio
->pg_dreq
);
2131 pgio
->pg_lseg
= pnfs_update_layout(pgio
->pg_inode
,
2138 if (IS_ERR(pgio
->pg_lseg
)) {
2139 pgio
->pg_error
= PTR_ERR(pgio
->pg_lseg
);
2140 pgio
->pg_lseg
= NULL
;
2144 /* If no lseg, fall back to read through mds */
2145 if (pgio
->pg_lseg
== NULL
)
2146 nfs_pageio_reset_read_mds(pgio
);
2149 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read
);
2152 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor
*pgio
,
2153 struct nfs_page
*req
, u64 wb_size
)
2155 pnfs_generic_pg_check_layout(pgio
);
2156 pnfs_generic_pg_check_range(pgio
, req
);
2157 if (pgio
->pg_lseg
== NULL
) {
2158 pgio
->pg_lseg
= pnfs_update_layout(pgio
->pg_inode
,
2165 if (IS_ERR(pgio
->pg_lseg
)) {
2166 pgio
->pg_error
= PTR_ERR(pgio
->pg_lseg
);
2167 pgio
->pg_lseg
= NULL
;
2171 /* If no lseg, fall back to write through mds */
2172 if (pgio
->pg_lseg
== NULL
)
2173 nfs_pageio_reset_write_mds(pgio
);
2175 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write
);
2178 pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor
*desc
)
2180 if (desc
->pg_lseg
) {
2181 pnfs_put_lseg(desc
->pg_lseg
);
2182 desc
->pg_lseg
= NULL
;
2185 EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup
);
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are less bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	if (pgio->pg_lseg) {
		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
					  pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);

		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end)
			return 0;

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

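/*
 * A layout driver hit a fatal error on a pNFS write: optionally return the
 * layout (PNFS_LAYOUTRET_ON_ERROR) and resend the I/O through the MDS.
 */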
static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

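/*
 * Requeue the header's pages on the current mirror and reset the descriptor
 * to plain write-through-MDS I/O; only done once per header (NFS_IOHDR_REDO).
 */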
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}

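/*
 * Call the appropriate parallel I/O subsystem write function.
 */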
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

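/*
 * Hand the write off to the layout driver and handle its verdict: fall back
 * to the MDS if the pNFS attempt was not made, or requeue the pages for a
 * fresh pNFS attempt on PNFS_TRY_AGAIN.
 */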
static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_write_through_mds(desc, hdr);
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
		/* cleanup hdr and prepare to redo pnfs */
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

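/*
 * A layout driver hit a fatal error on a pNFS read: optionally return the
 * layout (PNFS_LAYOUTRET_ON_ERROR) and resend the I/O through the MDS.
 */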
static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error))
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

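/*
 * Requeue the header's pages on the current mirror and reset the descriptor
 * to plain read-through-MDS I/O; only done once per header (NFS_IOHDR_REDO).
 */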
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/* Resend all requests through pnfs. */
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		/* Prevent deadlocks with layoutreturn! */
		pnfs_put_lseg(hdr->lseg);
		hdr->lseg = NULL;

		nfs_pageio_init_read(&pgio, hdr->inode, false,
					hdr->completion_ops);
		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
	}
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

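/*
 * Hand the read off to the layout driver and handle its verdict: fall back
 * to the MDS if the pNFS attempt was not made, or requeue the pages for a
 * fresh pNFS attempt on PNFS_TRY_AGAIN.
 */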
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_read_through_mds(desc, hdr);
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
		/* cleanup hdr and prepare to redo pnfs */
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

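/*
 * Release the LAYOUTCOMMITTING bit and wake up anyone waiting on it in
 * pnfs_layoutcommit_inode().
 */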
static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

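/*
 * Mark pNFS I/O in the segment's iomode as failed for this layout, so that
 * subsequent I/O in that mode goes through the MDS.
 */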
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

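/*
 * Record that a LAYOUTCOMMIT is needed for this inode, track the last write
 * byte (plh_lwb), and pin the layout segment until nfs4_layoutcommit_release.
 */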
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	if (end_pos != 0)
		data->args.lastbytewritten = end_pos - 1;
	else
		data->args.lastbytewritten = U64_MAX;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_rpccred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
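/*
 * Ask the layout driver to prepare a LAYOUTSTATS update for this inode and
 * send it to the server; skipped if one is already outstanding for the inode
 * (NFS_INO_LAYOUTSTATS) or the server does not support layoutstats.
 */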
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);