/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120*HZ)
/*
 * protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
                struct list_head *free_me,
                const struct pnfs_layout_range *range,
                u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                struct list_head *tmp_list);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        if (local != NULL && !try_module_get(local->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                local = NULL;
        }
        spin_unlock(&pnfs_spinlock);
        return local;
}
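
/*
 * Informal note (added commentary, not from the original source): a
 * successful find_pnfs_driver() holds a reference on the driver module via
 * try_module_get(). set_pnfs_layoutdriver() keeps that reference for the
 * lifetime of the mount, and it is dropped again with module_put() either on
 * a set_layoutdriver() failure or in unset_pnfs_layoutdriver() at unmount.
 */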
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                /* Decrement the MDS count. Purge the deviceid cache if zero */
                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
                        nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}
/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 *	  mount option? something else?
 */
static const u32 ld_prefs[] = {
        LAYOUT_SCSI,
        LAYOUT_BLOCK_VOLUME,
        LAYOUT_OSD2_OBJECTS,
        LAYOUT_FLEX_FILES,
        LAYOUT_NFSV4_1_FILES,
        0
};

static int
ld_cmp(const void *e1, const void *e2)
{
        u32 ld1 = *((u32 *)e1);
        u32 ld2 = *((u32 *)e2);
        int i;

        for (i = 0; ld_prefs[i] != 0; i++) {
                if (ld1 == ld_prefs[i])
                        return -1;

                if (ld2 == ld_prefs[i])
                        return 1;
        }
        return 0;
}
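
/*
 * Informal example (added commentary, not from the original source): if the
 * MDS advertises both LAYOUT_NFSV4_1_FILES and LAYOUT_FLEX_FILES, sorting the
 * layouttype array with ld_cmp() moves whichever type appears earlier in
 * ld_prefs[] to the front, so set_pnfs_layoutdriver() below tries that
 * driver first.
 */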
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @ids array of layout types supported by MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                      struct nfs_fsinfo *fsinfo)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;
        u32 id;
        int i;

        if (fsinfo->nlayouttypes == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
                        __func__, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }

        sort(fsinfo->layouttype, fsinfo->nlayouttypes,
                sizeof(*fsinfo->layouttype), ld_cmp, NULL);

        for (i = 0; i < fsinfo->nlayouttypes; i++) {
                id = fsinfo->layouttype[i];
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
                                        id);
                        ld_type = find_pnfs_driver(id);
                }
                if (ld_type)
                        break;
        }

        if (!ld_type) {
                dprintk("%s: No pNFS module found!\n", __func__);
                goto out_no_driver;
        }

        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
                        "driver %u.\n", __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        /* Bump the MDS count */
        atomic_inc(&server->nfs_client->cl_mds_count);

        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}
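
/*
 * Informal example (added commentary; the exact id value and module alias are
 * assumptions from the LAYOUT_NFSV4_1_MODULE_PREFIX naming convention): for
 * layout type id 4 (LAYOUT_FLEX_FILES) the request_module() call above asks
 * for "nfs-layouttype4-4", which the flexfiles driver advertises as a module
 * alias, allowing the layout driver to be autoloaded on first use.
 */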
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "NFS: %s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
                        __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);

        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
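
/*
 * Usage sketch (illustrative only, not part of this file; the mydriver_*
 * names are hypothetical): a layout driver typically registers itself from
 * its module_init hook, e.g.
 *
 *	static struct pnfs_layoutdriver_type mydriver_type = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "mydriver",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= mydriver_alloc_lseg,
 *		.free_lseg	= mydriver_free_lseg,
 *	};
 *
 *	return pnfs_register_layoutdriver(&mydriver_type);
 *
 * pnfs_register_layoutdriver() rejects id 0 and drivers that do not provide
 * both alloc_lseg and free_lseg; pnfs_unregister_layoutdriver() is the
 * matching call from module_exit.
 */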
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        refcount_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        return ld->alloc_layout_hdr(ino, gfp_flags);
}
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
                struct nfs_client *clp = server->nfs_client;

                spin_lock(&clp->cl_lock);
                list_del_rcu(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        put_cred(lo->plh_lc_cred);
        return ld->free_layout_hdr(lo);
}
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        nfsi->layout = NULL;
        /* Reset MDS Threshold I/O counters */
        nfsi->write_io = 0;
        nfsi->read_io = 0;
}
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode;
        unsigned long i_state;

        if (!lo)
                return;
        inode = lo->plh_inode;
        pnfs_layoutreturn_before_put_layout_hdr(lo);

        if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                if (!list_empty(&lo->plh_segs))
                        WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
                pnfs_detach_layout_hdr(lo);
                i_state = inode->i_state;
                spin_unlock(&inode->i_lock);
                pnfs_free_layout_hdr(lo);
                /* Notify pnfs_destroy_layout_final() that we're done */
                if (i_state & (I_FREEING | I_CLEAR))
                        wake_up_var(lo);
        }
}
static struct inode *
pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = igrab(lo->plh_inode);

        if (inode)
                return inode;
        set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
        return NULL;
}
/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
        return (s32)(s1 - s2) > 0;
}
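
/*
 * Informal example (added commentary, not from the original source): with
 * 32-bit wraparound, s1 = 2 and s2 = 0xfffffffe gives (s32)(s1 - s2) = 4 > 0,
 * so seqid 2 is treated as newer than 0xfffffffe even though it is
 * numerically smaller.
 */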
static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
{
        if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
                lo->plh_barrier = newseq;
}
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
                u32 seq)
{
        if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
                iomode = IOMODE_ANY;
        lo->plh_return_iomode = iomode;
        set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
        if (seq != 0) {
                WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
                lo->plh_return_seq = seq;
                pnfs_barrier_update(lo, seq);
        }
}
static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layout_segment *lseg;
        lo->plh_return_iomode = 0;
        lo->plh_return_seq = 0;
        clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                        continue;
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
        }
}
static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
        clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
        clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
        smp_mb__after_atomic();
        wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
                struct list_head *free_me)
{
        clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
        clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
        if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}
393 * Update the seqid of a layout stateid after receiving
394 * NFS4ERR_OLD_STATEID
396 bool nfs4_layout_refresh_old_stateid(nfs4_stateid
*dst
,
397 struct pnfs_layout_range
*dst_range
,
400 struct pnfs_layout_hdr
*lo
;
401 struct pnfs_layout_range range
= {
402 .iomode
= IOMODE_ANY
,
404 .length
= NFS4_MAX_UINT64
,
410 spin_lock(&inode
->i_lock
);
411 lo
= NFS_I(inode
)->layout
;
412 if (lo
&& pnfs_layout_is_valid(lo
) &&
413 nfs4_stateid_match_other(dst
, &lo
->plh_stateid
)) {
414 /* Is our call using the most recent seqid? If so, bump it */
415 if (!nfs4_stateid_is_newer(&lo
->plh_stateid
, dst
)) {
416 nfs4_stateid_seqid_inc(dst
);
420 /* Try to update the seqid to the most recent */
421 err
= pnfs_mark_matching_lsegs_return(lo
, &head
, &range
, 0);
423 dst
->seqid
= lo
->plh_stateid
.seqid
;
429 spin_unlock(&inode
->i_lock
);
430 pnfs_free_lseg_list(&head
);
435 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
437 * In order to continue using the pnfs_layout_hdr, a full recovery
439 * Note that caller must hold inode->i_lock.
442 pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr
*lo
,
443 struct list_head
*lseg_list
)
445 struct pnfs_layout_range range
= {
446 .iomode
= IOMODE_ANY
,
448 .length
= NFS4_MAX_UINT64
,
450 struct pnfs_layout_segment
*lseg
, *next
;
452 set_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
);
453 list_for_each_entry_safe(lseg
, next
, &lo
->plh_segs
, pls_list
)
454 pnfs_clear_lseg_state(lseg
, lseg_list
);
455 pnfs_clear_layoutreturn_info(lo
);
456 pnfs_free_returned_lsegs(lo
, lseg_list
, &range
, 0);
457 if (test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
) &&
458 !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
))
459 pnfs_clear_layoutreturn_waitbit(lo
);
460 return !list_empty(&lo
->plh_segs
);
464 pnfs_iomode_to_fail_bit(u32 iomode
)
466 return iomode
== IOMODE_RW
?
467 NFS_LAYOUT_RW_FAILED
: NFS_LAYOUT_RO_FAILED
;
471 pnfs_layout_set_fail_bit(struct pnfs_layout_hdr
*lo
, int fail_bit
)
473 lo
->plh_retry_timestamp
= jiffies
;
474 if (!test_and_set_bit(fail_bit
, &lo
->plh_flags
))
475 refcount_inc(&lo
->plh_refcount
);
479 pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr
*lo
, int fail_bit
)
481 if (test_and_clear_bit(fail_bit
, &lo
->plh_flags
))
482 refcount_dec(&lo
->plh_refcount
);
486 pnfs_layout_io_set_failed(struct pnfs_layout_hdr
*lo
, u32 iomode
)
488 struct inode
*inode
= lo
->plh_inode
;
489 struct pnfs_layout_range range
= {
492 .length
= NFS4_MAX_UINT64
,
496 spin_lock(&inode
->i_lock
);
497 pnfs_layout_set_fail_bit(lo
, pnfs_iomode_to_fail_bit(iomode
));
498 pnfs_mark_matching_lsegs_invalid(lo
, &head
, &range
, 0);
499 spin_unlock(&inode
->i_lock
);
500 pnfs_free_lseg_list(&head
);
501 dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__
,
502 iomode
== IOMODE_RW
? "RW" : "READ");
506 pnfs_layout_io_test_failed(struct pnfs_layout_hdr
*lo
, u32 iomode
)
508 unsigned long start
, end
;
509 int fail_bit
= pnfs_iomode_to_fail_bit(iomode
);
511 if (test_bit(fail_bit
, &lo
->plh_flags
) == 0)
514 start
= end
- PNFS_LAYOUTGET_RETRY_TIMEOUT
;
515 if (!time_in_range(lo
->plh_retry_timestamp
, start
, end
)) {
516 /* It is time to retry the failed layoutgets */
517 pnfs_layout_clear_fail_bit(lo
, fail_bit
);
524 pnfs_init_lseg(struct pnfs_layout_hdr
*lo
, struct pnfs_layout_segment
*lseg
,
525 const struct pnfs_layout_range
*range
,
526 const nfs4_stateid
*stateid
)
528 INIT_LIST_HEAD(&lseg
->pls_list
);
529 INIT_LIST_HEAD(&lseg
->pls_lc_list
);
530 INIT_LIST_HEAD(&lseg
->pls_commits
);
531 refcount_set(&lseg
->pls_refcount
, 1);
532 set_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
);
533 lseg
->pls_layout
= lo
;
534 lseg
->pls_range
= *range
;
535 lseg
->pls_seq
= be32_to_cpu(stateid
->seqid
);
538 static void pnfs_free_lseg(struct pnfs_layout_segment
*lseg
)
541 struct inode
*inode
= lseg
->pls_layout
->plh_inode
;
542 NFS_SERVER(inode
)->pnfs_curr_ld
->free_lseg(lseg
);
547 pnfs_layout_remove_lseg(struct pnfs_layout_hdr
*lo
,
548 struct pnfs_layout_segment
*lseg
)
550 WARN_ON(test_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
));
551 list_del_init(&lseg
->pls_list
);
552 /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
553 refcount_dec(&lo
->plh_refcount
);
554 if (test_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
))
556 if (list_empty(&lo
->plh_segs
) &&
557 !test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
) &&
558 !test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
)) {
559 if (atomic_read(&lo
->plh_outstanding
) == 0)
560 set_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
);
561 clear_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
);
566 pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr
*lo
,
567 struct pnfs_layout_segment
*lseg
)
569 if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
) &&
570 pnfs_layout_is_valid(lo
)) {
571 pnfs_set_plh_return_info(lo
, lseg
->pls_range
.iomode
, 0);
572 list_move_tail(&lseg
->pls_list
, &lo
->plh_return_segs
);
579 pnfs_put_lseg(struct pnfs_layout_segment
*lseg
)
581 struct pnfs_layout_hdr
*lo
;
587 dprintk("%s: lseg %p ref %d valid %d\n", __func__
, lseg
,
588 refcount_read(&lseg
->pls_refcount
),
589 test_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
));
591 lo
= lseg
->pls_layout
;
592 inode
= lo
->plh_inode
;
594 if (refcount_dec_and_lock(&lseg
->pls_refcount
, &inode
->i_lock
)) {
595 if (test_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
)) {
596 spin_unlock(&inode
->i_lock
);
599 pnfs_get_layout_hdr(lo
);
600 pnfs_layout_remove_lseg(lo
, lseg
);
601 if (pnfs_cache_lseg_for_layoutreturn(lo
, lseg
))
603 spin_unlock(&inode
->i_lock
);
604 pnfs_free_lseg(lseg
);
605 pnfs_put_layout_hdr(lo
);
608 EXPORT_SYMBOL_GPL(pnfs_put_lseg
);
611 * is l2 fully contained in l1?
613 * [----------------------------------)
618 pnfs_lseg_range_contained(const struct pnfs_layout_range
*l1
,
619 const struct pnfs_layout_range
*l2
)
621 u64 start1
= l1
->offset
;
622 u64 end1
= pnfs_end_offset(start1
, l1
->length
);
623 u64 start2
= l2
->offset
;
624 u64 end2
= pnfs_end_offset(start2
, l2
->length
);
626 return (start1
<= start2
) && (end1
>= end2
);
629 static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment
*lseg
,
630 struct list_head
*tmp_list
)
632 if (!refcount_dec_and_test(&lseg
->pls_refcount
))
634 pnfs_layout_remove_lseg(lseg
->pls_layout
, lseg
);
635 list_add(&lseg
->pls_list
, tmp_list
);
639 /* Returns 1 if lseg is removed from list, 0 otherwise */
640 static int mark_lseg_invalid(struct pnfs_layout_segment
*lseg
,
641 struct list_head
*tmp_list
)
645 if (test_and_clear_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
)) {
646 /* Remove the reference keeping the lseg in the
647 * list. It will now be removed when all
648 * outstanding io is finished.
650 dprintk("%s: lseg %p ref %d\n", __func__
, lseg
,
651 refcount_read(&lseg
->pls_refcount
));
652 if (pnfs_lseg_dec_and_remove_zero(lseg
, tmp_list
))
659 pnfs_should_free_range(const struct pnfs_layout_range
*lseg_range
,
660 const struct pnfs_layout_range
*recall_range
)
662 return (recall_range
->iomode
== IOMODE_ANY
||
663 lseg_range
->iomode
== recall_range
->iomode
) &&
664 pnfs_lseg_range_intersecting(lseg_range
, recall_range
);
668 pnfs_match_lseg_recall(const struct pnfs_layout_segment
*lseg
,
669 const struct pnfs_layout_range
*recall_range
,
672 if (seq
!= 0 && pnfs_seqid_is_newer(lseg
->pls_seq
, seq
))
674 if (recall_range
== NULL
)
676 return pnfs_should_free_range(&lseg
->pls_range
, recall_range
);
/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
695 pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr
*lo
,
696 struct list_head
*tmp_list
,
697 const struct pnfs_layout_range
*recall_range
,
700 struct pnfs_layout_segment
*lseg
, *next
;
703 dprintk("%s:Begin lo %p\n", __func__
, lo
);
705 if (list_empty(&lo
->plh_segs
))
707 list_for_each_entry_safe(lseg
, next
, &lo
->plh_segs
, pls_list
)
708 if (pnfs_match_lseg_recall(lseg
, recall_range
, seq
)) {
709 dprintk("%s: freeing lseg %p iomode %d seq %u "
710 "offset %llu length %llu\n", __func__
,
711 lseg
, lseg
->pls_range
.iomode
, lseg
->pls_seq
,
712 lseg
->pls_range
.offset
, lseg
->pls_range
.length
);
713 if (!mark_lseg_invalid(lseg
, tmp_list
))
716 dprintk("%s:Return %i\n", __func__
, remaining
);
721 pnfs_free_returned_lsegs(struct pnfs_layout_hdr
*lo
,
722 struct list_head
*free_me
,
723 const struct pnfs_layout_range
*range
,
726 struct pnfs_layout_segment
*lseg
, *next
;
728 list_for_each_entry_safe(lseg
, next
, &lo
->plh_return_segs
, pls_list
) {
729 if (pnfs_match_lseg_recall(lseg
, range
, seq
))
730 list_move_tail(&lseg
->pls_list
, free_me
);
734 /* note free_me must contain lsegs from a single layout_hdr */
736 pnfs_free_lseg_list(struct list_head
*free_me
)
738 struct pnfs_layout_segment
*lseg
, *tmp
;
740 if (list_empty(free_me
))
743 list_for_each_entry_safe(lseg
, tmp
, free_me
, pls_list
) {
744 list_del(&lseg
->pls_list
);
745 pnfs_free_lseg(lseg
);
749 static struct pnfs_layout_hdr
*__pnfs_destroy_layout(struct nfs_inode
*nfsi
)
751 struct pnfs_layout_hdr
*lo
;
754 spin_lock(&nfsi
->vfs_inode
.i_lock
);
757 pnfs_get_layout_hdr(lo
);
758 pnfs_mark_layout_stateid_invalid(lo
, &tmp_list
);
759 pnfs_layout_clear_fail_bit(lo
, NFS_LAYOUT_RO_FAILED
);
760 pnfs_layout_clear_fail_bit(lo
, NFS_LAYOUT_RW_FAILED
);
761 spin_unlock(&nfsi
->vfs_inode
.i_lock
);
762 pnfs_free_lseg_list(&tmp_list
);
763 nfs_commit_inode(&nfsi
->vfs_inode
, 0);
764 pnfs_put_layout_hdr(lo
);
766 spin_unlock(&nfsi
->vfs_inode
.i_lock
);
770 void pnfs_destroy_layout(struct nfs_inode
*nfsi
)
772 __pnfs_destroy_layout(nfsi
);
774 EXPORT_SYMBOL_GPL(pnfs_destroy_layout
);
776 static bool pnfs_layout_removed(struct nfs_inode
*nfsi
,
777 struct pnfs_layout_hdr
*lo
)
781 spin_lock(&nfsi
->vfs_inode
.i_lock
);
782 ret
= nfsi
->layout
!= lo
;
783 spin_unlock(&nfsi
->vfs_inode
.i_lock
);
787 void pnfs_destroy_layout_final(struct nfs_inode
*nfsi
)
789 struct pnfs_layout_hdr
*lo
= __pnfs_destroy_layout(nfsi
);
792 wait_var_event(lo
, pnfs_layout_removed(nfsi
, lo
));
796 pnfs_layout_add_bulk_destroy_list(struct inode
*inode
,
797 struct list_head
*layout_list
)
799 struct pnfs_layout_hdr
*lo
;
802 spin_lock(&inode
->i_lock
);
803 lo
= NFS_I(inode
)->layout
;
804 if (lo
!= NULL
&& list_empty(&lo
->plh_bulk_destroy
)) {
805 pnfs_get_layout_hdr(lo
);
806 list_add(&lo
->plh_bulk_destroy
, layout_list
);
809 spin_unlock(&inode
->i_lock
);
813 /* Caller must hold rcu_read_lock and clp->cl_lock */
815 pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client
*clp
,
816 struct nfs_server
*server
,
817 struct list_head
*layout_list
)
818 __must_hold(&clp
->cl_lock
)
821 struct pnfs_layout_hdr
*lo
, *next
;
824 list_for_each_entry_safe(lo
, next
, &server
->layouts
, plh_layouts
) {
825 if (test_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
) ||
826 test_bit(NFS_LAYOUT_INODE_FREEING
, &lo
->plh_flags
) ||
827 !list_empty(&lo
->plh_bulk_destroy
))
829 /* If the sb is being destroyed, just bail */
830 if (!nfs_sb_active(server
->super
))
832 inode
= pnfs_grab_inode_layout_hdr(lo
);
834 if (test_and_clear_bit(NFS_LAYOUT_HASHED
, &lo
->plh_flags
))
835 list_del_rcu(&lo
->plh_layouts
);
836 if (pnfs_layout_add_bulk_destroy_list(inode
,
840 spin_unlock(&clp
->cl_lock
);
844 spin_unlock(&clp
->cl_lock
);
846 nfs_sb_deactive(server
->super
);
847 spin_lock(&clp
->cl_lock
);
855 pnfs_layout_free_bulk_destroy_list(struct list_head
*layout_list
,
858 struct pnfs_layout_hdr
*lo
;
860 LIST_HEAD(lseg_list
);
863 while (!list_empty(layout_list
)) {
864 lo
= list_entry(layout_list
->next
, struct pnfs_layout_hdr
,
866 dprintk("%s freeing layout for inode %lu\n", __func__
,
867 lo
->plh_inode
->i_ino
);
868 inode
= lo
->plh_inode
;
870 pnfs_layoutcommit_inode(inode
, false);
872 spin_lock(&inode
->i_lock
);
873 list_del_init(&lo
->plh_bulk_destroy
);
874 if (pnfs_mark_layout_stateid_invalid(lo
, &lseg_list
)) {
876 set_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
);
879 spin_unlock(&inode
->i_lock
);
880 pnfs_free_lseg_list(&lseg_list
);
881 /* Free all lsegs that are attached to commit buckets */
882 nfs_commit_inode(inode
, 0);
883 pnfs_put_layout_hdr(lo
);
884 nfs_iput_and_deactive(inode
);
890 pnfs_destroy_layouts_byfsid(struct nfs_client
*clp
,
891 struct nfs_fsid
*fsid
,
894 struct nfs_server
*server
;
895 LIST_HEAD(layout_list
);
897 spin_lock(&clp
->cl_lock
);
900 list_for_each_entry_rcu(server
, &clp
->cl_superblocks
, client_link
) {
901 if (memcmp(&server
->fsid
, fsid
, sizeof(*fsid
)) != 0)
903 if (pnfs_layout_bulk_destroy_byserver_locked(clp
,
909 spin_unlock(&clp
->cl_lock
);
911 if (list_empty(&layout_list
))
913 return pnfs_layout_free_bulk_destroy_list(&layout_list
, is_recall
);
917 pnfs_destroy_layouts_byclid(struct nfs_client
*clp
,
920 struct nfs_server
*server
;
921 LIST_HEAD(layout_list
);
923 spin_lock(&clp
->cl_lock
);
926 list_for_each_entry_rcu(server
, &clp
->cl_superblocks
, client_link
) {
927 if (pnfs_layout_bulk_destroy_byserver_locked(clp
,
933 spin_unlock(&clp
->cl_lock
);
935 if (list_empty(&layout_list
))
937 return pnfs_layout_free_bulk_destroy_list(&layout_list
, is_recall
);
941 * Called by the state manager to remove all layouts established under an
945 pnfs_destroy_all_layouts(struct nfs_client
*clp
)
947 nfs4_deviceid_mark_client_invalid(clp
);
948 nfs4_deviceid_purge_client(clp
);
950 pnfs_destroy_layouts_byclid(clp
, false);
954 pnfs_set_layout_cred(struct pnfs_layout_hdr
*lo
, const struct cred
*cred
)
956 const struct cred
*old
;
958 if (cred
&& cred_fscmp(lo
->plh_lc_cred
, cred
) != 0) {
959 old
= xchg(&lo
->plh_lc_cred
, get_cred(cred
));
964 /* update lo->plh_stateid with new if is more recent */
966 pnfs_set_layout_stateid(struct pnfs_layout_hdr
*lo
, const nfs4_stateid
*new,
967 const struct cred
*cred
, bool update_barrier
)
969 u32 oldseq
, newseq
, new_barrier
= 0;
971 oldseq
= be32_to_cpu(lo
->plh_stateid
.seqid
);
972 newseq
= be32_to_cpu(new->seqid
);
974 if (!pnfs_layout_is_valid(lo
)) {
975 pnfs_set_layout_cred(lo
, cred
);
976 nfs4_stateid_copy(&lo
->plh_stateid
, new);
977 lo
->plh_barrier
= newseq
;
978 pnfs_clear_layoutreturn_info(lo
);
979 clear_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
);
982 if (pnfs_seqid_is_newer(newseq
, oldseq
)) {
983 nfs4_stateid_copy(&lo
->plh_stateid
, new);
985 * Because of wraparound, we want to keep the barrier
986 * "close" to the current seqids.
988 new_barrier
= newseq
- atomic_read(&lo
->plh_outstanding
);
991 new_barrier
= be32_to_cpu(new->seqid
);
992 else if (new_barrier
== 0)
994 pnfs_barrier_update(lo
, new_barrier
);
998 pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr
*lo
,
999 const nfs4_stateid
*stateid
)
1001 u32 seqid
= be32_to_cpu(stateid
->seqid
);
1003 return !pnfs_seqid_is_newer(seqid
, lo
->plh_barrier
) && lo
->plh_barrier
;
1006 /* lget is set to 1 if called from inside send_layoutget call chain */
1008 pnfs_layoutgets_blocked(const struct pnfs_layout_hdr
*lo
)
1010 return lo
->plh_block_lgets
||
1011 test_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
);
1014 static struct nfs_server
*
1015 pnfs_find_server(struct inode
*inode
, struct nfs_open_context
*ctx
)
1017 struct nfs_server
*server
;
1020 server
= NFS_SERVER(inode
);
1022 struct dentry
*parent_dir
= dget_parent(ctx
->dentry
);
1023 server
= NFS_SERVER(parent_dir
->d_inode
);
1029 static void nfs4_free_pages(struct page
**pages
, size_t size
)
1036 for (i
= 0; i
< size
; i
++) {
1039 __free_page(pages
[i
]);
1044 static struct page
**nfs4_alloc_pages(size_t size
, gfp_t gfp_flags
)
1046 struct page
**pages
;
1049 pages
= kmalloc_array(size
, sizeof(struct page
*), gfp_flags
);
1051 dprintk("%s: can't alloc array of %zu pages\n", __func__
, size
);
1055 for (i
= 0; i
< size
; i
++) {
1056 pages
[i
] = alloc_page(gfp_flags
);
1058 dprintk("%s: failed to allocate page\n", __func__
);
1059 nfs4_free_pages(pages
, i
);
1067 static struct nfs4_layoutget
*
1068 pnfs_alloc_init_layoutget_args(struct inode
*ino
,
1069 struct nfs_open_context
*ctx
,
1070 const nfs4_stateid
*stateid
,
1071 const struct pnfs_layout_range
*range
,
1074 struct nfs_server
*server
= pnfs_find_server(ino
, ctx
);
1075 size_t max_reply_sz
= server
->pnfs_curr_ld
->max_layoutget_response
;
1076 size_t max_pages
= max_response_pages(server
);
1077 struct nfs4_layoutget
*lgp
;
1079 dprintk("--> %s\n", __func__
);
1081 lgp
= kzalloc(sizeof(*lgp
), gfp_flags
);
1086 size_t npages
= (max_reply_sz
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
1087 if (npages
< max_pages
)
1091 lgp
->args
.layout
.pages
= nfs4_alloc_pages(max_pages
, gfp_flags
);
1092 if (!lgp
->args
.layout
.pages
) {
1096 lgp
->args
.layout
.pglen
= max_pages
* PAGE_SIZE
;
1097 lgp
->res
.layoutp
= &lgp
->args
.layout
;
1099 /* Don't confuse uninitialised result and success */
1100 lgp
->res
.status
= -NFS4ERR_DELAY
;
1102 lgp
->args
.minlength
= PAGE_SIZE
;
1103 if (lgp
->args
.minlength
> range
->length
)
1104 lgp
->args
.minlength
= range
->length
;
1106 loff_t i_size
= i_size_read(ino
);
1108 if (range
->iomode
== IOMODE_READ
) {
1109 if (range
->offset
>= i_size
)
1110 lgp
->args
.minlength
= 0;
1111 else if (i_size
- range
->offset
< lgp
->args
.minlength
)
1112 lgp
->args
.minlength
= i_size
- range
->offset
;
1115 lgp
->args
.maxcount
= PNFS_LAYOUT_MAXSIZE
;
1116 pnfs_copy_range(&lgp
->args
.range
, range
);
1117 lgp
->args
.type
= server
->pnfs_curr_ld
->id
;
1118 lgp
->args
.inode
= ino
;
1119 lgp
->args
.ctx
= get_nfs_open_context(ctx
);
1120 nfs4_stateid_copy(&lgp
->args
.stateid
, stateid
);
1121 lgp
->gfp_flags
= gfp_flags
;
1122 lgp
->cred
= ctx
->cred
;
1126 void pnfs_layoutget_free(struct nfs4_layoutget
*lgp
)
1128 size_t max_pages
= lgp
->args
.layout
.pglen
/ PAGE_SIZE
;
1130 nfs4_free_pages(lgp
->args
.layout
.pages
, max_pages
);
1131 if (lgp
->args
.inode
)
1132 pnfs_put_layout_hdr(NFS_I(lgp
->args
.inode
)->layout
);
1133 put_nfs_open_context(lgp
->args
.ctx
);
1137 static void pnfs_clear_layoutcommit(struct inode
*inode
,
1138 struct list_head
*head
)
1140 struct nfs_inode
*nfsi
= NFS_I(inode
);
1141 struct pnfs_layout_segment
*lseg
, *tmp
;
1143 if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT
, &nfsi
->flags
))
1145 list_for_each_entry_safe(lseg
, tmp
, &nfsi
->layout
->plh_segs
, pls_list
) {
1146 if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT
, &lseg
->pls_flags
))
1148 pnfs_lseg_dec_and_remove_zero(lseg
, head
);
1152 void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr
*lo
,
1153 const nfs4_stateid
*arg_stateid
,
1154 const struct pnfs_layout_range
*range
,
1155 const nfs4_stateid
*stateid
)
1157 struct inode
*inode
= lo
->plh_inode
;
1160 spin_lock(&inode
->i_lock
);
1161 if (!pnfs_layout_is_valid(lo
) ||
1162 !nfs4_stateid_match_other(&lo
->plh_stateid
, arg_stateid
))
1165 u32 seq
= be32_to_cpu(arg_stateid
->seqid
);
1167 pnfs_mark_matching_lsegs_invalid(lo
, &freeme
, range
, seq
);
1168 pnfs_free_returned_lsegs(lo
, &freeme
, range
, seq
);
1169 pnfs_set_layout_stateid(lo
, stateid
, NULL
, true);
1171 pnfs_mark_layout_stateid_invalid(lo
, &freeme
);
1173 pnfs_clear_layoutreturn_waitbit(lo
);
1174 spin_unlock(&inode
->i_lock
);
1175 pnfs_free_lseg_list(&freeme
);
1180 pnfs_prepare_layoutreturn(struct pnfs_layout_hdr
*lo
,
1181 nfs4_stateid
*stateid
,
1182 const struct cred
**cred
,
1183 enum pnfs_iomode
*iomode
)
1185 /* Serialise LAYOUTGET/LAYOUTRETURN */
1186 if (atomic_read(&lo
->plh_outstanding
) != 0)
1188 if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
))
1190 set_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
);
1191 pnfs_get_layout_hdr(lo
);
1192 nfs4_stateid_copy(stateid
, &lo
->plh_stateid
);
1193 *cred
= get_cred(lo
->plh_lc_cred
);
1194 if (test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
)) {
1195 if (lo
->plh_return_seq
!= 0)
1196 stateid
->seqid
= cpu_to_be32(lo
->plh_return_seq
);
1198 *iomode
= lo
->plh_return_iomode
;
1199 pnfs_clear_layoutreturn_info(lo
);
1200 } else if (iomode
!= NULL
)
1201 *iomode
= IOMODE_ANY
;
1202 pnfs_barrier_update(lo
, be32_to_cpu(stateid
->seqid
));
1207 pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args
*args
,
1208 struct pnfs_layout_hdr
*lo
,
1209 const nfs4_stateid
*stateid
,
1210 enum pnfs_iomode iomode
)
1212 struct inode
*inode
= lo
->plh_inode
;
1214 args
->layout_type
= NFS_SERVER(inode
)->pnfs_curr_ld
->id
;
1215 args
->inode
= inode
;
1216 args
->range
.iomode
= iomode
;
1217 args
->range
.offset
= 0;
1218 args
->range
.length
= NFS4_MAX_UINT64
;
1220 nfs4_stateid_copy(&args
->stateid
, stateid
);
1224 pnfs_send_layoutreturn(struct pnfs_layout_hdr
*lo
,
1225 const nfs4_stateid
*stateid
,
1226 const struct cred
**pcred
,
1227 enum pnfs_iomode iomode
,
1230 struct inode
*ino
= lo
->plh_inode
;
1231 struct pnfs_layoutdriver_type
*ld
= NFS_SERVER(ino
)->pnfs_curr_ld
;
1232 struct nfs4_layoutreturn
*lrp
;
1233 const struct cred
*cred
= *pcred
;
1237 lrp
= kzalloc(sizeof(*lrp
), GFP_NOFS
);
1238 if (unlikely(lrp
== NULL
)) {
1240 spin_lock(&ino
->i_lock
);
1241 pnfs_clear_layoutreturn_waitbit(lo
);
1242 spin_unlock(&ino
->i_lock
);
1244 pnfs_put_layout_hdr(lo
);
1248 pnfs_init_layoutreturn_args(&lrp
->args
, lo
, stateid
, iomode
);
1249 lrp
->args
.ld_private
= &lrp
->ld_private
;
1250 lrp
->clp
= NFS_SERVER(ino
)->nfs_client
;
1252 if (ld
->prepare_layoutreturn
)
1253 ld
->prepare_layoutreturn(&lrp
->args
);
1255 status
= nfs4_proc_layoutreturn(lrp
, sync
);
1257 dprintk("<-- %s status: %d\n", __func__
, status
);
1262 pnfs_layout_segments_returnable(struct pnfs_layout_hdr
*lo
,
1263 enum pnfs_iomode iomode
,
1266 struct pnfs_layout_range recall_range
= {
1267 .length
= NFS4_MAX_UINT64
,
1270 return pnfs_mark_matching_lsegs_return(lo
, &lo
->plh_return_segs
,
1271 &recall_range
, seq
) != -EBUSY
;
1274 /* Return true if layoutreturn is needed */
1276 pnfs_layout_need_return(struct pnfs_layout_hdr
*lo
)
1278 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
))
1280 return pnfs_layout_segments_returnable(lo
, lo
->plh_return_iomode
,
1281 lo
->plh_return_seq
);
1284 static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr
*lo
)
1286 struct inode
*inode
= lo
->plh_inode
;
1288 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
))
1290 spin_lock(&inode
->i_lock
);
1291 if (pnfs_layout_need_return(lo
)) {
1292 const struct cred
*cred
;
1293 nfs4_stateid stateid
;
1294 enum pnfs_iomode iomode
;
1297 send
= pnfs_prepare_layoutreturn(lo
, &stateid
, &cred
, &iomode
);
1298 spin_unlock(&inode
->i_lock
);
1300 /* Send an async layoutreturn so we dont deadlock */
1301 pnfs_send_layoutreturn(lo
, &stateid
, &cred
, iomode
, false);
1304 spin_unlock(&inode
->i_lock
);
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
1316 _pnfs_return_layout(struct inode
*ino
)
1318 struct pnfs_layout_hdr
*lo
= NULL
;
1319 struct nfs_inode
*nfsi
= NFS_I(ino
);
1320 LIST_HEAD(tmp_list
);
1321 const struct cred
*cred
;
1322 nfs4_stateid stateid
;
1324 bool send
, valid_layout
;
1326 dprintk("NFS: %s for inode %lu\n", __func__
, ino
->i_ino
);
1328 spin_lock(&ino
->i_lock
);
1331 spin_unlock(&ino
->i_lock
);
1332 dprintk("NFS: %s no layout to return\n", __func__
);
1335 /* Reference matched in nfs4_layoutreturn_release */
1336 pnfs_get_layout_hdr(lo
);
1337 /* Is there an outstanding layoutreturn ? */
1338 if (test_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
)) {
1339 spin_unlock(&ino
->i_lock
);
1340 if (wait_on_bit(&lo
->plh_flags
, NFS_LAYOUT_RETURN
,
1341 TASK_UNINTERRUPTIBLE
))
1342 goto out_put_layout_hdr
;
1343 spin_lock(&ino
->i_lock
);
1345 valid_layout
= pnfs_layout_is_valid(lo
);
1346 pnfs_clear_layoutcommit(ino
, &tmp_list
);
1347 pnfs_mark_matching_lsegs_return(lo
, &tmp_list
, NULL
, 0);
1349 if (NFS_SERVER(ino
)->pnfs_curr_ld
->return_range
) {
1350 struct pnfs_layout_range range
= {
1351 .iomode
= IOMODE_ANY
,
1353 .length
= NFS4_MAX_UINT64
,
1355 NFS_SERVER(ino
)->pnfs_curr_ld
->return_range(lo
, &range
);
1358 /* Don't send a LAYOUTRETURN if list was initially empty */
1359 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
) ||
1361 spin_unlock(&ino
->i_lock
);
1362 dprintk("NFS: %s no layout segments to return\n", __func__
);
1363 goto out_wait_layoutreturn
;
1366 send
= pnfs_prepare_layoutreturn(lo
, &stateid
, &cred
, NULL
);
1367 spin_unlock(&ino
->i_lock
);
1369 status
= pnfs_send_layoutreturn(lo
, &stateid
, &cred
, IOMODE_ANY
, true);
1370 out_wait_layoutreturn
:
1371 wait_on_bit(&lo
->plh_flags
, NFS_LAYOUT_RETURN
, TASK_UNINTERRUPTIBLE
);
1373 pnfs_free_lseg_list(&tmp_list
);
1374 pnfs_put_layout_hdr(lo
);
1376 dprintk("<-- %s status: %d\n", __func__
, status
);
1381 pnfs_commit_and_return_layout(struct inode
*inode
)
1383 struct pnfs_layout_hdr
*lo
;
1386 spin_lock(&inode
->i_lock
);
1387 lo
= NFS_I(inode
)->layout
;
1389 spin_unlock(&inode
->i_lock
);
1392 pnfs_get_layout_hdr(lo
);
1393 /* Block new layoutgets and read/write to ds */
1394 lo
->plh_block_lgets
++;
1395 spin_unlock(&inode
->i_lock
);
1396 filemap_fdatawait(inode
->i_mapping
);
1397 ret
= pnfs_layoutcommit_inode(inode
, true);
1399 ret
= _pnfs_return_layout(inode
);
1400 spin_lock(&inode
->i_lock
);
1401 lo
->plh_block_lgets
--;
1402 spin_unlock(&inode
->i_lock
);
1403 pnfs_put_layout_hdr(lo
);
1407 bool pnfs_roc(struct inode
*ino
,
1408 struct nfs4_layoutreturn_args
*args
,
1409 struct nfs4_layoutreturn_res
*res
,
1410 const struct cred
*cred
)
1412 struct nfs_inode
*nfsi
= NFS_I(ino
);
1413 struct nfs_open_context
*ctx
;
1414 struct nfs4_state
*state
;
1415 struct pnfs_layout_hdr
*lo
;
1416 struct pnfs_layout_segment
*lseg
, *next
;
1417 const struct cred
*lc_cred
;
1418 nfs4_stateid stateid
;
1419 enum pnfs_iomode iomode
= 0;
1420 bool layoutreturn
= false, roc
= false;
1421 bool skip_read
= false;
1423 if (!nfs_have_layout(ino
))
1427 spin_lock(&ino
->i_lock
);
1429 if (!lo
|| !pnfs_layout_is_valid(lo
) ||
1430 test_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
)) {
1434 pnfs_get_layout_hdr(lo
);
1435 if (test_bit(NFS_LAYOUT_RETURN_LOCK
, &lo
->plh_flags
)) {
1436 spin_unlock(&ino
->i_lock
);
1438 wait_on_bit(&lo
->plh_flags
, NFS_LAYOUT_RETURN
,
1439 TASK_UNINTERRUPTIBLE
);
1440 pnfs_put_layout_hdr(lo
);
1444 /* no roc if we hold a delegation */
1445 if (nfs4_check_delegation(ino
, FMODE_READ
)) {
1446 if (nfs4_check_delegation(ino
, FMODE_WRITE
))
1451 list_for_each_entry_rcu(ctx
, &nfsi
->open_files
, list
) {
1455 /* Don't return layout if there is open file state */
1456 if (state
->state
& FMODE_WRITE
)
1458 if (state
->state
& FMODE_READ
)
1463 list_for_each_entry_safe(lseg
, next
, &lo
->plh_segs
, pls_list
) {
1464 if (skip_read
&& lseg
->pls_range
.iomode
== IOMODE_READ
)
1466 /* If we are sending layoutreturn, invalidate all valid lsegs */
1467 if (!test_and_clear_bit(NFS_LSEG_ROC
, &lseg
->pls_flags
))
1470 * Note: mark lseg for return so pnfs_layout_remove_lseg
1471 * doesn't invalidate the layout for us.
1473 set_bit(NFS_LSEG_LAYOUTRETURN
, &lseg
->pls_flags
);
1474 if (!mark_lseg_invalid(lseg
, &lo
->plh_return_segs
))
1476 pnfs_set_plh_return_info(lo
, lseg
->pls_range
.iomode
, 0);
1479 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED
, &lo
->plh_flags
))
1482 /* ROC in two conditions:
1483 * 1. there are ROC lsegs
1484 * 2. we don't send layoutreturn
1486 /* lo ref dropped in pnfs_roc_release() */
1487 layoutreturn
= pnfs_prepare_layoutreturn(lo
, &stateid
, &lc_cred
, &iomode
);
1488 /* If the creds don't match, we can't compound the layoutreturn */
1489 if (!layoutreturn
|| cred_fscmp(cred
, lc_cred
) != 0)
1493 pnfs_init_layoutreturn_args(args
, lo
, &stateid
, iomode
);
1494 res
->lrs_present
= 0;
1495 layoutreturn
= false;
1499 spin_unlock(&ino
->i_lock
);
1501 pnfs_layoutcommit_inode(ino
, true);
1503 struct pnfs_layoutdriver_type
*ld
= NFS_SERVER(ino
)->pnfs_curr_ld
;
1504 if (ld
->prepare_layoutreturn
)
1505 ld
->prepare_layoutreturn(args
);
1506 pnfs_put_layout_hdr(lo
);
1510 pnfs_send_layoutreturn(lo
, &stateid
, &lc_cred
, iomode
, true);
1511 pnfs_put_layout_hdr(lo
);
1515 int pnfs_roc_done(struct rpc_task
*task
, struct nfs4_layoutreturn_args
**argpp
,
1516 struct nfs4_layoutreturn_res
**respp
, int *ret
)
1518 struct nfs4_layoutreturn_args
*arg
= *argpp
;
1519 int retval
= -EAGAIN
;
1523 /* Handle Layoutreturn errors */
1528 case -NFS4ERR_NOMATCHING_LAYOUT
:
1529 /* Was there an RPC level error? If not, retry */
1530 if (task
->tk_rpc_status
== 0)
1532 /* If the call was not sent, let caller handle it */
1533 if (!RPC_WAS_SENT(task
))
1536 * Otherwise, assume the call succeeded and
1537 * that we need to release the layout
1540 (*respp
)->lrs_present
= 0;
1543 case -NFS4ERR_DELAY
:
1544 /* Let the caller handle the retry */
1545 *ret
= -NFS4ERR_NOMATCHING_LAYOUT
;
1547 case -NFS4ERR_OLD_STATEID
:
1548 if (!nfs4_layout_refresh_old_stateid(&arg
->stateid
,
1549 &arg
->range
, arg
->inode
))
1551 *ret
= -NFS4ERR_NOMATCHING_LAYOUT
;
1559 void pnfs_roc_release(struct nfs4_layoutreturn_args
*args
,
1560 struct nfs4_layoutreturn_res
*res
,
1563 struct pnfs_layout_hdr
*lo
= args
->layout
;
1564 struct inode
*inode
= args
->inode
;
1565 const nfs4_stateid
*res_stateid
= NULL
;
1566 struct nfs4_xdr_opaque_data
*ld_private
= args
->ld_private
;
1569 case -NFS4ERR_NOMATCHING_LAYOUT
:
1570 spin_lock(&inode
->i_lock
);
1571 if (pnfs_layout_is_valid(lo
) &&
1572 nfs4_stateid_match_other(&args
->stateid
, &lo
->plh_stateid
))
1573 pnfs_set_plh_return_info(lo
, args
->range
.iomode
, 0);
1574 pnfs_clear_layoutreturn_waitbit(lo
);
1575 spin_unlock(&inode
->i_lock
);
1578 if (res
->lrs_present
)
1579 res_stateid
= &res
->stateid
;
1582 pnfs_layoutreturn_free_lsegs(lo
, &args
->stateid
, &args
->range
,
1585 trace_nfs4_layoutreturn_on_close(args
->inode
, &args
->stateid
, ret
);
1586 if (ld_private
&& ld_private
->ops
&& ld_private
->ops
->free
)
1587 ld_private
->ops
->free(ld_private
);
1588 pnfs_put_layout_hdr(lo
);
1591 bool pnfs_wait_on_layoutreturn(struct inode
*ino
, struct rpc_task
*task
)
1593 struct nfs_inode
*nfsi
= NFS_I(ino
);
1594 struct pnfs_layout_hdr
*lo
;
1597 /* we might not have grabbed lo reference. so need to check under
1599 spin_lock(&ino
->i_lock
);
1601 if (lo
&& test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
)) {
1602 rpc_sleep_on(&NFS_SERVER(ino
)->roc_rpcwaitq
, task
, NULL
);
1605 spin_unlock(&ino
->i_lock
);
1610 * Compare two layout segments for sorting into layout cache.
1611 * We want to preferentially return RW over RO layouts, so ensure those
1615 pnfs_lseg_range_cmp(const struct pnfs_layout_range
*l1
,
1616 const struct pnfs_layout_range
*l2
)
1620 /* high offset > low offset */
1621 d
= l1
->offset
- l2
->offset
;
1625 /* short length > long length */
1626 d
= l2
->length
- l1
->length
;
1630 /* read > read/write */
1631 return (int)(l1
->iomode
== IOMODE_READ
) - (int)(l2
->iomode
== IOMODE_READ
);
1635 pnfs_lseg_range_is_after(const struct pnfs_layout_range
*l1
,
1636 const struct pnfs_layout_range
*l2
)
1638 return pnfs_lseg_range_cmp(l1
, l2
) > 0;
1642 pnfs_lseg_no_merge(struct pnfs_layout_segment
*lseg
,
1643 struct pnfs_layout_segment
*old
)
1649 pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr
*lo
,
1650 struct pnfs_layout_segment
*lseg
,
1651 bool (*is_after
)(const struct pnfs_layout_range
*,
1652 const struct pnfs_layout_range
*),
1653 bool (*do_merge
)(struct pnfs_layout_segment
*,
1654 struct pnfs_layout_segment
*),
1655 struct list_head
*free_me
)
1657 struct pnfs_layout_segment
*lp
, *tmp
;
1659 dprintk("%s:Begin\n", __func__
);
1661 list_for_each_entry_safe(lp
, tmp
, &lo
->plh_segs
, pls_list
) {
1662 if (test_bit(NFS_LSEG_VALID
, &lp
->pls_flags
) == 0)
1664 if (do_merge(lseg
, lp
)) {
1665 mark_lseg_invalid(lp
, free_me
);
1668 if (is_after(&lseg
->pls_range
, &lp
->pls_range
))
1670 list_add_tail(&lseg
->pls_list
, &lp
->pls_list
);
1671 dprintk("%s: inserted lseg %p "
1672 "iomode %d offset %llu length %llu before "
1673 "lp %p iomode %d offset %llu length %llu\n",
1674 __func__
, lseg
, lseg
->pls_range
.iomode
,
1675 lseg
->pls_range
.offset
, lseg
->pls_range
.length
,
1676 lp
, lp
->pls_range
.iomode
, lp
->pls_range
.offset
,
1677 lp
->pls_range
.length
);
1680 list_add_tail(&lseg
->pls_list
, &lo
->plh_segs
);
1681 dprintk("%s: inserted lseg %p "
1682 "iomode %d offset %llu length %llu at tail\n",
1683 __func__
, lseg
, lseg
->pls_range
.iomode
,
1684 lseg
->pls_range
.offset
, lseg
->pls_range
.length
);
1686 pnfs_get_layout_hdr(lo
);
1688 dprintk("%s:Return\n", __func__
);
1690 EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg
);
1693 pnfs_layout_insert_lseg(struct pnfs_layout_hdr
*lo
,
1694 struct pnfs_layout_segment
*lseg
,
1695 struct list_head
*free_me
)
1697 struct inode
*inode
= lo
->plh_inode
;
1698 struct pnfs_layoutdriver_type
*ld
= NFS_SERVER(inode
)->pnfs_curr_ld
;
1700 if (ld
->add_lseg
!= NULL
)
1701 ld
->add_lseg(lo
, lseg
, free_me
);
1703 pnfs_generic_layout_insert_lseg(lo
, lseg
,
1704 pnfs_lseg_range_is_after
,
1709 static struct pnfs_layout_hdr
*
1710 alloc_init_layout_hdr(struct inode
*ino
,
1711 struct nfs_open_context
*ctx
,
1714 struct pnfs_layout_hdr
*lo
;
1716 lo
= pnfs_alloc_layout_hdr(ino
, gfp_flags
);
1719 refcount_set(&lo
->plh_refcount
, 1);
1720 INIT_LIST_HEAD(&lo
->plh_layouts
);
1721 INIT_LIST_HEAD(&lo
->plh_segs
);
1722 INIT_LIST_HEAD(&lo
->plh_return_segs
);
1723 INIT_LIST_HEAD(&lo
->plh_bulk_destroy
);
1724 lo
->plh_inode
= ino
;
1725 lo
->plh_lc_cred
= get_cred(ctx
->cred
);
1726 lo
->plh_flags
|= 1 << NFS_LAYOUT_INVALID_STID
;
1730 static struct pnfs_layout_hdr
*
1731 pnfs_find_alloc_layout(struct inode
*ino
,
1732 struct nfs_open_context
*ctx
,
1734 __releases(&ino
->i_lock
)
1735 __acquires(&ino
->i_lock
)
1737 struct nfs_inode
*nfsi
= NFS_I(ino
);
1738 struct pnfs_layout_hdr
*new = NULL
;
1740 dprintk("%s Begin ino=%p layout=%p\n", __func__
, ino
, nfsi
->layout
);
1742 if (nfsi
->layout
!= NULL
)
1744 spin_unlock(&ino
->i_lock
);
1745 new = alloc_init_layout_hdr(ino
, ctx
, gfp_flags
);
1746 spin_lock(&ino
->i_lock
);
1748 if (likely(nfsi
->layout
== NULL
)) { /* Won the race? */
1751 } else if (new != NULL
)
1752 pnfs_free_layout_hdr(new);
1754 pnfs_get_layout_hdr(nfsi
->layout
);
1755 return nfsi
->layout
;
/*
 * iomode matching rules:
 * iomode	lseg	strict match
 *		(return false)
 * -----	-----	------ -----
 * ANY		READ	N/A    true
 * ANY		RW	N/A    true
 * RW		READ	N/A    false
 * RW		RW	N/A    true
 * READ		READ	N/A    true
 * READ		RW	true   false
 * READ		RW	false  true
 */
1772 pnfs_lseg_range_match(const struct pnfs_layout_range
*ls_range
,
1773 const struct pnfs_layout_range
*range
,
1776 struct pnfs_layout_range range1
;
1778 if ((range
->iomode
== IOMODE_RW
&&
1779 ls_range
->iomode
!= IOMODE_RW
) ||
1780 (range
->iomode
!= ls_range
->iomode
&&
1782 !pnfs_lseg_range_intersecting(ls_range
, range
))
1785 /* range1 covers only the first byte in the range */
1788 return pnfs_lseg_range_contained(ls_range
, &range1
);
1792 * lookup range in layout
1794 static struct pnfs_layout_segment
*
1795 pnfs_find_lseg(struct pnfs_layout_hdr
*lo
,
1796 struct pnfs_layout_range
*range
,
1799 struct pnfs_layout_segment
*lseg
, *ret
= NULL
;
1801 dprintk("%s:Begin\n", __func__
);
1803 list_for_each_entry(lseg
, &lo
->plh_segs
, pls_list
) {
1804 if (test_bit(NFS_LSEG_VALID
, &lseg
->pls_flags
) &&
1805 pnfs_lseg_range_match(&lseg
->pls_range
, range
,
1807 ret
= pnfs_get_lseg(lseg
);
1812 dprintk("%s:Return lseg %p ref %d\n",
1813 __func__
, ret
, ret
? refcount_read(&ret
->pls_refcount
) : 0);
/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
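
/*
 * Illustrative example (added commentary; the 1 MB value is an assumption,
 * not from the original source): if the server's mdsthreshold sets only the
 * read file-size threshold, say rd_sz = 1 MB, then reads of files smaller
 * than 1 MB are steered to the MDS, while reads of larger files go over the
 * pNFS data servers.
 */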
1836 static bool pnfs_within_mdsthreshold(struct nfs_open_context
*ctx
,
1837 struct inode
*ino
, int iomode
)
1839 struct nfs4_threshold
*t
= ctx
->mdsthreshold
;
1840 struct nfs_inode
*nfsi
= NFS_I(ino
);
1841 loff_t fsize
= i_size_read(ino
);
1842 bool size
= false, size_set
= false, io
= false, io_set
= false, ret
= false;
1847 dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1848 __func__
, t
->bm
, t
->rd_sz
, t
->wr_sz
, t
->rd_io_sz
, t
->wr_io_sz
);
1852 if (t
->bm
& THRESHOLD_RD
) {
1853 dprintk("%s fsize %llu\n", __func__
, fsize
);
1855 if (fsize
< t
->rd_sz
)
1858 if (t
->bm
& THRESHOLD_RD_IO
) {
1859 dprintk("%s nfsi->read_io %llu\n", __func__
,
1862 if (nfsi
->read_io
< t
->rd_io_sz
)
1867 if (t
->bm
& THRESHOLD_WR
) {
1868 dprintk("%s fsize %llu\n", __func__
, fsize
);
1870 if (fsize
< t
->wr_sz
)
1873 if (t
->bm
& THRESHOLD_WR_IO
) {
1874 dprintk("%s nfsi->write_io %llu\n", __func__
,
1877 if (nfsi
->write_io
< t
->wr_io_sz
)
1882 if (size_set
&& io_set
) {
1885 } else if (size
|| io
)
1888 dprintk("<-- %s size %d io %d ret %d\n", __func__
, size
, io
, ret
);
1892 static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr
*lo
)
1895 * send layoutcommit as it can hold up layoutreturn due to lseg
1898 pnfs_layoutcommit_inode(lo
->plh_inode
, false);
1899 return wait_on_bit_action(&lo
->plh_flags
, NFS_LAYOUT_RETURN
,
1900 nfs_wait_bit_killable
,
1904 static void nfs_layoutget_begin(struct pnfs_layout_hdr
*lo
)
1906 atomic_inc(&lo
->plh_outstanding
);
1909 static void nfs_layoutget_end(struct pnfs_layout_hdr
*lo
)
1911 if (atomic_dec_and_test(&lo
->plh_outstanding
))
1912 wake_up_var(&lo
->plh_outstanding
);
1915 static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr
*lo
)
1917 return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET
, &lo
->plh_flags
);
1920 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr
*lo
)
1922 unsigned long *bitlock
= &lo
->plh_flags
;
1924 clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET
, bitlock
);
1925 smp_mb__after_atomic();
1926 wake_up_bit(bitlock
, NFS_LAYOUT_FIRST_LAYOUTGET
);
1929 static void _add_to_server_list(struct pnfs_layout_hdr
*lo
,
1930 struct nfs_server
*server
)
1932 if (!test_and_set_bit(NFS_LAYOUT_HASHED
, &lo
->plh_flags
)) {
1933 struct nfs_client
*clp
= server
->nfs_client
;
1935 /* The lo must be on the clp list if there is any
1936 * chance of a CB_LAYOUTRECALL(FILE) coming in.
1938 spin_lock(&clp
->cl_lock
);
1939 list_add_tail_rcu(&lo
->plh_layouts
, &server
->layouts
);
1940 spin_unlock(&clp
->cl_lock
);
1945 * Layout segment is retreived from the server if not cached.
1946 * The appropriate layout segment is referenced and returned to the caller.
1948 struct pnfs_layout_segment
*
1949 pnfs_update_layout(struct inode
*ino
,
1950 struct nfs_open_context
*ctx
,
1953 enum pnfs_iomode iomode
,
1957 struct pnfs_layout_range arg
= {
1963 struct nfs_server
*server
= NFS_SERVER(ino
);
1964 struct nfs_client
*clp
= server
->nfs_client
;
1965 struct pnfs_layout_hdr
*lo
= NULL
;
1966 struct pnfs_layout_segment
*lseg
= NULL
;
1967 struct nfs4_layoutget
*lgp
;
1968 nfs4_stateid stateid
;
1970 unsigned long giveup
= jiffies
+ (clp
->cl_lease_time
<< 1);
1973 if (!pnfs_enabled_sb(NFS_SERVER(ino
))) {
1974 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1975 PNFS_UPDATE_LAYOUT_NO_PNFS
);
1979 if (pnfs_within_mdsthreshold(ctx
, ino
, iomode
)) {
1980 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1981 PNFS_UPDATE_LAYOUT_MDSTHRESH
);
1986 lseg
= ERR_PTR(nfs4_client_recover_expired_lease(clp
));
1990 spin_lock(&ino
->i_lock
);
1991 lo
= pnfs_find_alloc_layout(ino
, ctx
, gfp_flags
);
1993 spin_unlock(&ino
->i_lock
);
1994 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
1995 PNFS_UPDATE_LAYOUT_NOMEM
);
1999 /* Do we even need to bother with this? */
2000 if (test_bit(NFS_LAYOUT_BULK_RECALL
, &lo
->plh_flags
)) {
2001 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
2002 PNFS_UPDATE_LAYOUT_BULK_RECALL
);
2003 dprintk("%s matches recall, use MDS\n", __func__
);
2007 /* if LAYOUTGET already failed once we don't try again */
2008 if (pnfs_layout_io_test_failed(lo
, iomode
)) {
2009 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
2010 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL
);
2015 * If the layout segment list is empty, but there are outstanding
2016 * layoutget calls, then they might be subject to a layoutrecall.
2018 if (list_empty(&lo
->plh_segs
) &&
2019 atomic_read(&lo
->plh_outstanding
) != 0) {
2020 spin_unlock(&ino
->i_lock
);
2021 lseg
= ERR_PTR(wait_var_event_killable(&lo
->plh_outstanding
,
2022 !atomic_read(&lo
->plh_outstanding
)));
2024 goto out_put_layout_hdr
;
2025 pnfs_put_layout_hdr(lo
);
2030 * Because we free lsegs when sending LAYOUTRETURN, we need to wait
2033 if (test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
)) {
2034 spin_unlock(&ino
->i_lock
);
2035 dprintk("%s wait for layoutreturn\n", __func__
);
2036 lseg
= ERR_PTR(pnfs_prepare_to_retry_layoutget(lo
));
2037 if (!IS_ERR(lseg
)) {
2038 pnfs_put_layout_hdr(lo
);
2039 dprintk("%s retrying\n", __func__
);
2040 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
,
2042 PNFS_UPDATE_LAYOUT_RETRY
);
2045 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
2046 PNFS_UPDATE_LAYOUT_RETURN
);
2047 goto out_put_layout_hdr
;
2050 lseg
= pnfs_find_lseg(lo
, &arg
, strict_iomode
);
2052 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
2053 PNFS_UPDATE_LAYOUT_FOUND_CACHED
);
2058 * Choose a stateid for the LAYOUTGET. If we don't have a layout
2059 * stateid, or it has been invalidated, then we must use the open
2062 if (test_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
)) {
2066 * The first layoutget for the file. Need to serialize per
2067 * RFC 5661 Errata 3208.
2069 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET
,
2071 spin_unlock(&ino
->i_lock
);
2072 lseg
= ERR_PTR(wait_on_bit(&lo
->plh_flags
,
2073 NFS_LAYOUT_FIRST_LAYOUTGET
,
2076 goto out_put_layout_hdr
;
2077 pnfs_put_layout_hdr(lo
);
2078 dprintk("%s retrying\n", __func__
);
2082 spin_unlock(&ino
->i_lock
);
2084 status
= nfs4_select_rw_stateid(ctx
->state
,
2085 iomode
== IOMODE_RW
? FMODE_WRITE
: FMODE_READ
,
2086 NULL
, &stateid
, NULL
);
2088 lseg
= ERR_PTR(status
);
2089 trace_pnfs_update_layout(ino
, pos
, count
,
2091 PNFS_UPDATE_LAYOUT_INVALID_OPEN
);
2092 nfs4_schedule_stateid_recovery(server
, ctx
->state
);
2093 pnfs_clear_first_layoutget(lo
);
2094 pnfs_put_layout_hdr(lo
);
2097 spin_lock(&ino
->i_lock
);
2099 nfs4_stateid_copy(&stateid
, &lo
->plh_stateid
);
2102 if (pnfs_layoutgets_blocked(lo
)) {
2103 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
2104 PNFS_UPDATE_LAYOUT_BLOCKED
);
2107 nfs_layoutget_begin(lo
);
2108 spin_unlock(&ino
->i_lock
);
2110 _add_to_server_list(lo
, server
);
2112 pg_offset
= arg
.offset
& ~PAGE_MASK
;
2114 arg
.offset
-= pg_offset
;
2115 arg
.length
+= pg_offset
;
2117 if (arg
.length
!= NFS4_MAX_UINT64
)
2118 arg
.length
= PAGE_ALIGN(arg
.length
);
2120 lgp
= pnfs_alloc_init_layoutget_args(ino
, ctx
, &stateid
, &arg
, gfp_flags
);
2122 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, NULL
,
2123 PNFS_UPDATE_LAYOUT_NOMEM
);
2124 nfs_layoutget_end(lo
);
2125 goto out_put_layout_hdr
;
2128 lseg
= nfs4_proc_layoutget(lgp
, &timeout
);
2129 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
2130 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET
);
2131 nfs_layoutget_end(lo
);
2133 switch(PTR_ERR(lseg
)) {
2135 if (time_after(jiffies
, giveup
))
2138 case -ERECALLCONFLICT
:
2142 if (!nfs_error_is_fatal(PTR_ERR(lseg
))) {
2143 pnfs_layout_clear_fail_bit(lo
, pnfs_iomode_to_fail_bit(iomode
));
2146 goto out_put_layout_hdr
;
2150 pnfs_clear_first_layoutget(lo
);
2151 trace_pnfs_update_layout(ino
, pos
, count
,
2152 iomode
, lo
, lseg
, PNFS_UPDATE_LAYOUT_RETRY
);
2153 pnfs_put_layout_hdr(lo
);
2157 pnfs_layout_clear_fail_bit(lo
, pnfs_iomode_to_fail_bit(iomode
));
2162 pnfs_clear_first_layoutget(lo
);
2163 trace_pnfs_update_layout(ino
, pos
, count
, iomode
, lo
, lseg
,
2164 PNFS_UPDATE_LAYOUT_EXIT
);
2165 pnfs_put_layout_hdr(lo
);
2167 dprintk("%s: inode %s/%llu pNFS layout segment %s for "
2168 "(%s, offset: %llu, length: %llu)\n",
2169 __func__
, ino
->i_sb
->s_id
,
2170 (unsigned long long)NFS_FILEID(ino
),
2171 IS_ERR_OR_NULL(lseg
) ? "not found" : "found",
2172 iomode
==IOMODE_RW
? "read/write" : "read-only",
2173 (unsigned long long)pos
,
2174 (unsigned long long)count
);
2177 spin_unlock(&ino
->i_lock
);
2178 goto out_put_layout_hdr
;
2180 EXPORT_SYMBOL_GPL(pnfs_update_layout
);
2183 pnfs_sanity_check_layout_range(struct pnfs_layout_range
*range
)
2185 switch (range
->iomode
) {
2192 if (range
->offset
== NFS4_MAX_UINT64
)
2194 if (range
->length
== 0)
2196 if (range
->length
!= NFS4_MAX_UINT64
&&
2197 range
->length
> NFS4_MAX_UINT64
- range
->offset
)
2202 static struct pnfs_layout_hdr
*
2203 _pnfs_grab_empty_layout(struct inode
*ino
, struct nfs_open_context
*ctx
)
2205 struct pnfs_layout_hdr
*lo
;
2207 spin_lock(&ino
->i_lock
);
2208 lo
= pnfs_find_alloc_layout(ino
, ctx
, GFP_KERNEL
);
2211 if (!test_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
))
2213 if (test_bit(NFS_LAYOUT_RETURN
, &lo
->plh_flags
))
2215 if (pnfs_layoutgets_blocked(lo
))
2217 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET
, &lo
->plh_flags
))
2219 nfs_layoutget_begin(lo
);
2220 spin_unlock(&ino
->i_lock
);
2221 _add_to_server_list(lo
, NFS_SERVER(ino
));
2225 spin_unlock(&ino
->i_lock
);
2226 pnfs_put_layout_hdr(lo
);
static void _lgopen_prepare_attached(struct nfs4_opendata *data,
				     struct nfs_open_context *ctx)
{
	struct inode *ino = data->dentry->d_inode;
	struct pnfs_layout_range rng = {
		.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
			  IOMODE_RW : IOMODE_READ,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_hdr *lo;

	/* Heuristic: don't send layoutget if we have cached data */
	if (rng.iomode == IOMODE_READ &&
	   (i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0))
		return;

	lo = _pnfs_grab_empty_layout(ino, ctx);
	if (!lo)
		return;
	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
					     &rng, GFP_KERNEL);
	if (!lgp) {
		pnfs_clear_first_layoutget(lo);
		nfs_layoutget_end(lo);
		pnfs_put_layout_hdr(lo);
		return;
	}
	data->o_arg.lg_args = &lgp->args;
	data->o_res.lg_res = &lgp->res;
}

static void _lgopen_prepare_floating(struct nfs4_opendata *data,
				     struct nfs_open_context *ctx)
{
	struct pnfs_layout_range rng = {
		.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
			  IOMODE_RW : IOMODE_READ,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct nfs4_layoutget *lgp;

	lgp = pnfs_alloc_init_layoutget_args(NULL, ctx, &current_stateid,
					     &rng, GFP_KERNEL);
	if (!lgp)
		return;
	data->o_arg.lg_args = &lgp->args;
	data->o_res.lg_res = &lgp->res;
}
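/*
 * Decide whether to attach a LAYOUTGET to this OPEN: the layout driver must
 * set PNFS_LAYOUTGET_ON_OPEN and the server must advertise LGOPEN support.
 * The "attached" variant is used when the open already has state, otherwise
 * the "floating" variant (no inode known yet) is used.
 */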
void pnfs_lgopen_prepare(struct nfs4_opendata *data,
			 struct nfs_open_context *ctx)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);

	if (!(pnfs_enabled_sb(server) &&
	      server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN))
		return;
	/* Could check on max_ops, but currently hardcoded high enough */
	if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
		return;
	if (data->state)
		_lgopen_prepare_attached(data, ctx);
	else
		_lgopen_prepare_floating(data, ctx);
}
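/*
 * Process the LAYOUTGET result returned by an OPEN+LAYOUTGET compound.
 * Persistent errors clear NFS_CAP_LGOPEN so that later opens stop asking.
 */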
void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
		       struct nfs_open_context *ctx)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	struct nfs_server *srv = NFS_SERVER(ino);
	u32 iomode;

	if (!lgp)
		return;
	dprintk("%s: entered with status %i\n", __func__, lgp->res.status);
	if (lgp->res.status) {
		switch (lgp->res.status) {
		default:
			break;
		/*
		 * Halt lgopen attempts if the server doesn't recognise
		 * the "current stateid" value, the layout type, or the
		 * layoutget operation as being valid.
		 * Also if it complains about too many ops in the compound
		 * or of the request/reply being too big.
		 */
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_NOTSUPP:
		case -NFS4ERR_REP_TOO_BIG:
		case -NFS4ERR_REP_TOO_BIG_TO_CACHE:
		case -NFS4ERR_REQ_TOO_BIG:
		case -NFS4ERR_TOO_MANY_OPS:
		case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
			srv->caps &= ~NFS_CAP_LGOPEN;
		}
		return;
	}
	if (!lgp->args.inode) {
		lo = _pnfs_grab_empty_layout(ino, ctx);
		if (!lo)
			return;
		lgp->args.inode = ino;
	} else
		lo = NFS_I(lgp->args.inode)->layout;

	lseg = pnfs_layout_process(lgp);
	if (!IS_ERR(lseg)) {
		iomode = lgp->args.range.iomode;
		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
		pnfs_put_lseg(lseg);
	}
}
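/*
 * Release the layoutget state carried by an open once it is no longer
 * needed, dropping the "first layoutget" serialisation if an inode was
 * attached.
 */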
void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
{
	if (lgp != NULL) {
		struct inode *inode = lgp->args.inode;
		if (inode) {
			struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
			pnfs_clear_first_layoutget(lo);
			nfs_layoutget_end(lo);
		}
		pnfs_layoutget_free(lgp);
	}
}
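/*
 * pnfs_layout_process() turns a LAYOUTGET reply into a layout segment:
 * the layout blob is handed to the layout driver's alloc_lseg(), the layout
 * stateid is validated and updated, and the new segment is inserted into the
 * layout header's segment list (or forgotten if the state has moved on).
 */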
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);

	if (!pnfs_sanity_check_layout_range(&res->range))
		return ERR_PTR(-EINVAL);

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (IS_ERR_OR_NULL(lseg)) {
		if (!lseg)
			lseg = ERR_PTR(-ENOMEM);

		dprintk("%s: Could not allocate layout: error %ld\n",
			__func__, PTR_ERR(lseg));
		return lseg;
	}

	pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);

	spin_lock(&ino->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget;
	}

	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			if (!pnfs_layout_is_valid(lo) &&
			    pnfs_is_first_layoutget(lo))
				lo->plh_barrier = 0;
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
	} else if (pnfs_layout_is_valid(lo)) {
		/*
		 * We got an entirely new state ID. Mark all segments for the
		 * inode invalid, and retry the layoutget
		 */
		struct pnfs_layout_range range = {
			.iomode = IOMODE_ANY,
			.length = NFS4_MAX_UINT64,
		};
		pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0);
		goto out_forget;
	} else {
		/* We have a completely new layout */
		if (!pnfs_is_first_layoutget(lo))
			goto out_forget;
		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
	}

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg, &free_me);

	if (res->return_on_close)
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;

out_forget:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	return ERR_PTR(-EAGAIN);
}
/**
 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
 * @lo: pointer to layout header
 * @tmp_list: list header to be used with pnfs_free_lseg_list()
 * @return_range: describe layout segment ranges to be returned
 * @seq: stateid seqid to match
 *
 * This function is mainly intended for use by layoutrecall. It attempts
 * to free the layout segment immediately, or else to mark it for return
 * as soon as its reference count drops to zero.
 *
 * Returns:
 * - 0: a layoutreturn needs to be scheduled.
 * - EBUSY: there are layout segments that are still in use.
 * - ENOENT: there are no layout segments that need to be returned.
 */
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range,
				u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	assert_spin_locked(&lo->plh_inode->i_lock);

	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		tmp_list = &lo->plh_return_segs;

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
				tmp_list = &lo->plh_return_segs;
			if (mark_lseg_invalid(lseg, tmp_list))
				continue;
			remaining++;
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		}

	if (remaining) {
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
		return -EBUSY;
	}

	if (!list_empty(&lo->plh_return_segs)) {
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
		return 0;
	}

	return -ENOENT;
}
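/*
 * Mark a layout (or the part of it described by @range) for return and,
 * if no segments remain in use, send the LAYOUTRETURN immediately;
 * otherwise kick off a commit so that outstanding writes can complete first.
 */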
static void
pnfs_mark_layout_for_return(struct inode *inode,
			    const struct pnfs_layout_range *range)
{
	struct pnfs_layout_hdr *lo;
	bool return_now = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		return;
	}
	pnfs_set_plh_return_info(lo, range->iomode, 0);
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) {
		const struct cred *cred;
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;

		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
	} else {
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
}

void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	pnfs_mark_layout_for_return(inode, &range);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);

static bool
pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo)
{
	return pnfs_layout_is_valid(lo) &&
		!test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) &&
		!test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
}
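/*
 * Return the first valid, not-being-returned layout segment that
 * intersects @range and matches @iomode (IOMODE_ANY matches anything).
 */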
static struct pnfs_layout_segment *
pnfs_find_first_lseg(struct pnfs_layout_hdr *lo,
		const struct pnfs_layout_range *range,
		enum pnfs_iomode iomode)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
			continue;
		if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
			continue;
		if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY)
			continue;
		if (pnfs_lseg_range_intersecting(&lseg->pls_range, range))
			return lseg;
	}
	return NULL;
}

/* Find open file states whose mode matches that of the range */
static bool
pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
				 const struct pnfs_layout_range *range)
{
	struct list_head *head;
	struct nfs_open_context *ctx;
	fmode_t mode = 0;

	if (!pnfs_layout_can_be_returned(lo) ||
	    !pnfs_find_first_lseg(lo, range, range->iomode))
		return false;

	head = &NFS_I(lo->plh_inode)->open_files;
	list_for_each_entry_rcu(ctx, head, list) {
		if (ctx->state)
			mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE);
	}

	switch (range->iomode) {
	default:
		break;
	case IOMODE_READ:
		mode &= ~FMODE_WRITE;
		break;
	case IOMODE_RW:
		if (pnfs_find_first_lseg(lo, range, IOMODE_READ))
			mode &= ~FMODE_READ;
	}
	return mode == 0;
}
static int
pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
{
	const struct pnfs_layout_range *range = data;
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
		if (!pnfs_layout_can_be_returned(lo) ||
		    test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
			continue;
		inode = lo->plh_inode;
		spin_lock(&inode->i_lock);
		if (!pnfs_should_return_unused_layout(lo, range)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		spin_unlock(&inode->i_lock);
		inode = pnfs_grab_inode_layout_hdr(lo);
		if (!inode)
			continue;
		rcu_read_unlock();
		pnfs_mark_layout_for_return(inode, range);
		iput(inode);
		cond_resched();
		goto restart;
	}
	rcu_read_unlock();
	return 0;
}

void
pnfs_layout_return_unused_byclid(struct nfs_client *clp,
				 enum pnfs_iomode iomode)
{
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	nfs_client_for_each_server(clp, pnfs_layout_return_unused_byserver,
				   &range);
}
void
pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
{
	if (pgio->pg_lseg == NULL ||
	    test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
		return;
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);

/*
 * Check for any intersection between the request and the pgio->pg_lseg,
 * and if none, put this pgio->pg_lseg away.
 */
void
pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are fewer bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	if (pgio->pg_lseg) {
		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
					  pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);

		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end)
			return 0;

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
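/*
 * Handle a fatal write error reported by the layout driver: optionally
 * return the layout (PNFS_LAYOUTRET_ON_ERROR) and resend the I/O through
 * the MDS.
 */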
static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg,
		       int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_write_through_mds(desc, hdr);
		break;
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
		/* cleanup hdr and prepare to redo pnfs */
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}
static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error))
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
/* Resend all requests through pnfs. */
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr,
			   unsigned int mirror_idx)
{
	struct nfs_pageio_descriptor pgio;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		/* Prevent deadlocks with layoutreturn! */
		pnfs_put_lseg(hdr->lseg);
		hdr->lseg = NULL;

		nfs_pageio_init_read(&pgio, hdr->inode, false,
					hdr->completion_ops);
		pgio.pg_mirror_idx = mirror_idx;
		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
	}
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_read_through_mds(desc, hdr);
		break;
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
		/* cleanup hdr and prepare to redo pnfs */
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
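/*
 * Drop the NFS_INO_LAYOUTCOMMITTING bit and wake up anyone waiting to
 * start a new layoutcommit for this inode.
 */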
static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}
/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
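/*
 * Record that a layoutcommit is needed for @inode up to @end_pos, taking
 * a reference on @lseg that nfs4_layoutcommit_release() will drop.
 */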
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	data->cred = get_cred(nfsi->layout->plh_lc_cred);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	if (end_pos != 0)
		data->args.lastbytewritten = end_pos - 1;
	else
		data->args.lastbytewritten = U64_MAX;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_cred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);
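/*
 * Allocate the nfs4_threshold structure used to hold mdsthreshold hints
 * from the server (when to do I/O through the MDS instead of pNFS).
 */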
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (thp == NULL) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}
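/*
 * LAYOUTSTATS reporting (NFSv4.2 only): gather per-layout statistics via
 * the layout driver and send them to the server with LAYOUTSTATS.
 */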
#if IS_ENABLED(CONFIG_NFS_V4_2)
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);