/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "nfs4trace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120 * HZ)
/*
 * pnfs_spinlock protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
		       enum pnfs_iomode iomode, bool sync);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
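
/*
 * Example (illustrative sketch only, not a driver in this file): a
 * layout driver typically registers itself from its module init hook.
 * Only ->id, ->alloc_lseg and ->free_lseg are checked above; the
 * callback names below are hypothetical.
 *
 *	static struct pnfs_layoutdriver_type example_layout_type = {
 *		.id		= LAYOUT4_NFSV4_1_FILES,
 *		.name		= "LAYOUT4_NFSV4_1_FILES",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= example_alloc_lseg,
 *		.free_lseg	= example_free_lseg,
 *	};
 *
 *	static int __init example_layout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&example_layout_type);
 *	}
 */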
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}
static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}
static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}
static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}
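
/*
 * Note: pnfs_layout_set_fail_bit() takes a header reference the first
 * time a fail bit is set, and pnfs_layout_clear_fail_bit() drops it,
 * so a failed layout header stays pinned in the cache for the
 * PNFS_LAYOUTGET_RETRY_TIMEOUT window checked below.
 */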
static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}
static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}
static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}
static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
			struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *s;

	if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return false;

	/* Defer layoutreturn until other marked segments are done */
	list_for_each_entry(s, &lo->plh_segs, pls_list)
		if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;

	return true;
}
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
{
	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		return false;
	lo->plh_return_iomode = 0;
	pnfs_get_layout_hdr(lo);
	clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
	return true;
}
static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_hdr *lo, struct inode *inode)
{
	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo, lseg)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
		iomode = lo->plh_return_iomode;
		send = pnfs_prepare_layoutreturn(lo);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we dont deadlock */
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	/* Handle the case where refcount != 1 */
	if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
		return;

	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	/* Do we need a layoutreturn? */
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);

	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			spin_unlock(&inode->i_lock);
			return;
		}
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
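
/*
 * Note on ordering in pnfs_put_lseg() above: the layoutreturn check is
 * made while the caller still holds a segment reference, and the final
 * atomic_dec_and_lock() takes i_lock so that removal from plh_segs and
 * the NFS_LSEG_VALID test stay atomic with respect to a concurrent
 * CB_LAYOUTRECALL walking the same list.
 */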
static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}
static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}
void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
			return;
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}
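
/*
 * Example: end_offset(10, NFS4_MAX_UINT64) would wrap around, so the
 * sum is clamped to NFS4_MAX_UINT64, the pNFS encoding of "to EOF".
 */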
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}
static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
		}
	dprintk("%s:Return %i\n", __func__, remaining);
	return remaining;
}
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}
static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}
int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server, &layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server, &layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}
/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}
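
/*
 * The signed subtraction handles seqid wraparound: for example,
 * pnfs_seqid_is_newer(1, 0xffffffff) computes (s32)2 > 0 and reports
 * that seqid 1 is newer, even though it is numerically smaller.
 */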
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}
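
/*
 * Example of the barrier heuristic above: with newseq = 12 and two
 * LAYOUTGETs still outstanding, the barrier becomes 10, so replies
 * carrying a seqid at or below 10 are treated as stale by
 * pnfs_layout_stateid_blocked() below.
 */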
static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      const struct pnfs_layout_range *range,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs) ||
		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
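
/*
 * The read_seqbegin()/read_seqretry() loop above re-copies the open
 * stateid until it observes a consistent snapshot, since state
 * recovery may be rewriting the stateid concurrently.
 */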
/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   const struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;
	loff_t i_size;

	dprintk("--> %s\n", __func__);

	/*
	 * Synchronously retrieve layout information from server and
	 * store in lseg. If we race with a concurrent seqid morphing
	 * op, then re-send the LAYOUTGET.
	 */
	do {
		lgp = kzalloc(sizeof(*lgp), gfp_flags);
		if (lgp == NULL)
			return NULL;

		i_size = i_size_read(ino);

		lgp->args.minlength = PAGE_CACHE_SIZE;
		if (lgp->args.minlength > range->length)
			lgp->args.minlength = range->length;
		if (range->iomode == IOMODE_READ) {
			if (range->offset >= i_size)
				lgp->args.minlength = 0;
			else if (i_size - range->offset < lgp->args.minlength)
				lgp->args.minlength = i_size - range->offset;
		}
		lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
		pnfs_copy_range(&lgp->args.range, range);
		lgp->args.type = server->pnfs_curr_ld->id;
		lgp->args.inode = ino;
		lgp->args.ctx = get_nfs_open_context(ctx);
		lgp->gfp_flags = gfp_flags;
		lgp->cred = lo->plh_lc_cred;

		lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	} while (lseg == ERR_PTR(-EAGAIN));

	if (IS_ERR(lseg) && !nfs_error_is_fatal(PTR_ERR(lseg)))
		lseg = NULL;
	else
		pnfs_layout_clear_fail_bit(lo,
				pnfs_iomode_to_fail_bit(range->iomode));

	return lseg;
}
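
/*
 * Example of the minlength clamping above: for a READ whose offset is
 * at or past i_size, minlength drops to 0; for a READ starting 512
 * bytes before EOF, minlength shrinks to those 512 bytes, so the
 * server need only grant a layout covering data that actually exists.
 */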
static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}
void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	nfs4_stateid_copy(&lrp->args.stateid, stateid);
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.range.iomode = iomode;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0, empty;
	bool send;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	nfs4_stateid_copy(&stateid, &nfsi->layout->plh_stateid);
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_put_layout_hdr;
	}

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	send = pnfs_prepare_layoutreturn(lo);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);
int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}
bool pnfs_roc(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	nfs4_stateid stateid;
	LIST_HEAD(tmp_list);
	bool found = false, layoutreturn = false, roc = false;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_noroc;

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ))
		goto out_noroc;

	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		/* Don't return layout if there is open file state */
		if (state != NULL && state->state != 0)
			goto out_noroc;
	}

	nfs4_stateid_copy(&stateid, &lo->plh_stateid);
	/* always send layoutreturn if being marked so */
	if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
				&lo->plh_flags))
		layoutreturn = pnfs_prepare_layoutreturn(lo);

	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}

	/* ROC in two conditions:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	if (found && !layoutreturn) {
		/* lo ref dropped in pnfs_roc_release() */
		pnfs_get_layout_hdr(lo);
		roc = true;
	}

out_noroc:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	pnfs_layoutcommit_inode(ino, true);
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
	return roc;
}
void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	pnfs_clear_layoutreturn_waitbit(lo);
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	pnfs_mark_layout_returned_if_empty(lo);
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
	trace_nfs4_layoutreturn_on_close(ino, 0);
}
void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	u32 current_seqid;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);
}
bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	bool sleep = false;

	/* we might not have grabbed lo reference. so need to check under
	 * i_lock */
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		sleep = true;
	spin_unlock(&ino->i_lock);

	if (sleep)
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);

	return sleep;
}
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
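
/*
 * Example of the resulting sort order: for two segments with the same
 * offset and length, an IOMODE_RW segment compares lower than an
 * IOMODE_READ one, so the RW segment sorts earlier in plh_segs and is
 * found first by pnfs_find_lseg().
 */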
static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}
static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}
void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);
static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

	if (ld->add_lseg != NULL)
		ld->add_lseg(lo, lseg, free_me);
	else
		pnfs_generic_layout_insert_lseg(lo, lseg,
				pnfs_lseg_range_is_after,
				pnfs_lseg_no_merge,
				free_me);
}
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}
*
1308 pnfs_find_alloc_layout(struct inode
*ino
,
1309 struct nfs_open_context
*ctx
,
1312 struct nfs_inode
*nfsi
= NFS_I(ino
);
1313 struct pnfs_layout_hdr
*new = NULL
;
1315 dprintk("%s Begin ino=%p layout=%p\n", __func__
, ino
, nfsi
->layout
);
1317 if (nfsi
->layout
!= NULL
)
1319 spin_unlock(&ino
->i_lock
);
1320 new = alloc_init_layout_hdr(ino
, ctx
, gfp_flags
);
1321 spin_lock(&ino
->i_lock
);
1323 if (likely(nfsi
->layout
== NULL
)) { /* Won the race? */
1326 } else if (new != NULL
)
1327 pnfs_free_layout_hdr(new);
1329 pnfs_get_layout_hdr(nfsi
->layout
);
1330 return nfsi
->layout
;
/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}
/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}

	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
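
/*
 * Example: with THRESHOLD_RD set and rd_sz = 65536, a READ of a 4 KB
 * file stays at the MDS (fsize < rd_sz => return true), while a READ
 * of a 1 MB file goes through the layout driver.
 */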
static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   nfs_wait_bit_killable,
				   TASK_UNINTERRUPTIBLE);
}
static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
				 PNFS_UPDATE_LAYOUT_NO_PNFS);
		goto out;
	}

	if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
				 PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
		goto out;
	}

	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
				 PNFS_UPDATE_LAYOUT_MDSTHRESH);
		goto out;
	}

lookup_again:
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
				 PNFS_UPDATE_LAYOUT_NOMEM);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				 PNFS_UPDATE_LAYOUT_BULK_RECALL);
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
		goto out_unlock;
	}

	first = list_empty(&lo->plh_segs);
	if (first) {
		/* The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
				    TASK_UNINTERRUPTIBLE);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		/* Check to see if the layout for the given range
		 * already exists
		 */
		lseg = pnfs_find_lseg(lo, &arg);
		if (lseg) {
			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
					PNFS_UPDATE_LAYOUT_FOUND_CACHED);
			goto out_unlock;
		}
	}

	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		if (pnfs_prepare_to_retry_layoutget(lo)) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}
		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				PNFS_UPDATE_LAYOUT_RETURN);
		goto out_put_layout_hdr;
	}

	if (pnfs_layoutgets_blocked(lo)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				PNFS_UPDATE_LAYOUT_BLOCKED);
		goto out_unlock;
	}
	atomic_inc(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);

	if (list_empty(&lo->plh_layouts)) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
out_put_layout_hdr:
	if (first)
		pnfs_clear_first_layoutget(lo);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
			iomode==IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
static bool
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
{
	switch (range->iomode) {
	case IOMODE_READ:
	case IOMODE_RW:
		break;
	default:
		return false;
	}
	if (range->offset == NFS4_MAX_UINT64)
		return false;
	if (range->length == 0)
		return false;
	if (range->length != NFS4_MAX_UINT64 &&
	    range->length > NFS4_MAX_UINT64 - range->offset)
		return false;
	return true;
}
*
1665 pnfs_layout_process(struct nfs4_layoutget
*lgp
)
1667 struct pnfs_layout_hdr
*lo
= NFS_I(lgp
->args
.inode
)->layout
;
1668 struct nfs4_layoutget_res
*res
= &lgp
->res
;
1669 struct pnfs_layout_segment
*lseg
;
1670 struct inode
*ino
= lo
->plh_inode
;
1672 int status
= -EINVAL
;
1674 if (!pnfs_sanity_check_layout_range(&res
->range
))
1677 /* Inject layout blob into I/O device driver */
1678 lseg
= NFS_SERVER(ino
)->pnfs_curr_ld
->alloc_lseg(lo
, res
, lgp
->gfp_flags
);
1679 if (!lseg
|| IS_ERR(lseg
)) {
1683 status
= PTR_ERR(lseg
);
1684 dprintk("%s: Could not allocate layout: error %d\n",
1689 init_lseg(lo
, lseg
);
1690 lseg
->pls_range
= res
->range
;
1692 spin_lock(&ino
->i_lock
);
1693 if (pnfs_layoutgets_blocked(lo
)) {
1694 dprintk("%s forget reply due to state\n", __func__
);
1695 goto out_forget_reply
;
1698 if (nfs4_stateid_match_other(&lo
->plh_stateid
, &res
->stateid
)) {
1699 /* existing state ID, make sure the sequence number matches. */
1700 if (pnfs_layout_stateid_blocked(lo
, &res
->stateid
)) {
1701 dprintk("%s forget reply due to sequence\n", __func__
);
1703 goto out_forget_reply
;
1705 pnfs_set_layout_stateid(lo
, &res
->stateid
, false);
1708 * We got an entirely new state ID. Mark all segments for the
1709 * inode invalid, and don't bother validating the stateid
1712 pnfs_mark_matching_lsegs_invalid(lo
, &free_me
, NULL
);
1714 nfs4_stateid_copy(&lo
->plh_stateid
, &res
->stateid
);
1715 lo
->plh_barrier
= be32_to_cpu(res
->stateid
.seqid
);
1718 clear_bit(NFS_LAYOUT_INVALID_STID
, &lo
->plh_flags
);
1720 pnfs_get_lseg(lseg
);
1721 pnfs_layout_insert_lseg(lo
, lseg
, &free_me
);
1723 if (res
->return_on_close
)
1724 set_bit(NFS_LSEG_ROC
, &lseg
->pls_flags
);
1726 spin_unlock(&ino
->i_lock
);
1727 pnfs_free_lseg_list(&free_me
);
1730 return ERR_PTR(status
);
1733 spin_unlock(&ino
->i_lock
);
1734 lseg
->pls_layout
= lo
;
1735 NFS_SERVER(ino
)->pnfs_curr_ld
->free_lseg(lseg
);
static void
pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
{
	if (lo->plh_return_iomode == iomode)
		return;
	if (lo->plh_return_iomode != 0)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
}
static int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;

	assert_spin_locked(&lo->plh_inode->i_lock);

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(&lseg->pls_range, return_range)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
			pnfs_set_plh_return_iomode(lo, return_range->iomode);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
			set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
					&lo->plh_flags);
		}
	return remaining;
}
void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(free_me);
	bool return_now = false;

	spin_lock(&inode->i_lock);
	pnfs_set_plh_return_iomode(lo, range.iomode);
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode = lo->plh_return_iomode;

		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
		return_now = pnfs_prepare_layoutreturn(lo);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
	} else {
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
	pnfs_free_lseg_list(&free_me);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are less bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	if (pgio->pg_lseg) {
		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start >= seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end) {
			/* reference the new lseg */
			if (pgio->pg_ops->pg_cleanup)
				pgio->pg_ops->pg_cleanup(pgio);
			if (pgio->pg_ops->pg_init)
				pgio->pg_ops->pg_init(pgio, req);
			return 0;
		}

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
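
/*
 * Example for pnfs_generic_pg_test(): if nfs_generic_pg_test() allows
 * 4096 bytes but the current lseg ends 1024 bytes past req_offset(req),
 * only 1024 bytes are coalesced; a request starting at or past seg_end
 * instead re-initialises the descriptor against a new lseg.
 */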
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}
/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
*desc
,
2011 struct nfs_pgio_header
*hdr
, int how
)
2013 const struct rpc_call_ops
*call_ops
= desc
->pg_rpc_callops
;
2014 struct pnfs_layout_segment
*lseg
= desc
->pg_lseg
;
2015 enum pnfs_try_status trypnfs
;
2017 trypnfs
= pnfs_try_to_write_data(hdr
, call_ops
, lseg
, how
);
2018 if (trypnfs
== PNFS_NOT_ATTEMPTED
)
2019 pnfs_write_through_mds(desc
, hdr
);
2022 static void pnfs_writehdr_free(struct nfs_pgio_header
*hdr
)
2024 pnfs_put_lseg(hdr
->lseg
);
2025 nfs_pgio_header_free(hdr
);
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}
/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}
/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
2127 int pnfs_read_resend_pnfs(struct nfs_pgio_header
*hdr
)
2129 struct nfs_pageio_descriptor pgio
;
2131 nfs_pageio_init_read(&pgio
, hdr
->inode
, false, hdr
->completion_ops
);
2132 return nfs_pageio_resend(&pgio
, hdr
);
2134 EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs
);
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;
	int err = 0;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_TRY_AGAIN)
		err = pnfs_read_resend_pnfs(hdr);
	if (trypnfs == PNFS_NOT_ATTEMPTED || err)
		pnfs_read_through_mds(desc, hdr);
}
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}
/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}
static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_rpccred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (thp == NULL) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}
#if IS_ENABLED(CONFIG_NFS_V4_2)
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);