/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120 * HZ)
/* pnfs_spinlock protects pnfs_modules_tbl. */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
		       enum pnfs_iomode iomode, bool sync);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}
static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}
static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}
static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}
static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}
static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}
static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}
static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
			struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *s;

	if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return false;

	list_for_each_entry(s, &lo->plh_segs, pls_list)
		if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;

	return true;
}
static void pnfs_layoutreturn_free_lseg(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	WARN_ON(atomic_read(&lseg->pls_refcount));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo, lseg)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;

		stateid = lo->plh_stateid;
		iomode = lo->plh_return_iomode;
		/* decreased in pnfs_send_layoutreturn() */
		lo->plh_block_lgets++;
		lo->plh_return_iomode = 0;
		spin_unlock(&inode->i_lock);

		pnfs_send_layoutreturn(lo, stateid, iomode, true);
		spin_lock(&inode->i_lock);
	}
	/* match pnfs_get_layout_hdr #2 in pnfs_put_lseg */
	pnfs_put_layout_hdr(lo);
	pnfs_layout_remove_lseg(lo, lseg);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg(lseg);
	/* match pnfs_get_layout_hdr #1 in pnfs_put_lseg */
	pnfs_put_layout_hdr(lo);
}
static void
pnfs_layoutreturn_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_layoutreturn_free_lseg);
	queue_work(nfsiod_workqueue, &lseg->pls_work);
}
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		if (pnfs_layout_need_return(lo, lseg)) {
			spin_unlock(&inode->i_lock);
			/* hdr reference dropped in nfs4_layoutreturn_release */
			pnfs_get_layout_hdr(lo);
			pnfs_layoutreturn_free_lseg_async(lseg);
		} else {
			pnfs_layout_remove_lseg(lo, lseg);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg(lseg);
			pnfs_put_layout_hdr(lo);
		}
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}
static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}
void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;

		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}
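
/*
 * Illustrative example: end_offset(0, 100) == 100, the offset of the first
 * byte past the range.  With a length that overflows, such as
 * end_offset(NFS4_MAX_UINT64 - 1, 16), the sum wraps below 'start', so the
 * helper saturates to NFS4_MAX_UINT64, the "rest of file" marker.
 */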
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}
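
/*
 * Illustrative example: for l1 = {offset 0, length 100} and
 * l2 = {offset 40, length 20}, l2 is contained in l1 and the two ranges
 * intersect.  For l2 = {offset 100, length 10} neither predicate holds,
 * because end1 == 100 is already the first byte outside l1.
 */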
static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		pnfs_clear_retry_layoutget(lo);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}
static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}
int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}
/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}
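
/*
 * Illustrative example: pnfs_seqid_is_newer(2, 1) is true, and so is
 * pnfs_seqid_is_newer(0, 0xffffffff): the unsigned difference 0 - 0xffffffff
 * equals 1, which is positive when viewed as an s32, so a seqid that has
 * just wrapped around is still treated as newer.
 */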
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}
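
/*
 * Illustrative example: if newseq == 10 and two LAYOUTGETs are still
 * outstanding, the barrier becomes 10 - 2 == 8.  Replies carrying seqids
 * 9 and 10 are still accepted, while anything at or below 8 is treated
 * as stale by pnfs_layout_stateid_blocked() below.
 */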
static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}
static bool
pnfs_layout_returning(const struct pnfs_layout_hdr *lo,
		      struct pnfs_layout_range *range)
{
	return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
		(lo->plh_return_iomode == IOMODE_ANY ||
		 lo->plh_return_iomode == range->iomode);
}
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo,
			struct pnfs_layout_range *range, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget)) ||
		pnfs_layout_returning(lo, range);
}
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct pnfs_layout_range *range,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, range, 1)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs) ||
		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}
static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}
static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.range.iomode = iomode;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);
int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	nfs4_stateid stateid;
	LIST_HEAD(tmp_list);
	bool found = false, layoutreturn = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_noroc;
	pnfs_clear_retry_layoutget(lo);
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_noroc;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_noroc:
	if (lo) {
		stateid = lo->plh_stateid;
		layoutreturn =
			test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
					   &lo->plh_flags);
		if (layoutreturn) {
			lo->plh_block_lgets++;
			pnfs_get_layout_hdr(lo);
		}
	}
	spin_unlock(&ino->i_lock);
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
	return false;
}
void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}
bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	nfs4_stateid stateid;
	u32 current_seqid;
	bool found = false, layoutreturn = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	if (!found) {
		stateid = lo->plh_stateid;
		layoutreturn =
			test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
					   &lo->plh_flags);
		if (layoutreturn) {
			lo->plh_block_lgets++;
			pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
		}
	}
	spin_unlock(&ino->i_lock);
	if (layoutreturn) {
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
	}
	return found;
}
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
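
/*
 * Illustrative example: two segments with equal offset and length sort
 * RW before READ, because (iomode == IOMODE_READ) is 0 for RW and 1 for
 * READ, so the RW segment compares as smaller and is inserted earlier
 * in plh_segs by pnfs_layout_insert_lseg() below.
 */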
static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}
/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}
/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}

	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
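
/*
 * Illustrative example: for a READ with both THRESHOLD_RD (rd_sz = 1 MB)
 * and THRESHOLD_RD_IO (rd_io_sz = 64 KB) set in t->bm, a 512 KB file that
 * has accumulated only 16 KB of read_io is below both thresholds, so the
 * function returns true and the I/O is sent to the MDS.
 */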
/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key)
{
	if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
		return 1;
	return nfs_wait_bit_killable(key);
}
static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   pnfs_layoutget_retry_bit_wait,
				   TASK_UNINTERRUPTIBLE);
}
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

lookup_again:
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode) &&
	    !pnfs_should_retry_layoutget(lo))
		goto out_unlock;

	first = list_empty(&lo->plh_segs);
	if (first) {
		/* The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
				    TASK_UNINTERRUPTIBLE);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		/* Check to see if the layout for the given range
		 * already exists
		 */
		lseg = pnfs_find_lseg(lo, &arg);
		if (lseg)
			goto out_unlock;
	}

	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
	if (!lseg && pnfs_should_retry_layoutget(lo) &&
	    test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		if (pnfs_prepare_to_retry_layoutget(lo)) {
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}
		goto out_put_layout_hdr;
	}

	if (pnfs_layoutgets_blocked(lo, &arg, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);

	if (list_empty(&lo->plh_layouts)) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);
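	/*
	 * Illustrative example (assuming a 4 KB PAGE_CACHE_SIZE): a request
	 * for offset 0x1234, length 0x100 becomes offset 0x1000, length
	 * 0x334, which PAGE_CACHE_ALIGN then rounds up to 0x1000, so the
	 * LAYOUTGET asks for one whole page.
	 */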
	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	pnfs_clear_retry_layoutget(lo);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	if (first) {
		unsigned long *bitlock = &lo->plh_flags;

		clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
		smp_mb__after_atomic();
		wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
	}
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &lgp->args.range, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget_reply;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, false);
	} else {
		/*
		 * We got an entirely new state ID.  Mark all segments for the
		 * inode invalid, and don't bother validating the stateid
		 * sequence number.
		 */
		pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);

		nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
		lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
	}

	clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
static void
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				struct pnfs_layout_range *return_range)
{
	struct pnfs_layout_segment *lseg, *next;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return;

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(&lseg->pls_range, return_range)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
			mark_lseg_invalid(lseg, tmp_list);
		}
}
void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
	int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode);
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(free_me);

	spin_lock(&inode->i_lock);
	/* set failure bit so that pnfs path will be retried later */
	pnfs_layout_set_fail_bit(lo, iomode);
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	if (lo->plh_return_iomode == 0)
		lo->plh_return_iomode = range.iomode;
	else if (lo->plh_return_iomode != range.iomode)
		lo->plh_return_iomode = IOMODE_ANY;
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	pnfs_mark_matching_lsegs_return(lo, &free_me, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&free_me);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (pgio->pg_dreq == NULL)
		rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
	else
		rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   rd_size,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   wb_size,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are less bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	if (pgio->pg_lseg) {
		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start > seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end)
			return 0;

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
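
/*
 * Illustrative example: if nfs_generic_pg_test() would allow 4096 bytes
 * but the request starts 1024 bytes before the end of the current layout
 * segment, seg_left == 1024 and the result is clamped to 1024, forcing
 * the caller to split I/O at the segment boundary.
 */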
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}
/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
}
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
}
static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}
/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
}
/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
/* Resend all requests through pnfs. */
int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;
	int err = 0;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_TRY_AGAIN)
		err = pnfs_read_resend_pnfs(hdr);
	if (trypnfs == PNFS_NOT_ATTEMPTED || err)
		pnfs_read_through_mds(desc, hdr);
}
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}
/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}
static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
void
pnfs_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = hdr->mds_offset + hdr->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data)
{
	struct inode *inode = data->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(data->lseg);
	}
	if (data->lwb > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = data->lwb;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, data->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			spin_lock(&inode->i_lock);
			if (end_pos < nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			spin_unlock(&inode->i_lock);
			put_rpccred(data->cred);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			goto clear_layoutcommitting;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (thp == NULL) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}