/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)

static struct group_info	*ff_zero_group;

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (!ffl)
		return NULL;
	INIT_LIST_HEAD(&ffl->error_list);
	INIT_LIST_HEAD(&ffl->mirrors);
	ffl->last_report_time = ktime_get();
	return &ffl->generic_hdr;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

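/*
 * XDR decode helpers for the flexfile layout body returned by LAYOUTGET.
 * Each helper consumes one field from the xdr_stream and returns 0 on
 * success or a negative errno if the stream is short or the field is
 * malformed.
 */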
static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					   &m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

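/*
 * Mirrors are shared across layout segments of a file: if the layout
 * already holds a mirror with the same deviceid and file handle list,
 * take a reference on the existing one instead of adding a duplicate.
 */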
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (mirror->mirror_ds != pos->mirror_ds)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;

	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct rpc_cred	*cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	if (cred)
		put_rpccred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	if (cred)
		put_rpccred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	ff_layout_free_mirror_array(fls);
	kfree(fls);
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

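/* Order the mirror array by decreasing efficiency so that reads try the
 * most efficient data server first.
 */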
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

static void ff_layout_mark_devices_valid(struct nfs4_ff_layout_segment *fls)
{
	struct nfs4_deviceid_node *node;
	int i;

	if (!(fls->flags & FF_FLAGS_NO_IO_THRU_MDS))
		return;
	for (i = 0; i < fls->mirror_array_cnt; i++) {
		node = &fls->mirror_array[i]->mirror_ds->id_node;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	}
}

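/*
 * Decode a flexfile layout segment: stripe unit and mirror count first,
 * then for each mirror the deviceid, efficiency, stateid, file handle
 * list and the synthetic uid/gid used to authenticate to the DS.
 */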
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct nfs4_deviceid devid;
		struct nfs4_deviceid_node *idnode;
		struct auth_cred acred = { .group_info = ff_zero_group };
		struct rpc_cred	__rcu *cred;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &devid);
		if (rc)
			goto out_err_free;

		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
						&devid, lh->plh_lc_cred,
						gfp_flags);
		/*
		 * upon success, mirror_ds is allocated by previous
		 * getdeviceinfo, or newly by .alloc_deviceid_node
		 * nfs4_find_get_deviceid failure is indeed getdeviceinfo failure
		 */
		if (!idnode)
			goto out_err_free;
		fls->mirror_array[i]->mirror_ds =
			FF_LAYOUT_MIRROR_DS(idnode);

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kzalloc(fh_count * sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.gid = make_kgid(&init_user_ns, id);

		/* find the cred for it */
		rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
		if (IS_ERR(cred)) {
			rc = PTR_ERR(cred);
			goto out_err_free;
		}

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, acred.uid),
			from_kgid(&init_user_ns, acred.gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;
	ff_layout_mark_devices_valid(fls);

	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}

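/*
 * Layoutstats bookkeeping: the busy timer counts the wall-clock time during
 * which at least one I/O request is outstanding against a mirror.
 */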
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	static const ktime_t notime = {0};
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (ktime_equal(mirror->start_time, notime))
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

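/*
 * Commit buckets are allocated lazily on the first RW layout segment;
 * one bucket per mirror is enough while only whole-file layouts are
 * supported.
 */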
static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(&cinfo->inode->i_lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(&cinfo->inode->i_lock);
		return 0;
	}
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	int idx;

	/* mirrors are sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx + 1 == fls->mirror_array_cnt)
			fail_return = true;
		ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
retry_strict:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   0,
					   NFS4_MAX_UINT64,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}

	/* If we don't have checking, do get a IOMODE_RW
	 * segment, and the server wants to avoid READs
	 * there, then retry!
	 */
	if (pgio->pg_lseg && !strict_iomode &&
	    ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		strict_iomode = true;
		goto retry_strict;
	}
}

static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

retry:
	/* Use full layout for now */
	if (!pgio->pg_lseg)
		ff_layout_pg_get_read(pgio, req, false);
	else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
		ff_layout_pg_get_read(pgio, req, true);

	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

retry:
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_put_lseg(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

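/*
 * On error, I/O is either rescheduled through pNFS (another mirror) or
 * resent through the MDS; NFS_IOHDR_REDO guards against resending the
 * same header twice.
 */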
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					    struct nfs4_state *state,
					    struct nfs_client *clp,
					    struct pnfs_layout_segment *lseg,
					    int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode, NULL);
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					    struct pnfs_layout_segment *lseg,
					    int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
	}

	return 0;
}

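/*
 * Record a DS error against the mirror so it can be reported back to the
 * server in LAYOUTRETURN, and mark the layout for return.
 */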
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		return;
	default:
		break;
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&hdr->pgio_mirror_idx))
			goto out_eagain;
		ff_layout_read_record_layoutstats_done(task, hdr);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static bool
ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
				    struct nfs4_sequence_args *args,
				    struct nfs4_sequence_res *res,
				    struct rpc_task *task)
{
	if (ds_clp->cl_session)
		return nfs41_setup_sequence(ds_clp->cl_session,
					    args, res, task);
	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
				    args, res, task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}

static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	loff_t end_offs = 0;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		ff_layout_reset_write(hdr, true);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_write(hdr, false);
		return task->tk_status;
	case -EAGAIN:
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);

	/* zero out fattr since we don't care DS attr at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

*task
,
1516 struct nfs_commit_data
*data
)
1520 trace_nfs4_pnfs_commit_ds(data
, task
->tk_status
);
1521 if (task
->tk_status
< 0)
1522 ff_layout_io_track_ds_error(data
->lseg
, data
->ds_commit_index
,
1523 data
->args
.offset
, data
->args
.count
,
1524 data
->res
.op_status
, OP_COMMIT
,
1526 err
= ff_layout_async_handle_error(task
, NULL
, data
->ds_clp
,
1527 data
->lseg
, data
->ds_commit_index
);
1530 case -NFS4ERR_RESET_TO_PNFS
:
1531 pnfs_generic_prepare_to_resend_writes(data
);
1533 case -NFS4ERR_RESET_TO_MDS
:
1534 pnfs_generic_prepare_to_resend_writes(data
);
1537 rpc_restart_call_prepare(task
);
1541 ff_layout_set_layoutcommit(data
->inode
, data
->lseg
, data
->lwb
);
static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_write_record_layoutstats_start(task, hdr);
	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}

static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	ff_layout_commit_record_layoutstats_start(task, cdata);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (ff_layout_setup_sequence(wdata->ds_clp,
				     &wdata->args.seq_args,
				     &wdata->res.seq_res,
				     task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

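/*
 * I/O is dispatched directly to the data server: pick the DS client and
 * credential for the mirror, swap in the DS file handle, and fire the RPC
 * with the v3 or v4 call ops depending on the DS protocol version.
 */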
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;
	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		return PNFS_NOT_ATTEMPTED;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return PNFS_ATTEMPTED;
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers, ret;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				  vers == 3 ? &ff_layout_commit_call_ops_v3 :
					      &ff_layout_commit_call_ops_v4,
				  how, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
				  struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args)
{
	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
	__be32 *start;
	int count = 0, ret = 0;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	/* This assume we always return _ALL_ layouts */
	spin_lock(&hdr->plh_inode->i_lock);
	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
	spin_unlock(&hdr->plh_inode->i_lock);

	*start = cpu_to_be32(count);

	return ret;
}

/* report nothing for now */
static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
				     struct xdr_stream *xdr,
				     const struct nfs4_layoutreturn_args *args)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	if (likely(p))
		*p = cpu_to_be32(0);
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args)
{
	struct pnfs_layout_hdr *lo = args->layout;
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	__be32 *start;

	dprintk("%s: Begin\n", __func__);
	start = xdr_reserve_space(xdr, 4);
	BUG_ON(!start);

	ff_layout_encode_ioerr(flo, xdr, args);
	ff_layout_encode_iostats(flo, xdr, args);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
	dprintk("%s: Return\n", __func__);
}

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

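/*
 * A per-mirror layoutstats entry carries the DS netaddr, the file handle,
 * the read and write latency counters and the duration since the first
 * I/O to the mirror.
 */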
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr,
			     struct nfs42_layoutstat_args *args,
			     struct nfs42_layoutstat_devinfo *devinfo)
{
	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p, *start;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static int
ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
			       struct pnfs_layout_hdr *lo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	struct nfs42_layoutstat_devinfo *devinfo;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (!mirror->mirror_ds)
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!atomic_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		devinfo = &args->devinfo[i];
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
		devinfo->layout_private = mirror;

		i++;
	}
	return i;
}

static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	struct nfs4_ff_layout_mirror *mirror;
	int dev_count = 0;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (atomic_read(&mirror->ref) != 0)
			dev_count++;
	}
	spin_unlock(&args->inode->i_lock);
	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
	}
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	args->num_dev = ff_layout_mirror_prepare_stats(args,
			&ff_layout->generic_hdr, dev_count);
	spin_unlock(&args->inode->i_lock);

	return 0;
}

static void
ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
{
	struct nfs4_ff_layout_mirror *mirror;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		mirror = data->args.devinfo[i].layout_private;
		data->args.devinfo[i].layout_private = NULL;
		ff_layout_put_mirror(mirror);
	}
}

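/*
 * Layout driver operations table: generic pNFS commit handling is reused,
 * while lseg and deviceid management, I/O and layoutstats are flexfiles
 * specific.
 */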
static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node	= ff_layout_alloc_deviceid_node,
	.encode_layoutreturn	= ff_layout_encode_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	if (!ff_zero_group) {
		ff_zero_group = groups_alloc(0);
		if (!ff_zero_group)
			return -ENOMEM;
	}
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
	if (ff_zero_group) {
		put_group_info(ff_zero_group);
		ff_zero_group = NULL;
	}
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);