/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)
#define FF_LAYOUTRETURN_MAXERR		20
static struct group_info	*ff_zero_group;

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
		struct nfs42_layoutstat_devinfo *devinfo,
		int dev_limit);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
		const struct nfs42_layoutstat_devinfo *devinfo,
		struct nfs4_ff_layout_mirror *mirror);
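/*
 * XDR decode helpers for the body of a flexfiles LAYOUTGET reply.
 * The layout carries a stripe unit and a mirror count, then one entry
 * per mirror: a deviceid, an efficiency hint, a DS stateid, an array of
 * per-version filehandles, and the synthetic uid/gid the client must
 * use when talking to the data server.
 */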
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl == NULL)
		return NULL;
	INIT_LIST_HEAD(&ffl->error_list);
	INIT_LIST_HEAD(&ffl->mirrors);
	ffl->last_report_time = ktime_get();
	return &ffl->generic_hdr;
}
static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}
static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}
static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}
static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	/* read in 4 bytes of filehandle length */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* filehandle body */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}
/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4) */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}
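/*
 * Two mirrors are treated as duplicates only if they point at the same
 * deviceid and carry an identical set of per-version filehandles; the
 * comparison below is order-insensitive on the fh_versions array.
 */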
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					   &m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	/* mirror not yet known: add it to the layout header's list */
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}
static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;

	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}
static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct rpc_cred	*cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	if (cred)
		put_rpccred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	if (cred)
		put_rpccred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}
static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}
static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}
static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}
static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}
static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}
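/*
 * Decide whether two cached layout segments can be coalesced: they must
 * share an iomode and their byte ranges must overlap or abut. On a
 * merge the range of 'new' is widened to cover 'old', and the
 * NFS_LSEG_ROC (return-on-close) property is inherited.
 */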
static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}
static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}
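/*
 * Order the mirror array by decreasing efficiency hint (a simple
 * in-place O(n^2) exchange sort; mirror counts are small), so that
 * read DS selection tries the most efficient mirror first.
 */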
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}
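/*
 * Decode one flexfiles layout segment from the LAYOUTGET XDR stream.
 * The wire format is: stripe_unit(8) mirror_array_cnt(4), then per
 * mirror: ds_count(4), deviceid, efficiency(4), stateid, fh_count(4),
 * fh[], user, group; optionally followed by flags(4) and a layoutstats
 * report interval(4). Decoded mirrors are deduplicated against the
 * layout header and sorted by efficiency before the segment is
 * returned.
 */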
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct auth_cred acred = { .group_info = ff_zero_group };
		struct rpc_cred	__rcu *cred;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh count */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kzalloc(fh_count * sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.gid = make_kgid(&init_user_ns, id);

		/* find the cred for it */
		rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
		if (IS_ERR(cred)) {
			rc = PTR_ERR(cred);
			goto out_err_free;
		}

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, acred.uid),
			from_kgid(&init_user_ns, acred.gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}
static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}
static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}
/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}
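/*
 * Busy-timer helpers: the timer measures wall-clock time during which
 * at least one I/O operation is outstanding against a mirror. n_ops
 * counts in-flight operations; the window is opened by the first start
 * and its accumulated duration is harvested on each completion.
 */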
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}
static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}
static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}
static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}
static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, __u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}
static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}
static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, __u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}
static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size, i;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;

	spin_lock(&cinfo->inode->i_lock);
	if (cinfo->ds->nbuckets != 0)
		kfree(buckets);
	else {
		cinfo->ds->buckets = buckets;
		cinfo->ds->nbuckets = size;
		for (i = 0; i < size; i++) {
			INIT_LIST_HEAD(&buckets[i].written);
			INIT_LIST_HEAD(&buckets[i].committing);
			/* mark direct verifier as unset */
			buckets[i].direct_verf.committed =
				NFS_INVALID_STABLE_HOW;
		}
	}
	spin_unlock(&cinfo->inode->i_lock);
	return 0;
}
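/*
 * Read mirror selection: the mirror array is sorted by efficiency, so
 * scan forward from start_idx and return the first mirror whose DS can
 * be connected. Only the last candidate is allowed to fail hard
 * (fail_return), so earlier connection failures fall through to the
 * next mirror instead of erroring out.
 */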
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	int idx;

	/* mirrors are sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx+1 == fls->mirror_array_cnt)
			fail_return = true;
		ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}
static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
retry_strict:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   0,
					   NFS4_MAX_UINT64,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		return;
	}

	/* If we don't have checking, do get a IOMODE_RW
	 * segment, and the server wants to avoid READs
	 * there, then retry!
	 */
	if (pgio->pg_lseg && !strict_iomode &&
	    ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		strict_iomode = true;
		goto retry_strict;
	}
}
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

retry:
	/* Use full layout for now */
	if (!pgio->pg_lseg)
		ff_layout_pg_get_read(pgio, req, false);
	else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
		ff_layout_pg_get_read(pgio, req, true);

	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}
static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

retry:
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_put_lseg(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}
static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}
static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};
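/*
 * When a DS I/O fails, the request is either re-driven through pNFS
 * (retry_pnfs, e.g. on another mirror) or redirected to the MDS. Both
 * reset paths below guard against double resends with the
 * NFS_IOHDR_REDO bit before rescheduling the I/O.
 */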
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}
static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}
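/*
 * NFSv4 DS error handling. The return value steers the caller:
 * -NFS4ERR_RESET_TO_PNFS retries the I/O on the remaining mirrors,
 * -NFS4ERR_RESET_TO_MDS falls back to the metadata server, and -EAGAIN
 * means the RPC has been restarted, delayed or parked in place.
 */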
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode, NULL);
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	return -EAGAIN;
}
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}
static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		/* map local transport errors onto an NFS4 status */
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}
/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&hdr->pgio_mirror_idx))
			goto out_eagain;
		ff_layout_read_record_layoutstats_done(task, hdr);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}
static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}
/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}
static bool
ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}
static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
}
static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}
/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}
static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
				    struct nfs4_sequence_args *args,
				    struct nfs4_sequence_res *res,
				    struct rpc_task *task)
{
	if (ds_clp->cl_session)
		return nfs41_setup_sequence(ds_clp->cl_session,
					    args, res, task);
	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
				    args, res, task);
}
static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}
static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}
static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}
static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	loff_t end_offs = 0;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		ff_layout_reset_write(hdr, true);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_write(hdr, false);
		return task->tk_status;
	case -EAGAIN:
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);

	/* zero out fattr since we don't care DS attr at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}
static int ff_layout_commit_done_cb(struct rpc_task *task,
				     struct nfs_commit_data *data)
{
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}
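/*
 * Layoutstats bookkeeping for writes and commits mirrors the read side:
 * the NFS_IOHDR_STAT bit guards against double accounting, so the
 * start hook runs once per header and the done hook only fires if the
 * start hook actually ran.
 */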
static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);
}
static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_write_record_layoutstats_start(task, hdr);
	return 0;
}
static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}
static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}
static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}
static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}
static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);
}
static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	ff_layout_commit_record_layoutstats_start(task, cdata);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}
static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (ff_layout_setup_sequence(wdata->ds_clp,
				     &wdata->args.seq_args,
				     &wdata->res.seq_res,
				     task))
		return;
	ff_layout_commit_prepare_common(task, data);
}
static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}
static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}
static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};
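/*
 * I/O submission to a data server: pick the DS (plus its rpc_clnt and
 * per-mirror credential), stash the DS filehandle and nfs_client in the
 * pgio header, and fire the RPC with RPC_TASK_SOFTCONN so a dead
 * connection fails fast instead of retrying forever.
 */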
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;
	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}
/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		return PNFS_NOT_ATTEMPTED;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return PNFS_ATTEMPTED;
}
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers, ret;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				  vers == 3 ? &ff_layout_commit_call_ops_v3 :
					      &ff_layout_commit_call_ops_v4,
				  how, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}
static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			  int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}
static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}
static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}
static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args,
				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *start;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	*start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}
static void
encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, len);
	xdr_encode_opaque_fixed(p, buf, len);
}
static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}
static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
			devinfo->ld_private.data);
}
/* report nothing for now */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
		const struct nfs4_layoutreturn_args *args,
		struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
				&args->layout->plh_stateid,
				&ff_args->devinfo[i]);
}
static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
		unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}
static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
		const void *voidargs,
		const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	/* encode into a scratch page first so the total length is known */
	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}
static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}
static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};
static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(GFP_KERNEL);
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
			&args->range, &ff_args->errors,
			FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}
static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}
static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
					&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}
/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}
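/*
 * nfstime4 is encoded as a 64-bit seconds field followed by a 32-bit
 * nanoseconds field, hence the 12-byte reservation below.
 */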
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}
static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}
static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_mirror *mirror = opaque->data;

	ff_layout_put_mirror(mirror);
}
static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode	= ff_layout_encode_layoutstats,
	.free	= ff_layout_free_layoutstats,
};
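/*
 * Snapshot per-mirror counters into the devinfo array, up to dev_limit
 * entries. Counters are copied under mirror->lock, and each selected
 * mirror contributes a reference that is dropped later by
 * layoutstat_ops.free once the layoutstats RPC is done with it.
 */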
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (IS_ERR_OR_NULL(mirror->mirror_ds))
			continue;
		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!atomic_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		spin_lock(&mirror->lock);
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		spin_unlock(&mirror->lock);
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->ld_private.ops = &layoutstat_ops;
		devinfo->ld_private.data = mirror;

		devinfo++;
		i++;
	}
	return i;
}
static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&args->devinfo[0], dev_count);
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}
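/*
 * Registration glue: this table wires the flexfiles driver into the
 * generic pNFS layer. Anything not flexfiles-specific (commit list
 * scanning, request marking, sync) is delegated to the pnfs_generic_*
 * and pnfs_nfs_* helpers.
 */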
static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node	= ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn	= ff_layout_prepare_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
};
static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	if (!ff_zero_group) {
		ff_zero_group = groups_alloc(0);
		if (!ff_zero_group)
			return -ENOMEM;
	}
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
	if (ff_zero_group) {
		put_group_info(ff_zero_group);
		ff_zero_group = NULL;
	}
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);