/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)

static struct group_info	*ff_zero_group;

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

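/*
 * Decode an XDR opaque filehandle: a 4-byte length followed by the fh
 * data itself.  Sizes larger than struct nfs_fh can hold are rejected.
 */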
static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., Kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4) */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

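/*
 * Two mirrors are considered equal only if they carry the same number
 * of filehandle versions and every fh in m1 has a matching fh somewhere
 * in m2; the ordering of the fh_versions arrays does not matter.
 */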
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (mirror->mirror_ds != pos->mirror_ds)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct rpc_cred *cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	if (cred)
		put_rpccred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	if (cred)
		put_rpccred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

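/*
 * Try to fold 'old' into 'new'.  Only overlapping segments with the
 * same iomode are merged; segments already marked NFS_LSEG_LAYOUTRETURN
 * are never merged.  On success 'new' absorbs old's range info and
 * return-on-close flag, so the caller can free 'old'.
 */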
static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

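/* Simple O(n^2) sort of the mirror array, highest efficiency first. */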
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

static void ff_layout_mark_devices_valid(struct nfs4_ff_layout_segment *fls)
{
	struct nfs4_deviceid_node *node;
	int i;

	if (!(fls->flags & FF_FLAGS_NO_IO_THRU_MDS))
		return;
	for (i = 0; i < fls->mirror_array_cnt; i++) {
		node = &fls->mirror_array[i]->mirror_ds->id_node;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	}
}

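/*
 * Decode the ff_layout4 body returned by LAYOUTGET: the stripe unit and
 * mirror count, then for each mirror a deviceid, efficiency, stateid,
 * an array of filehandle versions, and the uid/gid to use towards the
 * DS.  A mirror that duplicates an existing one is shared via
 * ff_layout_add_mirror() instead of being kept twice.
 */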
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		struct nfs4_layoutget_res *lgr,
		gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct nfs4_deviceid devid;
		struct nfs4_deviceid_node *idnode;
		struct auth_cred acred = { .group_info = ff_zero_group };
		struct rpc_cred	__rcu *cred;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &devid);
		if (rc)
			goto out_err_free;

		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
						&devid, lh->plh_lc_cred,
						gfp_flags);
		/*
		 * Upon success, mirror_ds was allocated by a previous
		 * GETDEVICEINFO, or newly by .alloc_deviceid_node; a
		 * nfs4_find_get_deviceid() failure is really a
		 * GETDEVICEINFO failure.
		 */
		if (idnode)
			fls->mirror_array[i]->mirror_ds =
				FF_LAYOUT_MIRROR_DS(idnode);
		else
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kzalloc(fh_count * sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.gid = make_kgid(&init_user_ns, id);

		/* find the cred for it */
		rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
		if (IS_ERR(cred)) {
			rc = PTR_ERR(cred);
			goto out_err_free;
		}

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, acred.uid),
			from_kgid(&init_user_ns, acred.gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;
	ff_layout_mark_devices_valid(fls);

	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

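/*
 * Account the start of an I/O op against the mirror and decide whether
 * it is time to transmit a LAYOUTSTATS report: returns true once the
 * per-mirror report_interval (or the global layoutstats_timer) has
 * elapsed since the layout last reported.
 */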
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	static const ktime_t notime = {0};
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (ktime_equal(mirror->start_time, notime))
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
	    report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
		ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
		ktime_add(iostat->aggregate_completion_time,
			  completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

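/*
 * Lazily allocate the commit buckets for this layout: one bucket per
 * mirror.  A racing allocation is resolved by rechecking nbuckets under
 * the inode lock and freeing the loser's array.
 */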
static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(&cinfo->inode->i_lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(&cinfo->inode->i_lock);
		return 0;
	}
}

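/*
 * Walk the efficiency-sorted mirror array from start_idx and return the
 * first data server that can be set up.  Only the last candidate is
 * allowed to report a hard connection failure.
 */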
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	int idx;

	/* mirrors are sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx+1 == fls->mirror_array_cnt)
			fail_return = true;
		ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
retry_strict:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   0,
					   NFS4_MAX_UINT64,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}

	/* If we were not checking the iomode, got an IOMODE_RW
	 * segment, and the server wants to avoid READs on it, then
	 * retry with strict_iomode set!
	 */
	if (pgio->pg_lseg && !strict_iomode &&
	    ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		strict_iomode = true;
		goto retry_strict;
	}
}


static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

retry:
	/* Use full layout for now */
	if (!pgio->pg_lseg)
		ff_layout_pg_get_read(pgio, req, false);
	else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
		ff_layout_pg_get_read(pgio, req, true);

	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

retry:
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_put_lseg(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

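/*
 * Sort an NFSv4 DS error into one of four outcomes: 0 (give up and
 * report the error), -EAGAIN (retry after recovery or a delay),
 * -NFS4ERR_RESET_TO_PNFS (try another mirror), or
 * -NFS4ERR_RESET_TO_MDS (redrive the I/O through the MDS).
 */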
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode, NULL);
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

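/*
 * Record a DS I/O error against the mirror so that it can be reported
 * back in a later LAYOUTRETURN.  Local transport errnos are first
 * mapped onto NFS4ERR_NXIO or NFS4ERR_ACCESS; DELAY and GRACE are
 * deliberately not recorded.
 */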
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		return;
	default:
		break;
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&hdr->pgio_mirror_idx))
			goto out_eagain;
		ff_layout_read_record_layoutstats_done(task, hdr);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * RFC 5661 is not clear about which credential should be used.
 *
 * Flexfiles clients should treat a DS-replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static bool
ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
				    struct nfs4_sequence_args *args,
				    struct nfs4_sequence_res *res,
				    struct rpc_task *task)
{
	if (ds_clp->cl_session)
		return nfs41_setup_sequence(ds_clp->cl_session,
					    args,
					    res,
					    task);
	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
				    args,
				    res,
				    task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}


static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	loff_t end_offs = 0;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		ff_layout_reset_write(hdr, true);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_write(hdr, false);
		return task->tk_status;
	case -EAGAIN:
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);

	/* zero out fattr since we don't care about DS attrs at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

static int ff_layout_commit_done_cb(struct rpc_task *task,
				 struct nfs_commit_data *data)
{
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}

static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_write_record_layoutstats_start(task, hdr);
	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}

static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	ff_layout_commit_record_layoutstats_start(task, cdata);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (ff_layout_setup_sequence(wdata->ds_clp,
				     &wdata->args.seq_args,
				     &wdata->res.seq_res,
				     task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

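/*
 * Set up and fire an asynchronous READ to the selected mirror: find or
 * create the DS rpc client, pick up the DS credential and filehandle,
 * then hand the request to nfs_initiate_pgio() with the v3 or v4 call
 * ops.
 */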
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;
	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		return PNFS_NOT_ATTEMPTED;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return PNFS_ATTEMPTED;
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers, ret;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				  vers == 3 ? &ff_layout_commit_call_ops_v3 :
					      &ff_layout_commit_call_ops_v4,
				  how, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			  int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

1968 static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
1969 struct xdr_stream *xdr,
1970 const struct nfs4_layoutreturn_args *args)
1971 {
1972 struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
1973 __be32 *start;
1974 int count = 0, ret = 0;
1975
1976 start = xdr_reserve_space(xdr, 4);
1977 if (unlikely(!start))
1978 return -E2BIG;
1979
1980 	/* This assumes we always return _ALL_ layouts */
1981 spin_lock(&hdr->plh_inode->i_lock);
1982 ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
1983 spin_unlock(&hdr->plh_inode->i_lock);
1984
1985 *start = cpu_to_be32(count);
1986
1987 return ret;
1988 }
1989
1990 /* Report nothing for now: encode a zero-length iostats array */
1991 static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
1992 struct xdr_stream *xdr,
1993 const struct nfs4_layoutreturn_args *args)
1994 {
1995 __be32 *p;
1996
1997 p = xdr_reserve_space(xdr, 4);
1998 if (likely(p))
1999 *p = cpu_to_be32(0);
2000 }
2001
2002 static struct nfs4_deviceid_node *
2003 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2004 struct pnfs_device *pdev, gfp_t gfp_flags)
2005 {
2006 struct nfs4_ff_layout_ds *dsaddr;
2007
2008 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2009 if (!dsaddr)
2010 return NULL;
2011 return &dsaddr->id_node;
2012 }
2013
2014 static void
2015 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2016 const struct nfs4_layoutreturn_args *args)
2017 {
2018 struct pnfs_layout_hdr *lo = args->layout;
2019 struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
2020 __be32 *start;
2021
2022 dprintk("%s: Begin\n", __func__);
2023 start = xdr_reserve_space(xdr, 4);
	if (WARN_ON_ONCE(!start))
		return;
2025
2026 ff_layout_encode_ioerr(flo, xdr, args);
2027 ff_layout_encode_iostats(flo, xdr, args);
2028
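	/*
	 * Backfill the opaque body length: xdr->p now points one word
	 * past the last word encoded, while start points at the
	 * reserved length word itself, so (xdr->p - start - 1) is the
	 * payload size in 4-byte XDR words. E.g. if the two encoders
	 * above emitted six words, xdr->p - start == 7 and the length
	 * encodes as 24 bytes.
	 */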
2029 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2030 dprintk("%s: Return\n", __func__);
2031 }
2032
2033 static int
2034 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2035 {
2036 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2037
2038 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2039 }
2040
2041 static size_t
2042 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2043 const int buflen)
2044 {
2045 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2046 const struct in6_addr *addr = &sin6->sin6_addr;
2047
2048 /*
2049 * RFC 4291, Section 2.2.2
2050 *
2051 * Shorthanded ANY address
2052 */
2053 if (ipv6_addr_any(addr))
2054 return snprintf(buf, buflen, "::");
2055
2056 /*
2057 * RFC 4291, Section 2.2.2
2058 *
2059 * Shorthanded loopback address
2060 */
2061 if (ipv6_addr_loopback(addr))
2062 return snprintf(buf, buflen, "::1");
2063
2064 /*
2065 * RFC 4291, Section 2.2.3
2066 *
2067 * Special presentation address format for mapped v4
2068 * addresses.
2069 */
2070 if (ipv6_addr_v4mapped(addr))
2071 return snprintf(buf, buflen, "::ffff:%pI4",
2072 &addr->s6_addr32[3]);
2073
2074 /*
2075 * RFC 4291, Section 2.2.1
2076 */
2077 return snprintf(buf, buflen, "%pI6c", addr);
2078 }
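/*
 * The IPv6 scope id is deliberately omitted: it only identifies a
 * local interface, so it is meaningless to the server this address is
 * reported to.
 */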
2079
2080 /* Derived from rpc_sockaddr2uaddr */
2081 static void
2082 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2083 {
2084 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2085 char portbuf[RPCBIND_MAXUADDRPLEN];
2086 char addrbuf[RPCBIND_MAXUADDRLEN];
2087 char *netid;
2088 unsigned short port;
2089 int len, netid_len;
2090 __be32 *p;
2091
2092 switch (sap->sa_family) {
2093 case AF_INET:
2094 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2095 return;
2096 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2097 netid = "tcp";
2098 netid_len = 3;
2099 break;
2100 case AF_INET6:
2101 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2102 return;
2103 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2104 netid = "tcp6";
2105 netid_len = 4;
2106 break;
2107 default:
2108 /* we only support tcp and tcp6 */
2109 WARN_ON_ONCE(1);
2110 return;
2111 }
2112
2113 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2114 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2115
2116 p = xdr_reserve_space(xdr, 4 + netid_len);
2117 xdr_encode_opaque(p, netid, netid_len);
2118
2119 p = xdr_reserve_space(xdr, 4 + len);
2120 xdr_encode_opaque(p, addrbuf, len);
2121 }
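/*
 * Worked example of the universal-address encoding above: for an
 * AF_INET data server at 192.0.2.1 port 2049 (0x0801), the netid is
 * encoded as the opaque "tcp" and the uaddr as the opaque
 * "192.0.2.1.8.1" -- the last two dotted components are the high and
 * low bytes of the port.
 */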
2122
2123 static void
2124 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2125 ktime_t t)
2126 {
2127 struct timespec64 ts;
2128 __be32 *p;
2129
2130 p = xdr_reserve_space(xdr, 12);
2131 ts = ktime_to_timespec64(t);
2132 p = xdr_encode_hyper(p, ts.tv_sec);
2133 *p++ = cpu_to_be32(ts.tv_nsec);
2134 }
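/*
 * nfstime4 occupies 12 XDR bytes: a 64-bit seconds hyper followed by
 * a 32-bit nseconds word, hence the reserve of 12 above.
 */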
2135
2136 static void
2137 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2138 struct nfs4_ff_io_stat *stat)
2139 {
2140 __be32 *p;
2141
2142 p = xdr_reserve_space(xdr, 5 * 8);
2143 p = xdr_encode_hyper(p, stat->ops_requested);
2144 p = xdr_encode_hyper(p, stat->bytes_requested);
2145 p = xdr_encode_hyper(p, stat->ops_completed);
2146 p = xdr_encode_hyper(p, stat->bytes_completed);
2147 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2148 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2149 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2150 }
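/*
 * The order above mirrors the ff_io_latency4 wire format from the
 * flexfiles layoutstats spec: five uint64 counters (ops and bytes
 * requested/completed, plus bytes not delivered) followed by two
 * nfstime4 values for total busy time and aggregate completion time.
 */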
2151
2152 static void
2153 ff_layout_encode_layoutstats(struct xdr_stream *xdr,
2154 struct nfs42_layoutstat_args *args,
2155 struct nfs42_layoutstat_devinfo *devinfo)
2156 {
2157 struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
2158 struct nfs4_pnfs_ds_addr *da;
2159 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2160 struct nfs_fh *fh = &mirror->fh_versions[0];
2161 __be32 *p, *start;
2162
2163 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2164 dprintk("%s: DS %s: encoding address %s\n",
2165 __func__, ds->ds_remotestr, da->da_remotestr);
2166 /* layoutupdate length */
2167 start = xdr_reserve_space(xdr, 4);
2168 /* netaddr4 */
2169 ff_layout_encode_netaddr(xdr, da);
2170 /* nfs_fh4 */
2171 p = xdr_reserve_space(xdr, 4 + fh->size);
2172 xdr_encode_opaque(p, fh->data, fh->size);
2173 /* ff_io_latency4 read */
2174 spin_lock(&mirror->lock);
2175 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2176 /* ff_io_latency4 write */
2177 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2178 spin_unlock(&mirror->lock);
2179 /* nfstime4 */
2180 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2181 /* bool */
2182 p = xdr_reserve_space(xdr, 4);
2183 *p = cpu_to_be32(false);
2184
2185 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2186 }
2187
2188 static int
2189 ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
2190 struct pnfs_layout_hdr *lo,
2191 int dev_limit)
2192 {
2193 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2194 struct nfs4_ff_layout_mirror *mirror;
2195 struct nfs4_deviceid_node *dev;
2196 struct nfs42_layoutstat_devinfo *devinfo;
2197 int i = 0;
2198
2199 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2200 if (i >= dev_limit)
2201 break;
2202 if (!mirror->mirror_ds)
2203 continue;
2204 		/* mirror refcount put in ff_layout_cleanup_layoutstats() */
2205 if (!atomic_inc_not_zero(&mirror->ref))
2206 continue;
2207 dev = &mirror->mirror_ds->id_node;
2208 devinfo = &args->devinfo[i];
2209 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2210 devinfo->offset = 0;
2211 devinfo->length = NFS4_MAX_UINT64;
2212 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2213 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2214 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2215 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2216 devinfo->layout_type = LAYOUT_FLEX_FILES;
2217 devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
2218 devinfo->layout_private = mirror;
2219
2220 i++;
2221 }
2222 return i;
2223 }
2224
2225 static int
2226 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2227 {
2228 struct nfs4_flexfile_layout *ff_layout;
2229 struct nfs4_ff_layout_mirror *mirror;
2230 int dev_count = 0;
2231
2232 spin_lock(&args->inode->i_lock);
2233 ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2234 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2235 if (atomic_read(&mirror->ref) != 0)
2236 			dev_count++;
2237 }
2238 spin_unlock(&args->inode->i_lock);
2239 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2240 if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
2241 dprintk("%s: truncating devinfo to limit (%d:%d)\n",
2242 __func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
2243 dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2244 }
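	/*
	 * The inode lock is dropped across the allocation (GFP_NOIO,
	 * as this can run in a memory-reclaim/writeback context), so
	 * the mirror list may change before we retake it; the prepare
	 * pass below caps at dev_count, so args->num_dev can only come
	 * back smaller than the array we allocated, never larger.
	 */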
2245 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2246 if (!args->devinfo)
2247 return -ENOMEM;
2248
2249 spin_lock(&args->inode->i_lock);
2250 args->num_dev = ff_layout_mirror_prepare_stats(args,
2251 &ff_layout->generic_hdr, dev_count);
2252 spin_unlock(&args->inode->i_lock);
2253
2254 return 0;
2255 }
2256
2257 static void
2258 ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
2259 {
2260 struct nfs4_ff_layout_mirror *mirror;
2261 int i;
2262
2263 for (i = 0; i < data->args.num_dev; i++) {
2264 mirror = data->args.devinfo[i].layout_private;
2265 data->args.devinfo[i].layout_private = NULL;
2266 ff_layout_put_mirror(mirror);
2267 }
2268 }
2269
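/*
 * Most of the commit plumbing is delegated to the pnfs_generic_*
 * helpers below; this driver only supplies the per-DS pieces such as
 * initiating the RPCs and choosing file handles and credentials.
 */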
2270 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2271 .id = LAYOUT_FLEX_FILES,
2272 .name = "LAYOUT_FLEX_FILES",
2273 .owner = THIS_MODULE,
2274 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
2275 .free_layout_hdr = ff_layout_free_layout_hdr,
2276 .alloc_lseg = ff_layout_alloc_lseg,
2277 .free_lseg = ff_layout_free_lseg,
2278 .add_lseg = ff_layout_add_lseg,
2279 .pg_read_ops = &ff_layout_pg_read_ops,
2280 .pg_write_ops = &ff_layout_pg_write_ops,
2281 .get_ds_info = ff_layout_get_ds_info,
2282 .free_deviceid_node = ff_layout_free_deviceid_node,
2283 .mark_request_commit = pnfs_layout_mark_request_commit,
2284 .clear_request_commit = pnfs_generic_clear_request_commit,
2285 .scan_commit_lists = pnfs_generic_scan_commit_lists,
2286 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
2287 .commit_pagelist = ff_layout_commit_pagelist,
2288 .read_pagelist = ff_layout_read_pagelist,
2289 .write_pagelist = ff_layout_write_pagelist,
2290 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
2291 .encode_layoutreturn = ff_layout_encode_layoutreturn,
2292 .sync = pnfs_nfs_generic_sync,
2293 .prepare_layoutstats = ff_layout_prepare_layoutstats,
2294 .cleanup_layoutstats = ff_layout_cleanup_layoutstats,
2295 };
2296
2297 static int __init nfs4flexfilelayout_init(void)
2298 {
2299 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2300 __func__);
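	/*
	 * ff_zero_group is an empty supplementary group list, shared by
	 * the RPC credentials this driver builds from the uid/gid
	 * carried in the layout; allocate it once here.
	 */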
2301 if (!ff_zero_group) {
2302 ff_zero_group = groups_alloc(0);
2303 if (!ff_zero_group)
2304 return -ENOMEM;
2305 }
2306 return pnfs_register_layoutdriver(&flexfilelayout_type);
2307 }
2308
2309 static void __exit nfs4flexfilelayout_exit(void)
2310 {
2311 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2312 __func__);
2313 pnfs_unregister_layoutdriver(&flexfilelayout_type);
2314 if (ff_zero_group) {
2315 put_group_info(ff_zero_group);
2316 ff_zero_group = NULL;
2317 }
2318 }
2319
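/*
 * "nfs-layouttype4-4" lets the pNFS core autoload this module when a
 * server offers layout type 4 (LAYOUT_FLEX_FILES).
 */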
2320 MODULE_ALIAS("nfs-layouttype4-4");
2321
2322 MODULE_LICENSE("GPL");
2323 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2324
2325 module_init(nfs4flexfilelayout_init);
2326 module_exit(nfs4flexfilelayout_exit);