fs/nfs/flexfilelayout/flexfilelayout.c
1/*
2 * Module for pnfs flexfile layout driver.
3 *
4 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
5 *
6 * Tao Peng <bergwolf@primarydata.com>
7 */
8
9#include <linux/nfs_fs.h>
10#include <linux/nfs_page.h>
11#include <linux/module.h>
12
13#include <linux/sunrpc/metrics.h>
14
15#include "flexfilelayout.h"
16#include "../nfs4session.h"
40c64c26 17#include "../nfs4idmap.h"
18#include "../internal.h"
19#include "../delegation.h"
20#include "../nfs4trace.h"
21#include "../iostat.h"
22#include "../nfs.h"
ad4dc53e 23#include "../nfs42.h"
24
25#define NFSDBG_FACILITY NFSDBG_PNFS_LD
26
27#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
28
29static struct pnfs_layout_hdr *
30ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
31{
32 struct nfs4_flexfile_layout *ffl;
33
34 ffl = kzalloc(sizeof(*ffl), gfp_flags);
35 if (ffl) {
36 INIT_LIST_HEAD(&ffl->error_list);
266d12d4 37 INIT_LIST_HEAD(&ffl->mirrors);
38 return &ffl->generic_hdr;
39 } else
40 return NULL;
41}
42
43static void
44ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
45{
46 struct nfs4_ff_layout_ds_err *err, *n;
47
48 list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
49 list) {
50 list_del(&err->list);
51 kfree(err);
52 }
53 kfree(FF_LAYOUT_FROM_HDR(lo));
54}
55
56static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
57{
58 __be32 *p;
59
60 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
61 if (unlikely(p == NULL))
62 return -ENOBUFS;
63 memcpy(stateid, p, NFS4_STATEID_SIZE);
64 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
65 p[0], p[1], p[2], p[3]);
66 return 0;
67}
68
69static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
70{
71 __be32 *p;
72
73 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
74 if (unlikely(!p))
75 return -ENOBUFS;
76 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
77 nfs4_print_deviceid(devid);
78 return 0;
79}
80
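/*
 * Decode an NFS filehandle from the layout: a 4-byte length followed by
 * the opaque fh data. Anything larger than struct nfs_fh can hold is
 * rejected with -EOVERFLOW.
 */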
81static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
82{
83 __be32 *p;
84
85 p = xdr_inline_decode(xdr, 4);
86 if (unlikely(!p))
87 return -ENOBUFS;
88 fh->size = be32_to_cpup(p++);
89 if (fh->size > sizeof(struct nfs_fh)) {
90 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
91 fh->size);
92 return -EOVERFLOW;
93 }
94 /* fh.data */
95 p = xdr_inline_decode(xdr, fh->size);
96 if (unlikely(!p))
97 return -ENOBUFS;
98 memcpy(&fh->data, p, fh->size);
99 dprintk("%s: fh len %d\n", __func__, fh->size);
100
101 return 0;
102}
103
104/*
105 * Currently only stringified uids and gids are accepted.
106 * I.e., Kerberos is not supported to the DSes, so no principals.
107 *
108 * That means that one common function will suffice, but when
109 * principals are added, this should be split to accommodate
110 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
111 */
112static int
113decode_name(struct xdr_stream *xdr, u32 *id)
114{
115 __be32 *p;
116 int len;
117
118 /* opaque_length(4) */
119 p = xdr_inline_decode(xdr, 4);
120 if (unlikely(!p))
121 return -ENOBUFS;
122 len = be32_to_cpup(p++);
123 if (len < 0)
124 return -EINVAL;
125
126 dprintk("%s: len %u\n", __func__, len);
127
128 /* opaque body */
129 p = xdr_inline_decode(xdr, len);
130 if (unlikely(!p))
131 return -ENOBUFS;
132
133 if (!nfs_map_string_to_numeric((char *)p, len, id))
134 return -EINVAL;
135
136 return 0;
137}
138
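/*
 * Two mirrors match only if they have the same number of filehandle
 * versions and every fh in one has an equal counterpart in the other.
 */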
139static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
140 const struct nfs4_ff_layout_mirror *m2)
141{
142 int i, j;
143
144 if (m1->fh_versions_cnt != m2->fh_versions_cnt)
145 return false;
146 for (i = 0; i < m1->fh_versions_cnt; i++) {
147 bool found_fh = false;
148 for (j = 0; j < m2->fh_versions_cnt; j++) {
149 if (nfs_compare_fh(&m1->fh_versions[i],
150 &m2->fh_versions[j]) == 0) {
151 found_fh = true;
152 break;
153 }
154 }
155 if (!found_fh)
156 return false;
157 }
158 return true;
159}
160
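/*
 * Under inode->i_lock, look for an existing mirror with the same DS and
 * filehandles. If one is found, take a reference and return it; otherwise
 * add the new mirror to the layout's mirror list.
 */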
161static struct nfs4_ff_layout_mirror *
162ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
163 struct nfs4_ff_layout_mirror *mirror)
164{
165 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
166 struct nfs4_ff_layout_mirror *pos;
167 struct inode *inode = lo->plh_inode;
168
169 spin_lock(&inode->i_lock);
170 list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
171 if (mirror->mirror_ds != pos->mirror_ds)
172 continue;
173 if (!ff_mirror_match_fh(mirror, pos))
174 continue;
175 if (atomic_inc_not_zero(&pos->ref)) {
176 spin_unlock(&inode->i_lock);
177 return pos;
178 }
179 }
180 list_add(&mirror->mirrors, &ff_layout->mirrors);
181 mirror->layout = lo;
182 spin_unlock(&inode->i_lock);
183 return mirror;
184}
185
e3b1df2d 186static void
187ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
188{
189 struct inode *inode;
190 if (mirror->layout == NULL)
191 return;
192 inode = mirror->layout->plh_inode;
193 spin_lock(&inode->i_lock);
194 list_del(&mirror->mirrors);
195 spin_unlock(&inode->i_lock);
196 mirror->layout = NULL;
197}
198
199static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
200{
201 struct nfs4_ff_layout_mirror *mirror;
202
203 mirror = kzalloc(sizeof(*mirror), gfp_flags);
204 if (mirror != NULL) {
205 spin_lock_init(&mirror->lock);
206 atomic_set(&mirror->ref, 1);
266d12d4 207 INIT_LIST_HEAD(&mirror->mirrors);
208 }
209 return mirror;
210}
211
212static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
213{
266d12d4 214 ff_layout_remove_mirror(mirror);
215 kfree(mirror->fh_versions);
216 nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
217 kfree(mirror);
218}
219
220static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
221{
222 if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
223 ff_layout_free_mirror(mirror);
224}
225
226static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
227{
228 int i;
229
230 if (fls->mirror_array) {
231 for (i = 0; i < fls->mirror_array_cnt; i++) {
232 /* normally mirror_ds is freed in
233 * .free_deviceid_node but we still do it here
234 * for the .alloc_lseg error path */
28a0d72c 235 ff_layout_put_mirror(fls->mirror_array[i]);
236 }
237 kfree(fls->mirror_array);
238 fls->mirror_array = NULL;
239 }
240}
241
242static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
243{
244 int ret = 0;
245
246 dprintk("--> %s\n", __func__);
247
248 /* FIXME: remove this check when layout segment support is added */
249 if (lgr->range.offset != 0 ||
250 lgr->range.length != NFS4_MAX_UINT64) {
251 dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
252 __func__);
253 ret = -EINVAL;
254 }
255
256 dprintk("--> %s returns %d\n", __func__, ret);
257 return ret;
258}
259
260static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
261{
262 if (fls) {
263 ff_layout_free_mirror_array(fls);
264 kfree(fls);
265 }
266}
267
268static bool
269ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
270 const struct pnfs_layout_range *l2)
271{
272 u64 end1, end2;
273
274 if (l1->iomode != l2->iomode)
275 return l1->iomode != IOMODE_READ;
276 end1 = pnfs_calc_offset_end(l1->offset, l1->length);
277 end2 = pnfs_calc_offset_end(l2->offset, l2->length);
278 if (end1 < l2->offset)
279 return false;
280 if (end2 < l1->offset)
281 return true;
282 return l2->offset <= l1->offset;
283}
284
285static bool
286ff_lseg_merge(struct pnfs_layout_segment *new,
287 struct pnfs_layout_segment *old)
288{
289 u64 new_end, old_end;
290
291 if (new->pls_range.iomode != old->pls_range.iomode)
292 return false;
293 old_end = pnfs_calc_offset_end(old->pls_range.offset,
294 old->pls_range.length);
295 if (old_end < new->pls_range.offset)
296 return false;
297 new_end = pnfs_calc_offset_end(new->pls_range.offset,
298 new->pls_range.length);
299 if (new_end < old->pls_range.offset)
300 return false;
301
302 /* Mergeable: copy info from 'old' to 'new' */
303 if (new_end < old_end)
304 new_end = old_end;
305 if (new->pls_range.offset < old->pls_range.offset)
306 new->pls_range.offset = old->pls_range.offset;
307 new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
308 new_end);
309 if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
310 set_bit(NFS_LSEG_ROC, &new->pls_flags);
311 if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
312 set_bit(NFS_LSEG_LAYOUTRETURN, &new->pls_flags);
313 return true;
314}
315
316static void
317ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
318 struct pnfs_layout_segment *lseg,
319 struct list_head *free_me)
320{
321 pnfs_generic_layout_insert_lseg(lo, lseg,
322 ff_lseg_range_is_after,
323 ff_lseg_merge,
324 free_me);
325}
326
327static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
328{
329 int i, j;
330
331 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
332 for (j = i + 1; j < fls->mirror_array_cnt; j++)
333 if (fls->mirror_array[i]->efficiency <
334 fls->mirror_array[j]->efficiency)
335 swap(fls->mirror_array[i],
336 fls->mirror_array[j]);
337 }
338}
339
340static struct pnfs_layout_segment *
341ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
342 struct nfs4_layoutget_res *lgr,
343 gfp_t gfp_flags)
344{
345 struct pnfs_layout_segment *ret;
346 struct nfs4_ff_layout_segment *fls = NULL;
347 struct xdr_stream stream;
348 struct xdr_buf buf;
349 struct page *scratch;
350 u64 stripe_unit;
351 u32 mirror_array_cnt;
352 __be32 *p;
353 int i, rc;
354
355 dprintk("--> %s\n", __func__);
356 scratch = alloc_page(gfp_flags);
357 if (!scratch)
358 return ERR_PTR(-ENOMEM);
359
360 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
361 lgr->layoutp->len);
362 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
363
364 /* stripe unit and mirror_array_cnt */
365 rc = -EIO;
366 p = xdr_inline_decode(&stream, 8 + 4);
367 if (!p)
368 goto out_err_free;
369
370 p = xdr_decode_hyper(p, &stripe_unit);
371 mirror_array_cnt = be32_to_cpup(p++);
372 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
373 stripe_unit, mirror_array_cnt);
374
375 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
376 mirror_array_cnt == 0)
377 goto out_err_free;
378
379 rc = -ENOMEM;
380 fls = kzalloc(sizeof(*fls), gfp_flags);
381 if (!fls)
382 goto out_err_free;
383
384 fls->mirror_array_cnt = mirror_array_cnt;
385 fls->stripe_unit = stripe_unit;
386 fls->mirror_array = kcalloc(fls->mirror_array_cnt,
387 sizeof(fls->mirror_array[0]), gfp_flags);
388 if (fls->mirror_array == NULL)
389 goto out_err_free;
390
391 for (i = 0; i < fls->mirror_array_cnt; i++) {
266d12d4 392 struct nfs4_ff_layout_mirror *mirror;
393 struct nfs4_deviceid devid;
394 struct nfs4_deviceid_node *idnode;
395 u32 ds_count;
396 u32 fh_count;
397 int j;
398
399 rc = -EIO;
400 p = xdr_inline_decode(&stream, 4);
401 if (!p)
402 goto out_err_free;
403 ds_count = be32_to_cpup(p);
404
405 /* FIXME: allow for striping? */
406 if (ds_count != 1)
407 goto out_err_free;
408
28a0d72c 409 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
410 if (fls->mirror_array[i] == NULL) {
411 rc = -ENOMEM;
412 goto out_err_free;
413 }
414
415 fls->mirror_array[i]->ds_count = ds_count;
416
417 /* deviceid */
418 rc = decode_deviceid(&stream, &devid);
419 if (rc)
420 goto out_err_free;
421
422 idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
423 &devid, lh->plh_lc_cred,
424 gfp_flags);
425 /*
426 * Upon success, mirror_ds was allocated by a previous
427 * getdeviceinfo, or newly by .alloc_deviceid_node;
428 * an nfs4_find_get_deviceid failure is indeed a getdeviceinfo failure
429 */
430 if (idnode)
431 fls->mirror_array[i]->mirror_ds =
432 FF_LAYOUT_MIRROR_DS(idnode);
433 else
434 goto out_err_free;
435
436 /* efficiency */
437 rc = -EIO;
438 p = xdr_inline_decode(&stream, 4);
439 if (!p)
440 goto out_err_free;
441 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
442
443 /* stateid */
444 rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
445 if (rc)
446 goto out_err_free;
447
448 /* fh */
449 p = xdr_inline_decode(&stream, 4);
450 if (!p)
451 goto out_err_free;
452 fh_count = be32_to_cpup(p);
453
454 fls->mirror_array[i]->fh_versions =
455 kzalloc(fh_count * sizeof(struct nfs_fh),
456 gfp_flags);
457 if (fls->mirror_array[i]->fh_versions == NULL) {
458 rc = -ENOMEM;
459 goto out_err_free;
460 }
461
462 for (j = 0; j < fh_count; j++) {
463 rc = decode_nfs_fh(&stream,
464 &fls->mirror_array[i]->fh_versions[j]);
465 if (rc)
466 goto out_err_free;
467 }
468
469 fls->mirror_array[i]->fh_versions_cnt = fh_count;
470
471 /* user */
472 rc = decode_name(&stream, &fls->mirror_array[i]->uid);
473 if (rc)
474 goto out_err_free;
475
476 /* group */
477 rc = decode_name(&stream, &fls->mirror_array[i]->gid);
478 if (rc)
479 goto out_err_free;
480
481 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
482 if (mirror != fls->mirror_array[i]) {
483 ff_layout_free_mirror(fls->mirror_array[i]);
484 fls->mirror_array[i] = mirror;
485 }
486
487 dprintk("%s: uid %d gid %d\n", __func__,
488 fls->mirror_array[i]->uid,
489 fls->mirror_array[i]->gid);
490 }
491
492 p = xdr_inline_decode(&stream, 4);
493 if (p)
494 fls->flags = be32_to_cpup(p);
495
496 ff_layout_sort_mirrors(fls);
497 rc = ff_layout_check_layout(lgr);
498 if (rc)
499 goto out_err_free;
500
501 ret = &fls->generic_hdr;
502 dprintk("<-- %s (success)\n", __func__);
503out_free_page:
504 __free_page(scratch);
505 return ret;
506out_err_free:
507 _ff_layout_free_lseg(fls);
508 ret = ERR_PTR(rc);
509 dprintk("<-- %s (%d)\n", __func__, rc);
510 goto out_free_page;
511}
512
513static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
514{
515 struct pnfs_layout_segment *lseg;
516
517 list_for_each_entry(lseg, &layout->plh_segs, pls_list)
518 if (lseg->pls_range.iomode == IOMODE_RW)
519 return true;
520
521 return false;
522}
523
524static void
525ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
526{
527 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
528 int i;
529
530 dprintk("--> %s\n", __func__);
531
532 for (i = 0; i < fls->mirror_array_cnt; i++) {
533 if (fls->mirror_array[i]) {
534 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
535 fls->mirror_array[i]->mirror_ds = NULL;
536 if (fls->mirror_array[i]->cred) {
537 put_rpccred(fls->mirror_array[i]->cred);
538 fls->mirror_array[i]->cred = NULL;
539 }
540 }
541 }
542
543 if (lseg->pls_range.iomode == IOMODE_RW) {
544 struct nfs4_flexfile_layout *ffl;
545 struct inode *inode;
546
547 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
548 inode = ffl->generic_hdr.plh_inode;
549 spin_lock(&inode->i_lock);
550 if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
551 ffl->commit_info.nbuckets = 0;
552 kfree(ffl->commit_info.buckets);
553 ffl->commit_info.buckets = NULL;
554 }
555 spin_unlock(&inode->i_lock);
556 }
557 _ff_layout_free_lseg(fls);
558}
559
560/* Return 1 until we have support for multiple lsegs */
561static int
562ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
563{
564 return 1;
565}
566
abcb7bfc 567static void
e76d28dd 568nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
abcb7bfc 569{
570 /* first IO request? */
571 if (atomic_inc_return(&timer->n_ops) == 1) {
e76d28dd 572 timer->start_time = now;
573 }
574}
575
576static ktime_t
e76d28dd 577nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
abcb7bfc 578{
e76d28dd 579 ktime_t start;
abcb7bfc 580
581 if (atomic_dec_return(&timer->n_ops) < 0)
582 WARN_ON_ONCE(1);
583
584 start = timer->start_time;
585 timer->start_time = now;
586 return ktime_sub(now, start);
587}
588
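/*
 * Start the busy timer for this I/O and note the mirror's start and last
 * report times. Returns true when the layoutstats report interval has
 * elapsed, i.e. a LAYOUTSTATS report is due.
 */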
97ba375b 589static bool
d983803d 590nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
591 struct nfs4_ff_layoutstat *layoutstat,
592 ktime_t now)
abcb7bfc 593{
d983803d 594 static const ktime_t notime = {0};
bbf58bf3 595 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
d983803d 596
e76d28dd 597 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
598 if (ktime_equal(mirror->start_time, notime))
599 mirror->start_time = now;
600 if (ktime_equal(mirror->last_report_time, notime))
601 mirror->last_report_time = now;
602 if (layoutstats_timer != 0)
603 report_interval = (s64)layoutstats_timer * 1000LL;
97ba375b 604 if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
bbf58bf3 605 report_interval) {
606 mirror->last_report_time = now;
607 return true;
608 }
609
610 return false;
611}
612
613static void
614nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
615 __u64 requested)
616{
617 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
618
619 iostat->ops_requested++;
620 iostat->bytes_requested += requested;
621}
622
623static void
624nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
625 __u64 requested,
626 __u64 completed,
627 ktime_t time_completed,
628 ktime_t time_started)
629{
630 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
e76d28dd 631 ktime_t completion_time = ktime_sub(time_completed, time_started);
632 ktime_t timer;
633
634 iostat->ops_completed++;
635 iostat->bytes_completed += completed;
636 iostat->bytes_not_delivered += requested - completed;
637
e76d28dd 638 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
639 iostat->total_busy_time =
640 ktime_add(iostat->total_busy_time, timer);
641 iostat->aggregate_completion_time =
642 ktime_add(iostat->aggregate_completion_time,
643 completion_time);
644}
645
646static void
647nfs4_ff_layout_stat_io_start_read(struct inode *inode,
648 struct nfs4_ff_layout_mirror *mirror,
e76d28dd 649 __u64 requested, ktime_t now)
abcb7bfc 650{
651 bool report;
652
abcb7bfc 653 spin_lock(&mirror->lock);
e76d28dd 654 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
655 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
656 spin_unlock(&mirror->lock);
657
658 if (report)
0b7baf94 659 pnfs_report_layoutstat(inode, GFP_KERNEL);
660}
661
662static void
663nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
664 struct nfs4_ff_layout_mirror *mirror,
665 __u64 requested,
666 __u64 completed)
667{
668 spin_lock(&mirror->lock);
669 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
670 requested, completed,
e76d28dd 671 ktime_get(), task->tk_start);
672 spin_unlock(&mirror->lock);
673}
674
675static void
676nfs4_ff_layout_stat_io_start_write(struct inode *inode,
677 struct nfs4_ff_layout_mirror *mirror,
e76d28dd 678 __u64 requested, ktime_t now)
abcb7bfc 679{
680 bool report;
681
abcb7bfc 682 spin_lock(&mirror->lock);
e76d28dd 683 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
684 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
685 spin_unlock(&mirror->lock);
686
687 if (report)
0b7baf94 688 pnfs_report_layoutstat(inode, GFP_NOIO);
689}
690
691static void
692nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
693 struct nfs4_ff_layout_mirror *mirror,
694 __u64 requested,
695 __u64 completed,
696 enum nfs3_stable_how committed)
697{
698 if (committed == NFS_UNSTABLE)
699 requested = completed = 0;
700
701 spin_lock(&mirror->lock);
702 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
e76d28dd 703 requested, completed, ktime_get(), task->tk_start);
704 spin_unlock(&mirror->lock);
705}
706
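/*
 * Allocate one commit bucket per mirror for the (single) RW lseg. The
 * bucket array is installed under cinfo->lock and freed again when the
 * last RW lseg for the file goes away.
 */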
707static int
708ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
709 struct nfs_commit_info *cinfo,
710 gfp_t gfp_flags)
711{
712 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
713 struct pnfs_commit_bucket *buckets;
714 int size;
715
716 if (cinfo->ds->nbuckets != 0) {
717 /* This assumes there is only one RW lseg per file.
718 * To support multiple lsegs per file, we need to
719 * change struct pnfs_commit_bucket to allow dynamically
720 * increasing nbuckets.
721 */
722 return 0;
723 }
724
725 size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
726
727 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
728 gfp_flags);
729 if (!buckets)
730 return -ENOMEM;
731 else {
732 int i;
733
734 spin_lock(cinfo->lock);
735 if (cinfo->ds->nbuckets != 0)
736 kfree(buckets);
737 else {
738 cinfo->ds->buckets = buckets;
739 cinfo->ds->nbuckets = size;
740 for (i = 0; i < size; i++) {
741 INIT_LIST_HEAD(&buckets[i].written);
742 INIT_LIST_HEAD(&buckets[i].committing);
743 /* mark direct verifier as unset */
744 buckets[i].direct_verf.committed =
745 NFS_INVALID_STABLE_HOW;
746 }
747 }
748 spin_unlock(cinfo->lock);
749 return 0;
750 }
751}
752
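/*
 * Walk the mirrors in efficiency order and return the first data server
 * that can be set up, together with its index.
 */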
753static struct nfs4_pnfs_ds *
754ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
755 int *best_idx)
756{
757 struct nfs4_ff_layout_segment *fls;
758 struct nfs4_pnfs_ds *ds;
759 int idx;
760
761 fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
762 /* mirrors are sorted by efficiency */
763 for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
764 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
765 if (ds) {
766 *best_idx = idx;
767 return ds;
768 }
769 }
770
771 return NULL;
772}
773
774static void
775ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
776 struct nfs_page *req)
777{
778 struct nfs_pgio_mirror *pgm;
779 struct nfs4_ff_layout_mirror *mirror;
780 struct nfs4_pnfs_ds *ds;
781 int ds_idx;
782
783 /* Use full layout for now */
784 if (!pgio->pg_lseg)
785 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
786 req->wb_context,
787 0,
788 NFS4_MAX_UINT64,
789 IOMODE_READ,
790 GFP_KERNEL);
791 /* If no lseg, fall back to read through mds */
792 if (pgio->pg_lseg == NULL)
793 goto out_mds;
794
795 ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
796 if (!ds)
797 goto out_mds;
798 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
799
800 pgio->pg_mirror_idx = ds_idx;
801
802 /* read always uses only one mirror - idx 0 for pgio layer */
803 pgm = &pgio->pg_mirrors[0];
804 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
805
806 return;
807out_mds:
808 pnfs_put_lseg(pgio->pg_lseg);
809 pgio->pg_lseg = NULL;
810 nfs_pageio_reset_read_mds(pgio);
811}
812
813static void
814ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
815 struct nfs_page *req)
816{
817 struct nfs4_ff_layout_mirror *mirror;
818 struct nfs_pgio_mirror *pgm;
819 struct nfs_commit_info cinfo;
820 struct nfs4_pnfs_ds *ds;
821 int i;
822 int status;
823
824 if (!pgio->pg_lseg)
825 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
826 req->wb_context,
827 0,
828 NFS4_MAX_UINT64,
829 IOMODE_RW,
830 GFP_NOFS);
831 /* If no lseg, fall back to write through mds */
832 if (pgio->pg_lseg == NULL)
833 goto out_mds;
834
835 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
836 status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
837 if (status < 0)
838 goto out_mds;
839
840 /* Use a direct mapping of ds_idx to pgio mirror_idx */
841 if (WARN_ON_ONCE(pgio->pg_mirror_count !=
842 FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
843 goto out_mds;
844
845 for (i = 0; i < pgio->pg_mirror_count; i++) {
846 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
847 if (!ds)
848 goto out_mds;
849 pgm = &pgio->pg_mirrors[i];
850 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
851 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
852 }
853
854 return;
855
856out_mds:
857 pnfs_put_lseg(pgio->pg_lseg);
858 pgio->pg_lseg = NULL;
859 nfs_pageio_reset_write_mds(pgio);
860}
861
862static unsigned int
863ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
864 struct nfs_page *req)
865{
866 if (!pgio->pg_lseg)
867 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
868 req->wb_context,
869 0,
870 NFS4_MAX_UINT64,
871 IOMODE_RW,
872 GFP_NOFS);
873 if (pgio->pg_lseg)
874 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
875
876 /* no lseg means that pnfs is not in use, so no mirroring here */
877 nfs_pageio_reset_write_mds(pgio);
878 return 1;
879}
880
881static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
882 .pg_init = ff_layout_pg_init_read,
883 .pg_test = pnfs_generic_pg_test,
884 .pg_doio = pnfs_generic_pg_readpages,
885 .pg_cleanup = pnfs_generic_pg_cleanup,
886};
887
888static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
889 .pg_init = ff_layout_pg_init_write,
890 .pg_test = pnfs_generic_pg_test,
891 .pg_doio = pnfs_generic_pg_writepages,
892 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
893 .pg_cleanup = pnfs_generic_pg_cleanup,
894};
895
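/*
 * Requeue a failed WRITE either back through pNFS (retry_pnfs) or through
 * the MDS, after initiating a layoutcommit for the inode.
 */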
896static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
897{
898 struct rpc_task *task = &hdr->task;
899
900 pnfs_layoutcommit_inode(hdr->inode, false);
901
902 if (retry_pnfs) {
903 dprintk("%s Reset task %5u for i/o through pNFS "
904 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
905 hdr->task.tk_pid,
906 hdr->inode->i_sb->s_id,
907 (unsigned long long)NFS_FILEID(hdr->inode),
908 hdr->args.count,
909 (unsigned long long)hdr->args.offset);
910
911 if (!hdr->dreq) {
912 struct nfs_open_context *ctx;
913
914 ctx = nfs_list_entry(hdr->pages.next)->wb_context;
915 set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
916 hdr->completion_ops->error_cleanup(&hdr->pages);
917 } else {
918 nfs_direct_set_resched_writes(hdr->dreq);
919 /* fake unstable write to let common nfs resend pages */
920 hdr->verf.committed = NFS_UNSTABLE;
d6208769 921 hdr->good_bytes = hdr->args.count;
922 }
923 return;
924 }
925
926 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
927 dprintk("%s Reset task %5u for i/o through MDS "
928 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
929 hdr->task.tk_pid,
930 hdr->inode->i_sb->s_id,
931 (unsigned long long)NFS_FILEID(hdr->inode),
932 hdr->args.count,
933 (unsigned long long)hdr->args.offset);
934
935 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
936 }
937}
938
939static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
940{
941 struct rpc_task *task = &hdr->task;
942
943 pnfs_layoutcommit_inode(hdr->inode, false);
944
945 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
946 dprintk("%s Reset task %5u for i/o through MDS "
947 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
948 hdr->task.tk_pid,
949 hdr->inode->i_sb->s_id,
950 (unsigned long long)NFS_FILEID(hdr->inode),
951 hdr->args.count,
952 (unsigned long long)hdr->args.offset);
953
954 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
955 }
956}
957
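/*
 * Map an NFSv4 error from a DS operation onto a recovery action: return 0
 * to let the caller finish, -EAGAIN to retry the RPC, or
 * -NFS4ERR_RESET_TO_PNFS / -NFS4ERR_RESET_TO_MDS to resend the I/O through
 * pNFS or through the MDS.
 */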
958static int ff_layout_async_handle_error_v4(struct rpc_task *task,
959 struct nfs4_state *state,
960 struct nfs_client *clp,
961 struct pnfs_layout_segment *lseg,
962 int idx)
963{
964 struct pnfs_layout_hdr *lo = lseg->pls_layout;
965 struct inode *inode = lo->plh_inode;
966 struct nfs_server *mds_server = NFS_SERVER(inode);
967
968 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
969 struct nfs_client *mds_client = mds_server->nfs_client;
970 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
971
972 if (task->tk_status >= 0)
973 return 0;
974
975 switch (task->tk_status) {
976 /* MDS state errors */
977 case -NFS4ERR_DELEG_REVOKED:
978 case -NFS4ERR_ADMIN_REVOKED:
979 case -NFS4ERR_BAD_STATEID:
980 if (state == NULL)
981 break;
982 nfs_remove_bad_delegation(state->inode);
983 case -NFS4ERR_OPENMODE:
984 if (state == NULL)
985 break;
986 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
987 goto out_bad_stateid;
988 goto wait_on_recovery;
989 case -NFS4ERR_EXPIRED:
990 if (state != NULL) {
991 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
992 goto out_bad_stateid;
993 }
994 nfs4_schedule_lease_recovery(mds_client);
995 goto wait_on_recovery;
996 /* DS session errors */
997 case -NFS4ERR_BADSESSION:
998 case -NFS4ERR_BADSLOT:
999 case -NFS4ERR_BAD_HIGH_SLOT:
1000 case -NFS4ERR_DEADSESSION:
1001 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1002 case -NFS4ERR_SEQ_FALSE_RETRY:
1003 case -NFS4ERR_SEQ_MISORDERED:
1004 dprintk("%s ERROR %d, Reset session. Exchangeid "
1005 "flags 0x%x\n", __func__, task->tk_status,
1006 clp->cl_exchange_flags);
1007 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1008 break;
1009 case -NFS4ERR_DELAY:
1010 case -NFS4ERR_GRACE:
1011 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1012 break;
1013 case -NFS4ERR_RETRY_UNCACHED_REP:
1014 break;
1015 /* Invalidate Layout errors */
1016 case -NFS4ERR_PNFS_NO_LAYOUT:
1017 case -ESTALE: /* mapped NFS4ERR_STALE */
1018 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
1019 case -EISDIR: /* mapped NFS4ERR_ISDIR */
1020 case -NFS4ERR_FHEXPIRED:
1021 case -NFS4ERR_WRONG_TYPE:
1022 dprintk("%s Invalid layout error %d\n", __func__,
1023 task->tk_status);
1024 /*
1025 * Destroy layout so new i/o will get a new layout.
1026 * Layout will not be destroyed until all current lseg
1027 * references are put. Mark layout as invalid to resend failed
1028 * i/o and all i/o waiting on the slot table to the MDS until
1029 * layout is destroyed and a new valid layout is obtained.
1030 */
1031 pnfs_destroy_layout(NFS_I(inode));
1032 rpc_wake_up(&tbl->slot_tbl_waitq);
1033 goto reset;
1034 /* RPC connection errors */
1035 case -ECONNREFUSED:
1036 case -EHOSTDOWN:
1037 case -EHOSTUNREACH:
1038 case -ENETUNREACH:
1039 case -EIO:
1040 case -ETIMEDOUT:
1041 case -EPIPE:
1042 dprintk("%s DS connection error %d\n", __func__,
1043 task->tk_status);
1044 nfs4_mark_deviceid_unavailable(devid);
1045 rpc_wake_up(&tbl->slot_tbl_waitq);
1046 /* fall through */
1047 default:
1048 if (ff_layout_has_available_ds(lseg))
1049 return -NFS4ERR_RESET_TO_PNFS;
1050reset:
1051 dprintk("%s Retry through MDS. Error %d\n", __func__,
1052 task->tk_status);
1053 return -NFS4ERR_RESET_TO_MDS;
1054 }
1055out:
1056 task->tk_status = 0;
1057 return -EAGAIN;
1058out_bad_stateid:
1059 task->tk_status = -EIO;
1060 return 0;
1061wait_on_recovery:
1062 rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
1063 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
1064 rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
1065 goto out;
1066}
1067
1068/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1069static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1070 struct pnfs_layout_segment *lseg,
1071 int idx)
1072{
1073 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1074
1075 if (task->tk_status >= 0)
1076 return 0;
1077
1078 if (task->tk_status != -EJUKEBOX) {
1079 dprintk("%s DS connection error %d\n", __func__,
1080 task->tk_status);
1081 nfs4_mark_deviceid_unavailable(devid);
1082 if (ff_layout_has_available_ds(lseg))
1083 return -NFS4ERR_RESET_TO_PNFS;
1084 else
1085 return -NFS4ERR_RESET_TO_MDS;
1086 }
1087
1088 if (task->tk_status == -EJUKEBOX)
1089 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1090 task->tk_status = 0;
1091 rpc_restart_call(task);
1092 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1093 return -EAGAIN;
1094}
1095
1096static int ff_layout_async_handle_error(struct rpc_task *task,
1097 struct nfs4_state *state,
1098 struct nfs_client *clp,
1099 struct pnfs_layout_segment *lseg,
1100 int idx)
1101{
1102 int vers = clp->cl_nfs_mod->rpc_vers->number;
1103
1104 switch (vers) {
1105 case 3:
1106 return ff_layout_async_handle_error_v3(task, lseg, idx);
1107 case 4:
1108 return ff_layout_async_handle_error_v4(task, state, clp,
1109 lseg, idx);
1110 default:
1111 /* should never happen */
1112 WARN_ON_ONCE(1);
1113 return 0;
1114 }
1115}
1116
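/*
 * Record a DS I/O error against the mirror so that it can be reported to
 * the MDS in a later LAYOUTRETURN. Local transport errors are mapped to
 * NFS4ERR_NXIO (or NFS4ERR_ACCESS for -EACCES) first.
 */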
1117static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1118 int idx, u64 offset, u64 length,
dd52128a 1119 u32 status, int opnum, int error)
1120{
1121 struct nfs4_ff_layout_mirror *mirror;
1122 int err;
1123
1124 if (status == 0) {
1125 switch (error) {
1126 case -ETIMEDOUT:
1127 case -EPFNOSUPPORT:
1128 case -EPROTONOSUPPORT:
1129 case -EOPNOTSUPP:
1130 case -ECONNREFUSED:
1131 case -ECONNRESET:
1132 case -EHOSTDOWN:
1133 case -EHOSTUNREACH:
1134 case -ENETUNREACH:
1135 case -EADDRINUSE:
1136 case -ENOBUFS:
1137 case -EPIPE:
1138 case -EPERM:
1139 status = NFS4ERR_NXIO;
1140 break;
1141 case -EACCES:
1142 status = NFS4ERR_ACCESS;
1143 break;
1144 default:
1145 return;
1146 }
1147 }
1148
1149 mirror = FF_LAYOUT_COMP(lseg, idx);
1150 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1151 mirror, offset, length, status, opnum,
1152 GFP_NOIO);
1153 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1154}
1155
1156/* NFS_PROTO call done callback routines */
1157
1158static int ff_layout_read_done_cb(struct rpc_task *task,
1159 struct nfs_pgio_header *hdr)
1160{
1161 struct inode *inode;
1162 int err;
1163
1164 trace_nfs4_pnfs_read(hdr, task->tk_status);
dd52128a 1165 if (task->tk_status < 0)
1166 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1167 hdr->args.offset, hdr->args.count,
1168 hdr->res.op_status, OP_READ,
1169 task->tk_status);
1170 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1171 hdr->ds_clp, hdr->lseg,
1172 hdr->pgio_mirror_idx);
1173
1174 switch (err) {
1175 case -NFS4ERR_RESET_TO_PNFS:
1176 set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1177 &hdr->lseg->pls_layout->plh_flags);
1178 pnfs_read_resend_pnfs(hdr);
1179 return task->tk_status;
1180 case -NFS4ERR_RESET_TO_MDS:
1181 inode = hdr->lseg->pls_layout->plh_inode;
1182 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
1183 ff_layout_reset_read(hdr);
1184 return task->tk_status;
1185 case -EAGAIN:
1186 rpc_restart_call_prepare(task);
1187 return -EAGAIN;
1188 }
1189
1190 return 0;
1191}
1192
1193static bool
1194ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1195{
1196 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1197}
1198
1199/*
1200 * We reference the rpc_cred of the first WRITE that triggers the need for
1201 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1202 * RFC 5661 is not clear about which credential should be used.
1203 *
1204 * The flexfile layout client should treat a DS reply of FILE_SYNC as DATA_SYNC, so
1205 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1206 * we always send layoutcommit after DS writes.
1207 */
1208static void
1209ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
1210{
1211 if (!ff_layout_need_layoutcommit(hdr->lseg))
1212 return;
1213
1214 pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
1215 hdr->mds_offset + hdr->res.count);
1216 dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
1217 (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
1218}
1219
1220static bool
1221ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
1222{
1223 /* No mirroring for now */
1224 struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
1225
1226 return ff_layout_test_devid_unavailable(node);
1227}
1228
1229static int ff_layout_read_prepare_common(struct rpc_task *task,
1230 struct nfs_pgio_header *hdr)
1231{
0b7baf94 1232 nfs4_ff_layout_stat_io_start_read(hdr->inode,
abcb7bfc 1233 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1234 hdr->args.count,
1235 task->tk_start);
abcb7bfc 1236
1237 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1238 rpc_exit(task, -EIO);
1239 return -EIO;
1240 }
1241 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1242 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
1243 if (ff_layout_has_available_ds(hdr->lseg))
1244 pnfs_read_resend_pnfs(hdr);
1245 else
1246 ff_layout_reset_read(hdr);
1247 rpc_exit(task, 0);
1248 return -EAGAIN;
1249 }
1250 hdr->pgio_done_cb = ff_layout_read_done_cb;
1251
1252 return 0;
1253}
1254
1255/*
1256 * Call ops for the async read/write cases
1257 * In the case of dense layouts, the offset needs to be reset to its
1258 * original value.
1259 */
1260static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1261{
1262 struct nfs_pgio_header *hdr = data;
1263
1264 if (ff_layout_read_prepare_common(task, hdr))
1265 return;
1266
1267 rpc_call_start(task);
1268}
1269
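/* Use the NFSv4.1 session slot table if the DS has a session, otherwise
 * fall back to the NFSv4.0 slot table.
 */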
1270static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
1271 struct nfs4_sequence_args *args,
1272 struct nfs4_sequence_res *res,
1273 struct rpc_task *task)
1274{
1275 if (ds_clp->cl_session)
1276 return nfs41_setup_sequence(ds_clp->cl_session,
1277 args,
1278 res,
1279 task);
1280 return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
1281 args,
1282 res,
1283 task);
1284}
1285
1286static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1287{
1288 struct nfs_pgio_header *hdr = data;
1289
1290 if (ff_layout_setup_sequence(hdr->ds_clp,
1291 &hdr->args.seq_args,
1292 &hdr->res.seq_res,
1293 task))
1294 return;
1295
1296 if (ff_layout_read_prepare_common(task, hdr))
1297 return;
1298
1299 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1300 hdr->args.lock_context, FMODE_READ) == -EIO)
1301 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1302}
1303
1304static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1305{
1306 struct nfs_pgio_header *hdr = data;
1307
1308 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1309
1310 nfs4_ff_layout_stat_io_end_read(task,
1311 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1312 hdr->args.count, hdr->res.count);
1313
1314 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1315 task->tk_status == 0) {
1316 nfs4_sequence_done(task, &hdr->res.seq_res);
1317 return;
1318 }
1319
1320 /* Note this may cause RPC to be resent */
1321 hdr->mds_ops->rpc_call_done(task, hdr);
1322}
1323
1324static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1325{
1326 struct nfs_pgio_header *hdr = data;
1327
1328 rpc_count_iostats_metrics(task,
1329 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1330}
1331
1332static int ff_layout_write_done_cb(struct rpc_task *task,
1333 struct nfs_pgio_header *hdr)
1334{
1335 struct inode *inode;
1336 int err;
1337
1338 trace_nfs4_pnfs_write(hdr, task->tk_status);
dd52128a 1339 if (task->tk_status < 0)
1340 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1341 hdr->args.offset, hdr->args.count,
1342 hdr->res.op_status, OP_WRITE,
1343 task->tk_status);
1344 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1345 hdr->ds_clp, hdr->lseg,
1346 hdr->pgio_mirror_idx);
1347
1348 switch (err) {
1349 case -NFS4ERR_RESET_TO_PNFS:
1350 case -NFS4ERR_RESET_TO_MDS:
1351 inode = hdr->lseg->pls_layout->plh_inode;
1352 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
1353 if (err == -NFS4ERR_RESET_TO_PNFS) {
1354 pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
1355 ff_layout_reset_write(hdr, true);
1356 } else {
1357 pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
1358 ff_layout_reset_write(hdr, false);
1359 }
1360 return task->tk_status;
1361 case -EAGAIN:
1362 rpc_restart_call_prepare(task);
1363 return -EAGAIN;
1364 }
1365
1366 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1367 hdr->res.verf->committed == NFS_DATA_SYNC)
1368 ff_layout_set_layoutcommit(hdr);
1369
1370 /* zero out fattr since we don't care about DS attrs at all */
1371 hdr->fattr.valid = 0;
1372 if (task->tk_status >= 0)
1373 nfs_writeback_update_inode(hdr);
1374
1375 return 0;
1376}
1377
1378static int ff_layout_commit_done_cb(struct rpc_task *task,
1379 struct nfs_commit_data *data)
1380{
1381 struct inode *inode;
1382 int err;
1383
1384 trace_nfs4_pnfs_commit_ds(data, task->tk_status);
dd52128a 1385 if (task->tk_status < 0)
1386 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1387 data->args.offset, data->args.count,
1388 data->res.op_status, OP_COMMIT,
1389 task->tk_status);
1390 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1391 data->lseg, data->ds_commit_index);
1392
1393 switch (err) {
1394 case -NFS4ERR_RESET_TO_PNFS:
1395 case -NFS4ERR_RESET_TO_MDS:
1396 inode = data->lseg->pls_layout->plh_inode;
1397 pnfs_error_mark_layout_for_return(inode, data->lseg);
1398 if (err == -NFS4ERR_RESET_TO_PNFS)
1399 pnfs_set_retry_layoutget(data->lseg->pls_layout);
1400 else
1401 pnfs_clear_retry_layoutget(data->lseg->pls_layout);
1402 pnfs_generic_prepare_to_resend_writes(data);
1403 return -EAGAIN;
1404 case -EAGAIN:
1405 rpc_restart_call_prepare(task);
1406 return -EAGAIN;
1407 }
1408
1409 if (data->verf.committed == NFS_UNSTABLE
1410 && ff_layout_need_layoutcommit(data->lseg))
67af7611 1411 pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
1412
1413 return 0;
1414}
1415
1416static int ff_layout_write_prepare_common(struct rpc_task *task,
1417 struct nfs_pgio_header *hdr)
1418{
0b7baf94 1419 nfs4_ff_layout_stat_io_start_write(hdr->inode,
abcb7bfc 1420 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1421 hdr->args.count,
1422 task->tk_start);
abcb7bfc 1423
1424 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1425 rpc_exit(task, -EIO);
1426 return -EIO;
1427 }
1428
1429 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1430 bool retry_pnfs;
1431
1432 retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
1433 dprintk("%s task %u reset io to %s\n", __func__,
1434 task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
1435 ff_layout_reset_write(hdr, retry_pnfs);
1436 rpc_exit(task, 0);
1437 return -EAGAIN;
1438 }
1439
1440 return 0;
1441}
1442
1443static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1444{
1445 struct nfs_pgio_header *hdr = data;
1446
1447 if (ff_layout_write_prepare_common(task, hdr))
1448 return;
1449
1450 rpc_call_start(task);
1451}
1452
1453static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1454{
1455 struct nfs_pgio_header *hdr = data;
1456
1457 if (ff_layout_setup_sequence(hdr->ds_clp,
1458 &hdr->args.seq_args,
1459 &hdr->res.seq_res,
1460 task))
1461 return;
1462
1463 if (ff_layout_write_prepare_common(task, hdr))
1464 return;
1465
1466 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1467 hdr->args.lock_context, FMODE_WRITE) == -EIO)
1468 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1469}
1470
1471static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1472{
1473 struct nfs_pgio_header *hdr = data;
1474
1475 nfs4_ff_layout_stat_io_end_write(task,
1476 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1477 hdr->args.count, hdr->res.count,
1478 hdr->res.verf->committed);
1479
1480 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1481 task->tk_status == 0) {
1482 nfs4_sequence_done(task, &hdr->res.seq_res);
1483 return;
1484 }
1485
1486 /* Note this may cause RPC to be resent */
1487 hdr->mds_ops->rpc_call_done(task, hdr);
1488}
1489
1490static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1491{
1492 struct nfs_pgio_header *hdr = data;
1493
1494 rpc_count_iostats_metrics(task,
1495 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1496}
1497
1498static void ff_layout_commit_prepare_common(struct rpc_task *task,
1499 struct nfs_commit_data *cdata)
1500{
0b7baf94 1501 nfs4_ff_layout_stat_io_start_write(cdata->inode,
abcb7bfc 1502 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
e76d28dd 1503 0, task->tk_start);
1504}
1505
1506static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1507{
abcb7bfc 1508 ff_layout_commit_prepare_common(task, data);
1509 rpc_call_start(task);
1510}
1511
1512static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1513{
1514 struct nfs_commit_data *wdata = data;
1515
abcb7bfc 1516 if (ff_layout_setup_sequence(wdata->ds_clp,
1517 &wdata->args.seq_args,
1518 &wdata->res.seq_res,
1519 task))
1520 return;
1521 ff_layout_commit_prepare_common(task, data);
1522}
1523
1524static void ff_layout_commit_done(struct rpc_task *task, void *data)
1525{
1526 struct nfs_commit_data *cdata = data;
1527 struct nfs_page *req;
1528 __u64 count = 0;
1529
1530 if (task->tk_status == 0) {
1531 list_for_each_entry(req, &cdata->pages, wb_list)
1532 count += req->wb_bytes;
1533 }
1534
1535 nfs4_ff_layout_stat_io_end_write(task,
1536 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1537 count, count, NFS_FILE_SYNC);
1538
1539 pnfs_generic_write_commit_done(task, data);
1540}
1541
1542static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1543{
1544 struct nfs_commit_data *cdata = data;
1545
1546 rpc_count_iostats_metrics(task,
1547 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1548}
1549
1550static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1551 .rpc_call_prepare = ff_layout_read_prepare_v3,
1552 .rpc_call_done = ff_layout_read_call_done,
1553 .rpc_count_stats = ff_layout_read_count_stats,
1554 .rpc_release = pnfs_generic_rw_release,
1555};
1556
1557static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1558 .rpc_call_prepare = ff_layout_read_prepare_v4,
1559 .rpc_call_done = ff_layout_read_call_done,
1560 .rpc_count_stats = ff_layout_read_count_stats,
1561 .rpc_release = pnfs_generic_rw_release,
1562};
1563
1564static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1565 .rpc_call_prepare = ff_layout_write_prepare_v3,
1566 .rpc_call_done = ff_layout_write_call_done,
1567 .rpc_count_stats = ff_layout_write_count_stats,
1568 .rpc_release = pnfs_generic_rw_release,
1569};
1570
1571static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1572 .rpc_call_prepare = ff_layout_write_prepare_v4,
1573 .rpc_call_done = ff_layout_write_call_done,
1574 .rpc_count_stats = ff_layout_write_count_stats,
1575 .rpc_release = pnfs_generic_rw_release,
1576};
1577
1578static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1579 .rpc_call_prepare = ff_layout_commit_prepare_v3,
abcb7bfc 1580 .rpc_call_done = ff_layout_commit_done,
1581 .rpc_count_stats = ff_layout_commit_count_stats,
1582 .rpc_release = pnfs_generic_commit_release,
1583};
1584
1585static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1586 .rpc_call_prepare = ff_layout_commit_prepare_v4,
abcb7bfc 1587 .rpc_call_done = ff_layout_commit_done,
1588 .rpc_count_stats = ff_layout_commit_count_stats,
1589 .rpc_release = pnfs_generic_commit_release,
1590};
1591
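/*
 * Set up and send an asynchronous READ to the selected mirror's DS. On
 * failure, tell the caller to retry through pNFS or to fall back to the MDS.
 */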
1592static enum pnfs_try_status
1593ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1594{
1595 struct pnfs_layout_segment *lseg = hdr->lseg;
1596 struct nfs4_pnfs_ds *ds;
1597 struct rpc_clnt *ds_clnt;
1598 struct rpc_cred *ds_cred;
1599 loff_t offset = hdr->args.offset;
1600 u32 idx = hdr->pgio_mirror_idx;
1601 int vers;
1602 struct nfs_fh *fh;
1603
1604 dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
1605 __func__, hdr->inode->i_ino,
1606 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1607
1608 ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1609 if (!ds)
1610 goto out_failed;
1611
1612 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1613 hdr->inode);
1614 if (IS_ERR(ds_clnt))
1615 goto out_failed;
1616
1617 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1618 if (IS_ERR(ds_cred))
1619 goto out_failed;
1620
1621 vers = nfs4_ff_layout_ds_version(lseg, idx);
1622
1623 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1624 ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
1625
1626 atomic_inc(&ds->ds_clp->cl_count);
1627 hdr->ds_clp = ds->ds_clp;
1628 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1629 if (fh)
1630 hdr->args.fh = fh;
1631 /*
1632 * Note that if we ever decide to split across DSes,
1633 * then we may need to handle dense-like offsets.
1634 */
1635 hdr->args.offset = offset;
1636 hdr->mds_offset = offset;
1637
1638 /* Perform an asynchronous read to ds */
1639 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1640 vers == 3 ? &ff_layout_read_call_ops_v3 :
1641 &ff_layout_read_call_ops_v4,
1642 0, RPC_TASK_SOFTCONN);
1643
1644 return PNFS_ATTEMPTED;
1645
1646out_failed:
1647 if (ff_layout_has_available_ds(lseg))
1648 return PNFS_TRY_AGAIN;
1649 return PNFS_NOT_ATTEMPTED;
1650}
1651
1652/* Perform async writes. */
1653static enum pnfs_try_status
1654ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1655{
1656 struct pnfs_layout_segment *lseg = hdr->lseg;
1657 struct nfs4_pnfs_ds *ds;
1658 struct rpc_clnt *ds_clnt;
1659 struct rpc_cred *ds_cred;
1660 loff_t offset = hdr->args.offset;
1661 int vers;
1662 struct nfs_fh *fh;
1663 int idx = hdr->pgio_mirror_idx;
1664
1665 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1666 if (!ds)
1667 return PNFS_NOT_ATTEMPTED;
1668
1669 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1670 hdr->inode);
1671 if (IS_ERR(ds_clnt))
1672 return PNFS_NOT_ATTEMPTED;
1673
1674 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1675 if (IS_ERR(ds_cred))
1676 return PNFS_NOT_ATTEMPTED;
1677
1678 vers = nfs4_ff_layout_ds_version(lseg, idx);
1679
1680 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
1681 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1682 offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
1683 vers);
1684
1685 hdr->pgio_done_cb = ff_layout_write_done_cb;
1686 atomic_inc(&ds->ds_clp->cl_count);
1687 hdr->ds_clp = ds->ds_clp;
1688 hdr->ds_commit_idx = idx;
1689 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1690 if (fh)
1691 hdr->args.fh = fh;
1692
1693 /*
1694 * Note that if we ever decide to split across DSes,
1695 * then we may need to handle dense-like offsets.
1696 */
1697 hdr->args.offset = offset;
1698
1699 /* Perform an asynchronous write */
1700 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1701 vers == 3 ? &ff_layout_write_call_ops_v3 :
1702 &ff_layout_write_call_ops_v4,
1703 sync, RPC_TASK_SOFTCONN);
1704 return PNFS_ATTEMPTED;
1705}
1706
1707static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1708{
1709 return i;
1710}
1711
1712static struct nfs_fh *
1713select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1714{
1715 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1716
1717 /* FIXME: Assume that there is only one NFS version available
1718 * for the DS.
1719 */
1720 return &flseg->mirror_array[i]->fh_versions[0];
1721}
1722
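/*
 * Send a COMMIT to the DS backing the given commit bucket. If the DS cannot
 * be set up, the pages are requeued for resend through the MDS.
 */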
1723static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1724{
1725 struct pnfs_layout_segment *lseg = data->lseg;
1726 struct nfs4_pnfs_ds *ds;
1727 struct rpc_clnt *ds_clnt;
1728 struct rpc_cred *ds_cred;
1729 u32 idx;
1730 int vers;
1731 struct nfs_fh *fh;
1732
1733 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1734 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1735 if (!ds)
1736 goto out_err;
1737
1738 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1739 data->inode);
1740 if (IS_ERR(ds_clnt))
1741 goto out_err;
1742
1743 ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
1744 if (IS_ERR(ds_cred))
1745 goto out_err;
1746
1747 vers = nfs4_ff_layout_ds_version(lseg, idx);
1748
1749 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1750 data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
1751 vers);
1752 data->commit_done_cb = ff_layout_commit_done_cb;
1753 data->cred = ds_cred;
1754 atomic_inc(&ds->ds_clp->cl_count);
1755 data->ds_clp = ds->ds_clp;
1756 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1757 if (fh)
1758 data->args.fh = fh;
abcb7bfc 1759
1760 return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1761 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1762 &ff_layout_commit_call_ops_v4,
1763 how, RPC_TASK_SOFTCONN);
1764out_err:
1765 pnfs_generic_prepare_to_resend_writes(data);
1766 pnfs_generic_commit_release(data);
1767 return -EAGAIN;
1768}
1769
1770static int
1771ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1772 int how, struct nfs_commit_info *cinfo)
1773{
1774 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1775 ff_layout_initiate_commit);
1776}
1777
1778static struct pnfs_ds_commit_info *
1779ff_layout_get_ds_info(struct inode *inode)
1780{
1781 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1782
1783 if (layout == NULL)
1784 return NULL;
1785
1786 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1787}
1788
1789static void
fc87701b 1790ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1791{
1792 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1793 id_node));
1794}
1795
1796static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
1797 struct xdr_stream *xdr,
1798 const struct nfs4_layoutreturn_args *args)
1799{
1800 struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
1801 __be32 *start;
1802 int count = 0, ret = 0;
1803
1804 start = xdr_reserve_space(xdr, 4);
1805 if (unlikely(!start))
1806 return -E2BIG;
1807
1808 /* This assumes we always return _ALL_ layouts */
1809 spin_lock(&hdr->plh_inode->i_lock);
1810 ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
1811 spin_unlock(&hdr->plh_inode->i_lock);
1812
1813 *start = cpu_to_be32(count);
1814
1815 return ret;
1816}
1817
1818/* report nothing for now */
1819static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
1820 struct xdr_stream *xdr,
1821 const struct nfs4_layoutreturn_args *args)
1822{
1823 __be32 *p;
1824
1825 p = xdr_reserve_space(xdr, 4);
1826 if (likely(p))
1827 *p = cpu_to_be32(0);
1828}
1829
1830static struct nfs4_deviceid_node *
1831ff_layout_alloc_deviceid_node(struct nfs_server *server,
1832 struct pnfs_device *pdev, gfp_t gfp_flags)
1833{
1834 struct nfs4_ff_layout_ds *dsaddr;
1835
1836 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
1837 if (!dsaddr)
1838 return NULL;
1839 return &dsaddr->id_node;
1840}
1841
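/*
 * Encode the flexfiles LAYOUTRETURN body: the DS I/O error list followed by
 * the (currently empty) iostats list, prefixed with the total opaque length.
 */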
1842static void
1843ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
1844 struct xdr_stream *xdr,
1845 const struct nfs4_layoutreturn_args *args)
1846{
1847 struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
1848 __be32 *start;
1849
1850 dprintk("%s: Begin\n", __func__);
1851 start = xdr_reserve_space(xdr, 4);
1852 BUG_ON(!start);
1853
1854 if (ff_layout_encode_ioerr(flo, xdr, args))
1855 goto out;
1856
1857 ff_layout_encode_iostats(flo, xdr, args);
1858out:
1859 *start = cpu_to_be32((xdr->p - start - 1) * 4);
1860 dprintk("%s: Return\n", __func__);
1861}
1862
1863static int
1864ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
1865{
1866 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
1867
1868 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
1869}
1870
1871static size_t
1872ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
1873 const int buflen)
1874{
1875 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
1876 const struct in6_addr *addr = &sin6->sin6_addr;
1877
1878 /*
1879 * RFC 4291, Section 2.2.2
1880 *
1881 * Shorthanded ANY address
1882 */
1883 if (ipv6_addr_any(addr))
1884 return snprintf(buf, buflen, "::");
1885
1886 /*
1887 * RFC 4291, Section 2.2.2
1888 *
1889 * Shorthanded loopback address
1890 */
1891 if (ipv6_addr_loopback(addr))
1892 return snprintf(buf, buflen, "::1");
1893
1894 /*
1895 * RFC 4291, Section 2.2.3
1896 *
1897 * Special presentation address format for mapped v4
1898 * addresses.
1899 */
1900 if (ipv6_addr_v4mapped(addr))
1901 return snprintf(buf, buflen, "::ffff:%pI4",
1902 &addr->s6_addr32[3]);
1903
1904 /*
1905 * RFC 4291, Section 2.2.1
1906 */
1907 return snprintf(buf, buflen, "%pI6c", addr);
1908}
1909
1910/* Derived from rpc_sockaddr2uaddr */
1911static void
1912ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
1913{
1914 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
1915 char portbuf[RPCBIND_MAXUADDRPLEN];
1916 char addrbuf[RPCBIND_MAXUADDRLEN];
1917 char *netid;
1918 unsigned short port;
1919 int len, netid_len;
1920 __be32 *p;
1921
1922 switch (sap->sa_family) {
1923 case AF_INET:
1924 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
1925 return;
1926 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
1927 netid = "tcp";
1928 netid_len = 3;
1929 break;
1930 case AF_INET6:
1931 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
1932 return;
1933 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
1934 netid = "tcp6";
1935 netid_len = 4;
1936 break;
1937 default:
1938 /* we only support tcp and tcp6 */
1939 WARN_ON_ONCE(1);
1940 return;
1941 }
1942
1943 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
1944 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
1945
1946 p = xdr_reserve_space(xdr, 4 + netid_len);
1947 xdr_encode_opaque(p, netid, netid_len);
1948
1949 p = xdr_reserve_space(xdr, 4 + len);
1950 xdr_encode_opaque(p, addrbuf, len);
1951}
1952
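/* Encode a ktime_t as an nfstime4: 64-bit seconds followed by 32-bit nanoseconds. */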
1953static void
1954ff_layout_encode_nfstime(struct xdr_stream *xdr,
1955 ktime_t t)
1956{
1957 struct timespec64 ts;
1958 __be32 *p;
1959
1960 p = xdr_reserve_space(xdr, 12);
1961 ts = ktime_to_timespec64(t);
1962 p = xdr_encode_hyper(p, ts.tv_sec);
1963 *p++ = cpu_to_be32(ts.tv_nsec);
1964}
1965
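/*
 * Encode one ff_io_latency4: five 64-bit counters followed by the busy
 * and aggregate completion times as nfstime4 values.
 */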
1966static void
1967ff_layout_encode_io_latency(struct xdr_stream *xdr,
1968 struct nfs4_ff_io_stat *stat)
1969{
1970 __be32 *p;
1971
1972 p = xdr_reserve_space(xdr, 5 * 8);
1973 p = xdr_encode_hyper(p, stat->ops_requested);
1974 p = xdr_encode_hyper(p, stat->bytes_requested);
1975 p = xdr_encode_hyper(p, stat->ops_completed);
1976 p = xdr_encode_hyper(p, stat->bytes_completed);
1977 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
1978 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
1979 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
1980}
1981
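/*
 * Encode the flexfile layoutstats body for one mirror: DS netaddr and
 * filehandle, the read and write ff_io_latency4 blocks (sampled under
 * the mirror lock), the layout duration, and a trailing boolean, with
 * the opaque length backfilled at the end.
 */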
1982static void
1983ff_layout_encode_layoutstats(struct xdr_stream *xdr,
1984 struct nfs42_layoutstat_args *args,
1985 struct nfs42_layoutstat_devinfo *devinfo)
1986{
1987 struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
1988 struct nfs4_pnfs_ds_addr *da;
1989 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
1990 struct nfs_fh *fh = &mirror->fh_versions[0];
1991 __be32 *p, *start;
1992
1993 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
1994 dprintk("%s: DS %s: encoding address %s\n",
1995 __func__, ds->ds_remotestr, da->da_remotestr);
1996 /* layoutupdate length */
1997 start = xdr_reserve_space(xdr, 4);
1998 /* netaddr4 */
1999 ff_layout_encode_netaddr(xdr, da);
2000 /* nfs_fh4 */
2001 p = xdr_reserve_space(xdr, 4 + fh->size);
2002 xdr_encode_opaque(p, fh->data, fh->size);
2003 /* ff_io_latency4 read */
2004 spin_lock(&mirror->lock);
2005 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2006 /* ff_io_latency4 write */
2007 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2008 spin_unlock(&mirror->lock);
2009 /* nfstime4 */
2010 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2011 /* bool */
2012 p = xdr_reserve_space(xdr, 4);
2013 *p = cpu_to_be32(false);
2014
2015 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2016}
2017
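/*
 * Walk the layout's mirror list and fill in up to dev_limit devinfo
 * entries, taking a reference on each mirror that is reported; the
 * references are dropped in ff_layout_cleanup_layoutstats().
 */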
266d12d4 2018static int
ad4dc53e 2019ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
2020 struct pnfs_layout_hdr *lo,
2021 int dev_limit)
ad4dc53e 2022{
266d12d4 2023 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2024 struct nfs4_ff_layout_mirror *mirror;
2025 struct nfs4_deviceid_node *dev;
2026 struct nfs42_layoutstat_devinfo *devinfo;
266d12d4 2027 int i = 0;
ad4dc53e 2028
2029 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2030 if (i >= dev_limit)
ad4dc53e 2031 break;
266d12d4 2032 if (!mirror->mirror_ds)
27c43064 2033 continue;
2034 /* mirror refcount put in cleanup_layoutstats */
2035 if (!atomic_inc_not_zero(&mirror->ref))
2036 continue;
2037 dev = &mirror->mirror_ds->id_node;
2038 devinfo = &args->devinfo[i];
ad4dc53e 2039 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2040 devinfo->offset = 0;
2041 devinfo->length = NFS4_MAX_UINT64;
d099d7b8 2042 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
ad4dc53e 2043 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
d099d7b8 2044 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2045 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2046 devinfo->layout_type = LAYOUT_FLEX_FILES;
2047 devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
2048 devinfo->layout_private = mirror;
ad4dc53e 2049
266d12d4 2050 i++;
ad4dc53e 2051 }
266d12d4 2052 return i;
2053}
2054
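/*
 * Count the active mirrors under the inode lock, allocate the devinfo
 * array (capped at PNFS_LAYOUTSTATS_MAXDEV), then populate it via
 * ff_layout_mirror_prepare_stats().
 */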
2055static int
2056ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2057{
2058 struct nfs4_flexfile_layout *ff_layout;
2059 struct nfs4_ff_layout_mirror *mirror;
2060 int dev_count = 0;
2061
2062 spin_lock(&args->inode->i_lock);
2063 ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2064 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2065 if (atomic_read(&mirror->ref) != 0)
 2066 dev_count++;
2067 }
2068 spin_unlock(&args->inode->i_lock);
2069 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2070 if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
2071 dprintk("%s: truncating devinfo to limit (%d:%d)\n",
2072 __func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
2073 dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2074 }
266d12d4 2075 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2076 if (!args->devinfo)
2077 return -ENOMEM;
2078
ad4dc53e 2079 spin_lock(&args->inode->i_lock);
2080 args->num_dev = ff_layout_mirror_prepare_stats(args,
2081 &ff_layout->generic_hdr, dev_count);
ad4dc53e 2082 spin_unlock(&args->inode->i_lock);
2083
2084 return 0;
2085}
2086
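/* Drop the mirror references taken in ff_layout_mirror_prepare_stats(). */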
2087static void
2088ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
2089{
2090 struct nfs4_ff_layout_mirror *mirror;
2091 int i;
2092
2093 for (i = 0; i < data->args.num_dev; i++) {
2094 mirror = data->args.devinfo[i].layout_private;
2095 data->args.devinfo[i].layout_private = NULL;
0b7baf94 2096 ff_layout_put_mirror(mirror);
2097 }
2098}
2099
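/*
 * Flexfile layout driver operations vector, registered with the generic
 * pNFS code at module init; most commit bookkeeping is delegated to the
 * pnfs_generic helpers.
 */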
2100static struct pnfs_layoutdriver_type flexfilelayout_type = {
2101 .id = LAYOUT_FLEX_FILES,
2102 .name = "LAYOUT_FLEX_FILES",
2103 .owner = THIS_MODULE,
2104 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
2105 .free_layout_hdr = ff_layout_free_layout_hdr,
2106 .alloc_lseg = ff_layout_alloc_lseg,
2107 .free_lseg = ff_layout_free_lseg,
0762ed2c 2108 .add_lseg = ff_layout_add_lseg,
2109 .pg_read_ops = &ff_layout_pg_read_ops,
2110 .pg_write_ops = &ff_layout_pg_write_ops,
2111 .get_ds_info = ff_layout_get_ds_info,
fc87701b 2112 .free_deviceid_node = ff_layout_free_deviceid_node,
338d00cf 2113 .mark_request_commit = pnfs_layout_mark_request_commit,
2114 .clear_request_commit = pnfs_generic_clear_request_commit,
2115 .scan_commit_lists = pnfs_generic_scan_commit_lists,
2116 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
2117 .commit_pagelist = ff_layout_commit_pagelist,
2118 .read_pagelist = ff_layout_read_pagelist,
2119 .write_pagelist = ff_layout_write_pagelist,
2120 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
2121 .encode_layoutreturn = ff_layout_encode_layoutreturn,
5bb89b47 2122 .sync = pnfs_nfs_generic_sync,
ad4dc53e 2123 .prepare_layoutstats = ff_layout_prepare_layoutstats,
27c43064 2124 .cleanup_layoutstats = ff_layout_cleanup_layoutstats,
2125};
2126
2127static int __init nfs4flexfilelayout_init(void)
2128{
2129 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2130 __func__);
2131 return pnfs_register_layoutdriver(&flexfilelayout_type);
2132}
2133
2134static void __exit nfs4flexfilelayout_exit(void)
2135{
2136 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2137 __func__);
2138 pnfs_unregister_layoutdriver(&flexfilelayout_type);
2139}
2140
2141MODULE_ALIAS("nfs-layouttype4-4");
2142
2143MODULE_LICENSE("GPL");
2144MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2145
2146module_init(nfs4flexfilelayout_init);
2147module_exit(nfs4flexfilelayout_exit);