/*
 * Device operations for the pnfs nfs4 file layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/sunrpc/addr.h>

#include "../internal.h"
#include "../nfs4session.h"
#include "flexfilelayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD
/* Data-server RPC timeout default, in tenths of a second (60 seconds) */
static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
static unsigned int dataserver_retrans;

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);

void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	if (!IS_ERR_OR_NULL(mirror_ds))
		nfs4_put_deviceid_node(&mirror_ds->id_node);
}

void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
	nfs4_pnfs_ds_put(mirror_ds->ds);
	kfree_rcu(mirror_ds, id_node.rcu);
}

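/*
 * A rough sketch of the wire format decoded below. The opaque device
 * data is an ff_device_addr4 as defined by the flexfile layout spec
 * (RFC 8435): a multipath address list followed by an array of
 * per-version capabilities.
 *
 *	struct ff_device_versions4 {
 *		uint32_t	ffdv_version;
 *		uint32_t	ffdv_minorversion;
 *		uint32_t	ffdv_rsize;
 *		uint32_t	ffdv_wsize;
 *		bool		ffdv_tightly_coupled;
 *	};
 *
 *	struct ff_device_addr4 {
 *		multipath_list4		ffda_netaddrs;
 *		ff_device_versions4	ffda_versions<>;
 *	};
 */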
/* Decode opaque device data and construct new_ds using it */
struct nfs4_ff_layout_ds *
nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
			    gfp_t gfp_flags)
{
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	struct list_head dsaddrs;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_ff_layout_ds *new_ds = NULL;
	struct nfs4_ff_ds_version *ds_versions = NULL;
	u32 mp_count;
	u32 version_count;
	__be32 *p;
	int i, ret = -ENOMEM;

	/* set up xdr stream */
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		goto out_err;

	new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
	if (!new_ds)
		goto out_scratch;

	nfs4_init_deviceid_node(&new_ds->id_node,
				server,
				&pdev->dev_id);
	INIT_LIST_HEAD(&dsaddrs);

	xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* multipath count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	mp_count = be32_to_cpup(p);
	dprintk("%s: multipath ds count %d\n", __func__, mp_count);

	for (i = 0; i < mp_count; i++) {
		/* multipath ds */
		da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
					    &stream, gfp_flags);
		if (da)
			list_add_tail(&da->da_node, &dsaddrs);
	}
	if (list_empty(&dsaddrs)) {
		dprintk("%s: no suitable DS addresses found\n",
			__func__);
		ret = -ENOMEDIUM;
		goto out_err_drain_dsaddrs;
	}

	/* version count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	version_count = be32_to_cpup(p);
	dprintk("%s: version count %d\n", __func__, version_count);

	/* kcalloc() checks the multiplication for overflow, since
	 * version_count comes straight off the wire. Bail out through
	 * the draining path so the dsaddrs list is not leaked. */
	ds_versions = kcalloc(version_count, sizeof(struct nfs4_ff_ds_version),
			      gfp_flags);
	if (!ds_versions)
		goto out_err_drain_dsaddrs;

	for (i = 0; i < version_count; i++) {
		/* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
		 * tightly_coupled(4) */
		p = xdr_inline_decode(&stream, 20);
		if (unlikely(!p))
			goto out_err_drain_dsaddrs;
		ds_versions[i].version = be32_to_cpup(p++);
		ds_versions[i].minor_version = be32_to_cpup(p++);
		ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
		ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
		ds_versions[i].tightly_coupled = be32_to_cpup(p);

		if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
		if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;

		if (ds_versions[i].version != 3 || ds_versions[i].minor_version != 0) {
			dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
				i, ds_versions[i].version,
				ds_versions[i].minor_version);
			ret = -EPROTONOSUPPORT;
			goto out_err_drain_dsaddrs;
		}

		dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
			__func__, i, ds_versions[i].version,
			ds_versions[i].minor_version,
			ds_versions[i].rsize,
			ds_versions[i].wsize,
			ds_versions[i].tightly_coupled);
	}

137
138 new_ds->ds_versions = ds_versions;
139 new_ds->ds_versions_cnt = version_count;
140
141 new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
142 if (!new_ds->ds)
143 goto out_err_drain_dsaddrs;
144
145 /* If DS was already in cache, free ds addrs */
146 while (!list_empty(&dsaddrs)) {
147 da = list_first_entry(&dsaddrs,
148 struct nfs4_pnfs_ds_addr,
149 da_node);
150 list_del_init(&da->da_node);
151 kfree(da->da_remotestr);
152 kfree(da);
153 }
154
155 __free_page(scratch);
156 return new_ds;
157
158 out_err_drain_dsaddrs:
159 while (!list_empty(&dsaddrs)) {
160 da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
161 da_node);
162 list_del_init(&da->da_node);
163 kfree(da->da_remotestr);
164 kfree(da);
165 }
166
167 kfree(ds_versions);
168 out_scratch:
169 __free_page(scratch);
170 out_err:
171 kfree(new_ds);
172
173 dprintk("%s ERROR: returning %d\n", __func__, ret);
174 return NULL;
175 }
176
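/*
 * Forget a device ID that has stopped working. If that leaves the layout
 * segment without any usable data server, ask for the layout to be
 * returned so the MDS can issue a fresh one.
 */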
static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
					 struct nfs4_deviceid_node *devid)
{
	nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid);
	if (!ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
						  lseg);
}

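/*
 * mirror->mirror_ds is tristate: NULL means the device ID has not been
 * looked up yet, an ERR_PTR() means the lookup failed, and anything else
 * is a valid nfs4_ff_layout_ds. The lookup is done lazily here; the
 * cmpxchg() below resolves the race where two callers look the device up
 * at the same time, dropping the loser's reference.
 */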
static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
				   struct nfs4_ff_layout_mirror *mirror,
				   bool create)
{
	if (mirror == NULL || IS_ERR(mirror->mirror_ds))
		goto outerr;
	if (mirror->mirror_ds == NULL) {
		if (create) {
			struct nfs4_deviceid_node *node;
			struct pnfs_layout_hdr *lh = lseg->pls_layout;
			struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);

			node = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
					&mirror->devid, lh->plh_lc_cred,
					GFP_KERNEL);
			if (node)
				mirror_ds = FF_LAYOUT_MIRROR_DS(node);

			/* check for race with another call to this function */
			if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
			    mirror_ds != ERR_PTR(-ENODEV))
				nfs4_put_deviceid_node(node);
		} else
			goto outerr;
	}

	if (IS_ERR(mirror->mirror_ds))
		goto outerr;

	if (mirror->mirror_ds->ds == NULL) {
		struct nfs4_deviceid_node *devid;
		devid = &mirror->mirror_ds->id_node;
		ff_layout_mark_devid_invalid(lseg, devid);
		return false;
	}
	return true;
outerr:
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	return false;
}

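/*
 * Grow an existing error record to cover the union of its range and the
 * new [offset, offset + length) range. For example, merging [0, 4096)
 * into a record covering [2048, 8192) yields [0, 8192).
 */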
static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
			    u64 offset, u64 length)
{
	u64 end;

	end = max_t(u64, pnfs_end_offset(err->offset, err->length),
		    pnfs_end_offset(offset, length));
	err->offset = min_t(u64, err->offset, offset);
	err->length = end - err->offset;
}

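/*
 * Total order used to keep the per-layout error list sorted: compare by
 * opnum, then status, then stateid, then deviceid. Records that agree on
 * all four and whose byte ranges overlap or touch compare equal, which
 * is the signal to merge them.
 */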
static int
ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
		  const struct nfs4_ff_layout_ds_err *e2)
{
	int ret;

	if (e1->opnum != e2->opnum)
		return e1->opnum < e2->opnum ? -1 : 1;
	if (e1->status != e2->status)
		return e1->status < e2->status ? -1 : 1;
	ret = memcmp(e1->stateid.data, e2->stateid.data,
		     sizeof(e1->stateid.data));
	if (ret != 0)
		return ret;
	ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
	if (ret != 0)
		return ret;
	if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
		return -1;
	if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
		return 1;
	/* If ranges overlap or are contiguous, they are the same */
	return 0;
}

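/*
 * Insert @dserr into the sorted per-layout error list, merging it with
 * an existing record that ff_ds_error_match() reports as equal instead
 * of adding a duplicate entry.
 */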
static void
ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
			      struct nfs4_ff_layout_ds_err *dserr)
{
	struct nfs4_ff_layout_ds_err *err, *tmp;
	struct list_head *head = &flo->error_list;
	int match;

	/* Do insertion sort w/ merges */
	list_for_each_entry_safe(err, tmp, &flo->error_list, list) {
		match = ff_ds_error_match(err, dserr);
		if (match < 0)
			continue;
		if (match > 0) {
			/* Add entry "dserr" _before_ entry "err" */
			head = &err->list;
			break;
		}
		/* Entries match, so merge "err" into "dserr" */
		extend_ds_error(dserr, err->offset, err->length);
		list_replace(&err->list, &dserr->list);
		kfree(err);
		return;
	}

	list_add_tail(&dserr->list, head);
}

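/*
 * Record an I/O error seen against @mirror's data server so that it can
 * be reported back to the MDS when the layout is returned. Returns 0 on
 * success (or when @status is zero and there is nothing to record),
 * -EINVAL if the mirror has no device, or -ENOMEM.
 */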
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
			     struct nfs4_ff_layout_mirror *mirror, u64 offset,
			     u64 length, int status, enum nfs_opnum4 opnum,
			     gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds_err *dserr;

	if (status == 0)
		return 0;

	if (mirror->mirror_ds == NULL)
		return -EINVAL;

	dserr = kmalloc(sizeof(*dserr), gfp_flags);
	if (!dserr)
		return -ENOMEM;

	INIT_LIST_HEAD(&dserr->list);
	dserr->offset = offset;
	dserr->length = length;
	dserr->status = status;
	dserr->opnum = opnum;
	nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
	memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
	       NFS4_DEVICEID4_SIZE);

	spin_lock(&flo->generic_hdr.plh_inode->i_lock);
	ff_layout_add_ds_error_locked(flo, dserr);
	spin_unlock(&flo->generic_hdr.plh_inode->i_lock);

	return 0;
}

static struct rpc_cred *
ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
{
	struct rpc_cred *cred, __rcu **pcred;

	if (iomode == IOMODE_READ)
		pcred = &mirror->ro_cred;
	else
		pcred = &mirror->rw_cred;

	rcu_read_lock();
	do {
		cred = rcu_dereference(*pcred);
		if (!cred)
			break;

		/* get_rpccred_rcu() returns NULL if the refcount has
		 * already dropped to zero; retry in case *pcred was
		 * replaced under us */
		cred = get_rpccred_rcu(cred);
	} while (!cred);
	rcu_read_unlock();
	return cred;
}

struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
	struct nfs_fh *fh = NULL;

	if (!ff_layout_mirror_valid(lseg, mirror, false)) {
		pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
			__func__, mirror_idx);
		goto out;
	}

	/* FIXME: For now assume there is only 1 version available for the DS */
	fh = &mirror->fh_versions[0];
out:
	return fh;
}

/**
 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
 * @lseg: the layout segment we're operating on
 * @ds_idx: index of the DS to use
 * @fail_return: return layout on connect failure?
 *
 * Try to prepare a DS connection to accept an RPC call. This involves
 * selecting a mirror to use and connecting the client to it if it's not
 * already connected.
 *
 * Since we only need a single functioning mirror to satisfy a read, we don't
 * want to return the layout if there is one. For writes though, any down
 * mirror should result in a LAYOUTRETURN. @fail_return is how we distinguish
 * between the two cases.
 *
 * Returns a pointer to a connected DS object on success or NULL on failure.
 */
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
			  bool fail_return)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
	struct nfs4_pnfs_ds *ds = NULL;
	struct nfs4_deviceid_node *devid;
	struct inode *ino = lseg->pls_layout->plh_inode;
	struct nfs_server *s = NFS_SERVER(ino);
	unsigned int max_payload;
	int status;

	if (!ff_layout_mirror_valid(lseg, mirror, true)) {
		pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
			__func__, ds_idx);
		goto out;
	}

	devid = &mirror->mirror_ds->id_node;
	if (ff_layout_test_devid_unavailable(devid))
		goto out_fail;

	ds = mirror->mirror_ds->ds;
	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
	smp_rmb();
	if (ds->ds_clp)
		goto out;

	/* FIXME: For now we assume the server sent only one version of NFS
	 * to use for the DS.
	 */
	status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
				      dataserver_retrans,
				      mirror->mirror_ds->ds_versions[0].version,
				      mirror->mirror_ds->ds_versions[0].minor_version);

	/* connect success, check rsize/wsize limit */
	if (ds->ds_clp) {
		max_payload =
			nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
				       NULL);
		if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
			mirror->mirror_ds->ds_versions[0].rsize = max_payload;
		if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
			mirror->mirror_ds->ds_versions[0].wsize = max_payload;
		goto out;
	}
out_fail:
	ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				 mirror, lseg->pls_range.offset,
				 lseg->pls_range.length, NFS4ERR_NXIO,
				 OP_ILLEGAL, GFP_NOIO);
	if (fail_return || !ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(ino, lseg);
	ds = NULL;
out:
	return ds;
}

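/*
 * Sketch of how the helpers above are typically combined for DS I/O
 * (compare ff_layout_read_pagelist() in flexfilelayout.c):
 *
 *	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
 *	if (!ds)
 *		return PNFS_NOT_ATTEMPTED;	(fall back to the MDS)
 *	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
 *						   hdr->inode);
 *	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
 */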
struct rpc_cred *
ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
		      struct rpc_cred *mdscred)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
	struct rpc_cred *cred;

	if (mirror) {
		cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
		if (!cred)
			cred = get_rpccred(mdscred);
	} else {
		cred = get_rpccred(mdscred);
	}
	return cred;
}

/**
 * Find or create a DS rpc client with the MDS server rpc client auth flavor
 * in the nfs_client cl_ds_clients list.
 */
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
				 struct nfs_client *ds_clp, struct inode *inode)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);

	switch (mirror->mirror_ds->ds_versions[0].version) {
	case 3:
		/* For NFSv3 DS, flavor is set when creating DS connections */
		return ds_clp->cl_rpcclient;
	case 4:
		return nfs4_find_or_create_ds_client(ds_clp, inode);
	default:
		BUG();
	}
}

void ff_layout_free_ds_ioerr(struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;

	while (!list_empty(head)) {
		err = list_first_entry(head,
				       struct nfs4_ff_layout_ds_err,
				       list);
		list_del(&err->list);
		kfree(err);
	}
}

/* called with inode i_lock held */
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;
	__be32 *p;

	list_for_each_entry(err, head, list) {
		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
		 * + status(4) + opnum(4)
		 */
		p = xdr_reserve_space(xdr,
				28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
		if (unlikely(!p))
			return -ENOBUFS;
		p = xdr_encode_hyper(p, err->offset);
		p = xdr_encode_hyper(p, err->length);
		p = xdr_encode_opaque_fixed(p, &err->stateid,
					    NFS4_STATEID_SIZE);
		/* Encode 1 error */
		*p++ = cpu_to_be32(1);
		p = xdr_encode_opaque_fixed(p, &err->deviceid,
					    NFS4_DEVICEID4_SIZE);
		*p++ = cpu_to_be32(err->status);
		*p++ = cpu_to_be32(err->opnum);
		dprintk("%s: offset %llu length %llu status %d op %d\n",
			__func__, err->offset, err->length, err->status,
			err->opnum);
	}

	return 0;
}

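/*
 * Move up to @maxnum error records that intersect @range from the
 * layout's error list onto @head, returning how many were moved.
 * Takes the inode i_lock itself.
 */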
static
unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	struct inode *inode = lo->plh_inode;
	struct nfs4_ff_layout_ds_err *err, *n;
	unsigned int ret = 0;

	spin_lock(&inode->i_lock);
	list_for_each_entry_safe(err, n, &flo->error_list, list) {
		if (!pnfs_is_range_intersecting(err->offset,
				pnfs_end_offset(err->offset, err->length),
				range->offset,
				pnfs_end_offset(range->offset, range->length)))
			continue;
		if (!maxnum)
			break;
		list_move(&err->list, head);
		maxnum--;
		ret++;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	unsigned int ret;

	ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
	/* If we're over the max, discard all remaining entries
	 * (-1 wraps to UINT_MAX, i.e. no limit on the second pass) */
	if (ret == maxnum) {
		LIST_HEAD(discard);
		do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
		ff_layout_free_ds_ioerr(&discard);
	}
	return ret;
}

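/*
 * A READ can be serviced as long as any one mirror is usable; a mirror
 * whose device ID has not been looked up yet counts as potentially
 * usable.
 */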
static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (mirror) {
			if (!mirror->mirror_ds)
				return true;
			if (IS_ERR(mirror->mirror_ds))
				continue;
			devid = &mirror->mirror_ds->id_node;
			if (!ff_layout_test_devid_unavailable(devid))
				return true;
		}
	}

	return false;
}

static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (!mirror || IS_ERR(mirror->mirror_ds))
			return false;
		if (!mirror->mirror_ds)
			continue;
		devid = &mirror->mirror_ds->id_node;
		if (ff_layout_test_devid_unavailable(devid))
			return false;
	}

	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
}

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_READ)
		return ff_read_layout_has_available_ds(lseg);
	/* Note: RW layout needs all mirrors available */
	return ff_rw_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
{
	return ff_layout_no_fallback_to_mds(lseg) ||
	       ff_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
{
	return lseg->pls_range.iomode == IOMODE_RW &&
	       ff_layout_no_read_on_rw(lseg);
}

module_param(dataserver_retrans, uint, 0644);
MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
			"retries a request before it attempts further "
			"recovery action.");
module_param(dataserver_timeo, uint, 0644);
MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
			"NFSv4.1 client waits for a response from a "
			"data server before it retries an NFS request.");