/*
 * Device operations for the pnfs nfs4 flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/sunrpc/addr.h>

#include "../internal.h"
#include "../nfs4session.h"
#include "flexfilelayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
static unsigned int dataserver_retrans;

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);

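/* Drop our reference to the mirror's deviceid node, if one was ever attached */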
void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	if (!IS_ERR_OR_NULL(mirror_ds))
		nfs4_put_deviceid_node(&mirror_ds->id_node);
}

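/*
 * Tear down a deviceid node once its last reference has been dropped:
 * release the data server, free the per-version array and free the node
 * itself after an RCU grace period.
 */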
void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
	nfs4_pnfs_ds_put(mirror_ds->ds);
	kfree(mirror_ds->ds_versions);
	kfree_rcu(mirror_ds, id_node.rcu);
}

/* Decode opaque device data and construct new_ds using it */
struct nfs4_ff_layout_ds *
nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
			    gfp_t gfp_flags)
{
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	struct list_head dsaddrs;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_ff_layout_ds *new_ds = NULL;
	struct nfs4_ff_ds_version *ds_versions = NULL;
	u32 mp_count;
	u32 version_count;
	__be32 *p;
	int i, ret = -ENOMEM;

	/* set up xdr stream */
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		goto out_err;

	new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
	if (!new_ds)
		goto out_scratch;

	nfs4_init_deviceid_node(&new_ds->id_node,
				server,
				&pdev->dev_id);
	INIT_LIST_HEAD(&dsaddrs);

	xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* multipath count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	mp_count = be32_to_cpup(p);
	dprintk("%s: multipath ds count %d\n", __func__, mp_count);

	for (i = 0; i < mp_count; i++) {
		/* multipath ds */
		da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
					    &stream, gfp_flags);
		if (da)
			list_add_tail(&da->da_node, &dsaddrs);
	}
	if (list_empty(&dsaddrs)) {
		dprintk("%s: no suitable DS addresses found\n",
			__func__);
		ret = -ENOMEDIUM;
		goto out_err_drain_dsaddrs;
	}

	/* version count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	version_count = be32_to_cpup(p);
	dprintk("%s: version count %d\n", __func__, version_count);

	ds_versions = kcalloc(version_count,
			      sizeof(struct nfs4_ff_ds_version),
			      gfp_flags);
	if (!ds_versions)
		goto out_err_drain_dsaddrs;

	for (i = 0; i < version_count; i++) {
		/* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
		 * tightly_coupled(4) */
		p = xdr_inline_decode(&stream, 20);
		if (unlikely(!p))
			goto out_err_drain_dsaddrs;
		ds_versions[i].version = be32_to_cpup(p++);
		ds_versions[i].minor_version = be32_to_cpup(p++);
		ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
		ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
		ds_versions[i].tightly_coupled = be32_to_cpup(p);

		if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
		if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;

		/*
		 * Check for a valid major/minor combination.
		 * Currently we support data servers that speak
		 * v3, v4.0, v4.1 and v4.2.
		 */
		if (!((ds_versions[i].version == 3 && ds_versions[i].minor_version == 0) ||
		      (ds_versions[i].version == 4 && ds_versions[i].minor_version < 3))) {
			dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
				i, ds_versions[i].version,
				ds_versions[i].minor_version);
			ret = -EPROTONOSUPPORT;
			goto out_err_drain_dsaddrs;
		}

		dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
			__func__, i, ds_versions[i].version,
			ds_versions[i].minor_version,
			ds_versions[i].rsize,
			ds_versions[i].wsize,
			ds_versions[i].tightly_coupled);
	}

	new_ds->ds_versions = ds_versions;
	new_ds->ds_versions_cnt = version_count;

	new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
	if (!new_ds->ds)
		goto out_err_drain_dsaddrs;

	/* If DS was already in cache, free ds addrs */
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	__free_page(scratch);
	return new_ds;

out_err_drain_dsaddrs:
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds_versions);
out_scratch:
	__free_page(scratch);
out_err:
	kfree(new_ds);

	dprintk("%s ERROR: returning %d\n", __func__, ret);
	return NULL;
}

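/*
 * Forget a deviceid that has gone bad: remove it from the deviceid cache
 * and, if the segment is left without a usable data server, ask for the
 * layout to be returned to the MDS.
 */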
static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
					 struct nfs4_deviceid_node *devid)
{
	nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid);
	if (!ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
						  lseg);
}

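/*
 * Check that @mirror is backed by a usable deviceid node.  If @create is
 * true and no node has been attached yet, look the device up (which may
 * issue a GETDEVICEINFO) and attach it, using cmpxchg() to cope with a
 * racing caller.  Returns false, after flagging the layout for return,
 * if the mirror cannot be used.
 */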
static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
				   struct nfs4_ff_layout_mirror *mirror,
				   bool create)
{
	if (mirror == NULL || IS_ERR(mirror->mirror_ds))
		goto outerr;
	if (mirror->mirror_ds == NULL) {
		if (create) {
			struct nfs4_deviceid_node *node;
			struct pnfs_layout_hdr *lh = lseg->pls_layout;
			struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);

			node = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
					&mirror->devid, lh->plh_lc_cred,
					GFP_KERNEL);
			if (node)
				mirror_ds = FF_LAYOUT_MIRROR_DS(node);

			/* check for race with another call to this function */
			if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
			    mirror_ds != ERR_PTR(-ENODEV))
				nfs4_put_deviceid_node(node);
		} else
			goto outerr;
	}

	if (IS_ERR(mirror->mirror_ds))
		goto outerr;

	if (mirror->mirror_ds->ds == NULL) {
		struct nfs4_deviceid_node *devid;
		devid = &mirror->mirror_ds->id_node;
		ff_layout_mark_devid_invalid(lseg, devid);
		return false;
	}
	return true;
outerr:
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	return false;
}

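/* Extend an existing error record so that it also covers [offset, offset + length) */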
static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
			    u64 offset, u64 length)
{
	u64 end;

	end = max_t(u64, pnfs_end_offset(err->offset, err->length),
		    pnfs_end_offset(offset, length));
	err->offset = min_t(u64, err->offset, offset);
	err->length = end - err->offset;
}

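/*
 * Compare two error records for the sorted error list: order by opnum,
 * then status, then stateid and deviceid.  Records whose byte ranges
 * overlap or are contiguous compare equal, so they can be merged.
 */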
static int
ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
		  const struct nfs4_ff_layout_ds_err *e2)
{
	int ret;

	if (e1->opnum != e2->opnum)
		return e1->opnum < e2->opnum ? -1 : 1;
	if (e1->status != e2->status)
		return e1->status < e2->status ? -1 : 1;
	ret = memcmp(e1->stateid.data, e2->stateid.data,
		     sizeof(e1->stateid.data));
	if (ret != 0)
		return ret;
	ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
	if (ret != 0)
		return ret;
	if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
		return -1;
	if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
		return 1;
	/* If ranges overlap or are contiguous, they are the same */
	return 0;
}

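/*
 * Insert @dserr into the layout's sorted error list, merging it with any
 * existing entry that matches.  Caller must hold the inode's i_lock.
 */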
static void
ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
			      struct nfs4_ff_layout_ds_err *dserr)
{
	struct nfs4_ff_layout_ds_err *err, *tmp;
	struct list_head *head = &flo->error_list;
	int match;

	/* Do insertion sort w/ merges */
	list_for_each_entry_safe(err, tmp, &flo->error_list, list) {
		match = ff_ds_error_match(err, dserr);
		if (match < 0)
			continue;
		if (match > 0) {
			/* Add entry "dserr" _before_ entry "err" */
			head = &err->list;
			break;
		}
		/* Entries match, so merge "err" into "dserr" */
		extend_ds_error(dserr, err->offset, err->length);
		list_replace(&err->list, &dserr->list);
		kfree(err);
		return;
	}

	list_add_tail(&dserr->list, head);
}

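/*
 * Record an I/O error seen on a data server so that it can be reported
 * back to the MDS when the layout is returned.
 */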
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
			     struct nfs4_ff_layout_mirror *mirror, u64 offset,
			     u64 length, int status, enum nfs_opnum4 opnum,
			     gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds_err *dserr;

	if (status == 0)
		return 0;

	if (mirror->mirror_ds == NULL)
		return -EINVAL;

	dserr = kmalloc(sizeof(*dserr), gfp_flags);
	if (!dserr)
		return -ENOMEM;

	INIT_LIST_HEAD(&dserr->list);
	dserr->offset = offset;
	dserr->length = length;
	dserr->status = status;
	dserr->opnum = opnum;
	nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
	memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
	       NFS4_DEVICEID4_SIZE);

	spin_lock(&flo->generic_hdr.plh_inode->i_lock);
	ff_layout_add_ds_error_locked(flo, dserr);
	spin_unlock(&flo->generic_hdr.plh_inode->i_lock);

	return 0;
}

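/*
 * Look up the RPC credential that the layout supplied for this mirror and
 * I/O mode, taking a reference under RCU.  Returns NULL if no credential
 * has been set.
 */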
static struct rpc_cred *
ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
{
	struct rpc_cred *cred, __rcu **pcred;

	if (iomode == IOMODE_READ)
		pcred = &mirror->ro_cred;
	else
		pcred = &mirror->rw_cred;

	rcu_read_lock();
	do {
		cred = rcu_dereference(*pcred);
		if (!cred)
			break;

		cred = get_rpccred_rcu(cred);
	} while (!cred);
	rcu_read_unlock();
	return cred;
}

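/*
 * Return the filehandle to use for I/O to the data server backing mirror
 * @mirror_idx, or NULL if that mirror has no usable data server.
 */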
struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
	struct nfs_fh *fh = NULL;

	if (!ff_layout_mirror_valid(lseg, mirror, false)) {
		pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
			__func__, mirror_idx);
		goto out;
	}

	/* FIXME: For now assume there is only 1 version available for the DS */
	fh = &mirror->fh_versions[0];
out:
	return fh;
}

/**
 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
 * @lseg: the layout segment we're operating on
 * @ds_idx: index of the DS to use
 * @fail_return: return layout on connect failure?
 *
 * Try to prepare a DS connection to accept an RPC call. This involves
 * selecting a mirror to use and connecting the client to it if it's not
 * already connected.
 *
 * Since we only need a single functioning mirror to satisfy a read, we don't
 * want to return the layout if there is one. For writes though, any down
 * mirror should result in a LAYOUTRETURN. @fail_return is how we distinguish
 * between the two cases.
 *
 * Returns a pointer to a connected DS object on success or NULL on failure.
 */
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
			  bool fail_return)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
	struct nfs4_pnfs_ds *ds = NULL;
	struct nfs4_deviceid_node *devid;
	struct inode *ino = lseg->pls_layout->plh_inode;
	struct nfs_server *s = NFS_SERVER(ino);
	unsigned int max_payload;
	int status;

	if (!ff_layout_mirror_valid(lseg, mirror, true)) {
		pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
			__func__, ds_idx);
		goto out;
	}

	devid = &mirror->mirror_ds->id_node;
	if (ff_layout_test_devid_unavailable(devid))
		goto out_fail;

	ds = mirror->mirror_ds->ds;
	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
	smp_rmb();
	if (ds->ds_clp)
		goto out;

	/* FIXME: For now we assume the server sent only one version of NFS
	 * to use for the DS.
	 */
	status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
			     dataserver_retrans,
			     mirror->mirror_ds->ds_versions[0].version,
			     mirror->mirror_ds->ds_versions[0].minor_version);

	/* connect success, check rsize/wsize limit */
	if (!status) {
		max_payload =
			nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
				       NULL);
		if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
			mirror->mirror_ds->ds_versions[0].rsize = max_payload;
		if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
			mirror->mirror_ds->ds_versions[0].wsize = max_payload;
		goto out;
	}
out_fail:
	ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				 mirror, lseg->pls_range.offset,
				 lseg->pls_range.length, NFS4ERR_NXIO,
				 OP_ILLEGAL, GFP_NOIO);
	if (fail_return || !ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(ino, lseg);
	ds = NULL;
out:
	return ds;
}

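/*
 * Pick the credential for an RPC to the data server: prefer the mirror's
 * own credential (if the layout supplied one) and fall back to the MDS
 * credential @mdscred.  A reference is taken either way.
 */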
struct rpc_cred *
ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
		      struct rpc_cred *mdscred)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
	struct rpc_cred *cred;

	if (mirror) {
		cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
		if (!cred)
			cred = get_rpccred(mdscred);
	} else {
		cred = get_rpccred(mdscred);
	}
	return cred;
}

/*
 * Find or create a DS rpc client with the MDS server rpc client auth flavor
 * in the nfs_client cl_ds_clients list.
 */
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
				 struct nfs_client *ds_clp, struct inode *inode)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);

	switch (mirror->mirror_ds->ds_versions[0].version) {
	case 3:
		/* For NFSv3 DS, flavor is set when creating DS connections */
		return ds_clp->cl_rpcclient;
	case 4:
		return nfs4_find_or_create_ds_client(ds_clp, inode);
	default:
		BUG();
	}
}

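/* Free a list of error records, e.g. one assembled by ff_layout_fetch_ds_ioerr() */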
void ff_layout_free_ds_ioerr(struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;

	while (!list_empty(head)) {
		err = list_first_entry(head,
				       struct nfs4_ff_layout_ds_err,
				       list);
		list_del(&err->list);
		kfree(err);
	}
}

/* called with inode i_lock held */
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;
	__be32 *p;

	list_for_each_entry(err, head, list) {
		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
		 * + status(4) + opnum(4)
		 */
		p = xdr_reserve_space(xdr,
				28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
		if (unlikely(!p))
			return -ENOBUFS;
		p = xdr_encode_hyper(p, err->offset);
		p = xdr_encode_hyper(p, err->length);
		p = xdr_encode_opaque_fixed(p, &err->stateid,
					    NFS4_STATEID_SIZE);
		/* Encode 1 error */
		*p++ = cpu_to_be32(1);
		p = xdr_encode_opaque_fixed(p, &err->deviceid,
					    NFS4_DEVICEID4_SIZE);
		*p++ = cpu_to_be32(err->status);
		*p++ = cpu_to_be32(err->opnum);
		dprintk("%s: offset %llu length %llu status %d op %d\n",
			__func__, err->offset, err->length, err->status,
			err->opnum);
	}

	return 0;
}

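/*
 * Move up to @maxnum error records that intersect @range from the layout's
 * error list onto @head.  Returns the number of records moved; passing
 * (unsigned int)-1 as @maxnum effectively means "no limit".
 */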
static
unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	struct inode *inode = lo->plh_inode;
	struct nfs4_ff_layout_ds_err *err, *n;
	unsigned int ret = 0;

	spin_lock(&inode->i_lock);
	list_for_each_entry_safe(err, n, &flo->error_list, list) {
		if (!pnfs_is_range_intersecting(err->offset,
				pnfs_end_offset(err->offset, err->length),
				range->offset,
				pnfs_end_offset(range->offset, range->length)))
			continue;
		if (!maxnum)
			break;
		list_move(&err->list, head);
		maxnum--;
		ret++;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

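/*
 * Collect the error records intersecting @range for encoding.  If the
 * @maxnum limit is reached, the remaining matching records are removed
 * and freed rather than left on the list.
 */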
unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	unsigned int ret;

	ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
	/* If we're over the max, discard all remaining entries */
	if (ret == maxnum) {
		LIST_HEAD(discard);
		do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
		ff_layout_free_ds_ioerr(&discard);
	}
	return ret;
}

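/*
 * For READ layouts a single usable mirror is enough; a mirror with no
 * deviceid node attached yet is treated as potentially usable.
 */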
static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (mirror) {
			if (!mirror->mirror_ds)
				return true;
			if (IS_ERR(mirror->mirror_ds))
				continue;
			devid = &mirror->mirror_ds->id_node;
			if (!ff_layout_test_devid_unavailable(devid))
				return true;
		}
	}

	return false;
}

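/*
 * Writes go to every mirror, so a RW layout is only usable if none of its
 * mirrors is known to be unavailable.
 */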
static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (!mirror || IS_ERR(mirror->mirror_ds))
			return false;
		if (!mirror->mirror_ds)
			continue;
		devid = &mirror->mirror_ds->id_node;
		if (ff_layout_test_devid_unavailable(devid))
			return false;
	}

	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
}

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_READ)
		return ff_read_layout_has_available_ds(lseg);
	/* Note: RW layout needs all mirrors available */
	return ff_rw_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
{
	return ff_layout_no_fallback_to_mds(lseg) ||
	       ff_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
{
	return lseg->pls_range.iomode == IOMODE_RW &&
	       ff_layout_no_read_on_rw(lseg);
}

module_param(dataserver_retrans, uint, 0644);
MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
			"retries a request before it attempts further "
			"recovery action.");
module_param(dataserver_timeo, uint, 0644);
MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
			"NFSv4.1 client waits for a response from a "
			"data server before it retries an NFS request.");