/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data. Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols. Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache. A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache. The client does not
 * correct unaligned requests from applications. All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files. Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

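/*
 * Illustrative userspace view (a sketch only, not part of this file's
 * interface): applications typically request uncached NFS I/O by opening
 * the file with O_DIRECT and issuing ordinary read()/write() calls:
 *
 *	fd = open("/mnt/nfs/file", O_RDWR | O_DIRECT);
 *	posix_memalign(&buf, 4096, 65536);
 *	pwrite(fd, buf, 65536, 0);
 *
 * The path, buffer size and alignment above are made-up example values;
 * as noted above, this client does not correct unaligned requests.
 */
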
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context	*l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

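/*
 * get_dreq()/put_dreq() track the number of in-flight requests that still
 * reference the nfs_direct_req; the caller that drops io_count to zero
 * (put_dreq() returns true) is responsible for completing the dreq.
 */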
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

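/*
 * Drop the page references taken by get_user_pages() once the nfs_page
 * requests hold their own references.
 */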
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

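/*
 * Point a commit_info at the per-dreq commit state, so unstable direct
 * writes are tracked and committed through this nfs_direct_req rather
 * than through the inode's regular commit lists.
 */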
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

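/*
 * Per-RPC read completion: account for the bytes that arrived, zero the
 * tail of a short read at EOF, dirty the user pages that now hold data,
 * and release the nfs_page requests.  The dreq itself is completed once
 * the last outstanding RPC drops io_count to zero.
 */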
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			if (bytes > hdr->good_bytes)
				zero_user(page, 0, PAGE_SIZE);
			else if (hdr->good_bytes - bytes < PAGE_SIZE)
				zero_user_segment(page,
					hdr->good_bytes & ~PAGE_MASK,
					PAGE_SIZE);
		}
		if (!PageCompound(page)) {
			if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
				if (bytes < hdr->good_bytes)
					set_page_dirty(page);
			} else
				set_page_dirty(page);
		}
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation. If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads. Read length accounting is
 * handled automatically by nfs_direct_read_result(). Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
						const struct iovec *iov,
						loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *),
					  GFP_KERNEL);
		if (!pagevec)
			break;
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 1, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;
		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

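/*
 * Walk the iovec and schedule a READ for each segment.  The initial
 * get_dreq() reference keeps the dreq from completing while requests are
 * still being queued; it is dropped once every segment has been
 * scheduled and the pageio descriptor flushed.
 */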
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return 0;
}

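/*
 * Set up an nfs_direct_req for a read, schedule the I/O, and, for
 * synchronous callers, wait for the result.  Async (aio) callers get
 * -EIOCBQUEUED back and are completed later through aio_complete().
 */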
static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
	NFS_I(inode)->read_io += result;
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

static void nfs_inode_dio_write_done(struct inode *inode)
{
	nfs_zap_mapping(inode, inode->i_mapping);
	inode_dio_done(inode);
}

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
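/*
 * Resend writes whose commit failed or whose verifier changed: every
 * request parked on the dreq's commit lists is resubmitted as a stable
 * (FLUSH_STABLE) write; requests that cannot be requeued mark the dreq
 * with -EIO.
 */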
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	NFS_PROTO(dreq->inode)->write_pageio_init(&desc, dreq->inode, FLUSH_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

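/*
 * COMMIT completion: if the commit failed, or the server's write
 * verifier no longer matches the one saved with the unstable writes,
 * flag the dreq so the data is rewritten; otherwise the requests can
 * finally be released.
 */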
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

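/*
 * Deferred write completion, run from the workqueue: depending on how
 * the writes finished, either send a COMMIT for unstable data, resend
 * the writes, or zap the mapping (so cached readers revalidate) and
 * finish the dreq.
 */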
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_inode_dio_write_done(dreq->inode);
			nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_inode_dio_write_done(inode);
	nfs_direct_complete(dreq);
}
#endif

/*
 * NB: Return the value of the first error return code. Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation. If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes. Write length accounting is
 * handled automatically by nfs_direct_write_result(). Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
						 const struct iovec *iov,
						 loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
		if (!pagevec)
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 0, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

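/*
 * Per-RPC write completion: record how many bytes were written and
 * decide, per header, whether the pages need a COMMIT (unstable reply),
 * a resend, or nothing more.  The first unstable reply's verifier is
 * saved in the dreq; a later reply carrying a different verifier forces
 * the writes to be rescheduled.
 */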
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	int bit = -1;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error != 0)
		bit = NFS_IOHDR_ERROR;
	else {
		dreq->count += hdr->good_bytes;
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			bit = NFS_IOHDR_NEED_RESCHED;
		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				bit = NFS_IOHDR_NEED_RESCHED;
			else if (dreq->flags == 0) {
				memcpy(&dreq->verf, hdr->verf,
				       sizeof(dreq->verf));
				bit = NFS_IOHDR_NEED_COMMIT;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
					bit = NFS_IOHDR_NEED_RESCHED;
				} else
					bit = NFS_IOHDR_NEED_COMMIT;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		switch (bit) {
		case NFS_IOHDR_NEED_RESCHED:
		case NFS_IOHDR_NEED_COMMIT:
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

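/*
 * Walk the iovec and schedule a WRITE for each segment.  i_dio_count is
 * raised for the duration of the direct write; it is dropped again in
 * nfs_inode_dio_write_done() once all writes (and any commit) have
 * finished, or immediately here if nothing could be scheduled.
 */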
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);
	NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file. For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change. Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size. The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache. We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}