// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
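
/*
 * Illustrative user-space sketch (not part of this file): an
 * application typically requests uncached I/O simply by opening the
 * file with O_DIRECT.  The path below is hypothetical.
 *
 *	int fd = open("/mnt/nfs/table.dat", O_RDWR | O_DIRECT);
 *	ssize_t n = pread(fd, buf, len, offset);
 *
 * Reads and writes on such a descriptor bypass the page cache and
 * arrive here via nfs_file_direct_read() and nfs_file_direct_write().
 */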

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context	*l_ctx;		/* Lock context info */
	struct kiocb		*iocb;		/* controlling i/o request */
	struct inode		*inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */

	loff_t			io_start;	/* Start offset for I/O */
	ssize_t			count,		/* bytes actually processed */
				max_count,	/* max expected count */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
	/* for write */
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	/* for read */
#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
#define NFS_ODIRECT_DONE		INT_MAX	/* the direct request has completed */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}
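
/*
 * Reference counting sketch: each scheduling function takes one
 * reference up front, and nfs_direct_pgio_init() takes another for
 * every pageio header it dispatches.  Each completion drops one, and
 * whoever brings io_count to zero (put_dreq() returning true) must
 * call the final completion routine.
 */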

static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
			    const struct nfs_pgio_header *hdr,
			    ssize_t dreq_len)
{
	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
		return;
	if (dreq->max_count >= dreq_len) {
		dreq->max_count = dreq_len;
		if (dreq->count > dreq_len)
			dreq->count = dreq_len;

		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
			dreq->error = hdr->error;
		else /* Clear outstanding error if this is EOF */
			dreq->error = 0;
	}
}

static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
		       const struct nfs_pgio_header *hdr)
{
	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;

	nfs_direct_handle_truncated(dreq, hdr, dreq_len);

	if (dreq_len > dreq->max_count)
		dreq_len = dreq->max_count;

	if (dreq->count < dreq_len)
		dreq->count = dreq_len;
}
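
/*
 * Worked example of the accounting above: for a 2MB read with
 * io_start 0, an EOF reply covering only the first 1MB yields
 * hdr_end = 1MB, so max_count is clamped to 1MB and no later reply
 * can raise dreq->count past what the file actually held.
 */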

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* we only support swap files calling nfs_direct_IO */
	if (!IS_SWAPFILE(inode))
		return 0;

	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		return nfs_file_direct_read(iocb, iter, true);
	return nfs_file_direct_write(iocb, iter, true);
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}
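
/*
 * Note that nfs_direct_req_alloc() hands back two references:
 * kref_init() accounts for the I/O completion path, and kref_get()
 * adds one for the caller, who drops it via nfs_direct_req_release().
 */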

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res, 0);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}
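
/*
 * Both completion styles funnel through nfs_direct_complete() above:
 * async callers are notified via iocb->ki_complete(), while sync
 * callers sleep in nfs_direct_wait() until complete() fires.
 */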

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more reads.  Read length accounting is
 * handled by nfs_direct_count_bytes() as the replies complete.
 * Otherwise, if no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return requested_bytes;
}
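
/*
 * Sanity check of the pagevec arithmetic above, assuming 4KB pages:
 * a 10000-byte segment starting at pgbase 100 spans
 * (10000 + 100 + 4095) / 4096 = 3 pages, and the first request
 * covers PAGE_SIZE - pgbase = 3996 bytes of the first page.
 */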

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
			     bool swap)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result, requested;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		 file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (iter_is_iovec(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

	if (!swap)
		nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	if (!swap)
		nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos += result;
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

static void
nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
	struct nfs_page *req, *next;

	list_for_each_entry(req, list, wb_list) {
		if (req->wb_head != req || req->wb_this_page == req)
			continue;
		for (next = req->wb_this_page;
		     next != req->wb_head;
		     next = next->wb_this_page) {
			nfs_list_remove_request(next);
			nfs_release_request(next);
		}
		nfs_join_page_group(req, inode);
	}
}
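
/*
 * Rescheduling below resends each page group as a single unit, so
 * nfs_direct_join_group() above first releases the subrequests and
 * folds each group back into its head request via
 * nfs_join_page_group().
 */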

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	pnfs_recover_commit_reqs(list, cinfo);
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	nfs_direct_join_group(&reqs, dreq->inode);

	dreq->count = 0;
	dreq->max_count = 0;
	list_for_each_entry(req, &reqs, wb_list)
		dreq->max_count += req->wb_bytes;
	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		/* Bump the transmission count */
		req->wb_nio++;
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_move_request(req, &failed);
			spin_lock(&cinfo.inode->i_lock);
			dreq->flags = 0;
			if (desc.pg_error < 0)
				dreq->error = desc.pg_error;
			else
				dreq->error = -EIO;
			spin_unlock(&cinfo.inode->i_lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	if (status < 0) {
		/* Errors in commit are fatal */
		dreq->error = status;
		dreq->max_count = 0;
		dreq->count = 0;
		dreq->flags = NFS_ODIRECT_DONE;
	} else if (dreq->flags == NFS_ODIRECT_DONE)
		status = dreq->error;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			/*
			 * Despite the reboot, the write was successful,
			 * so reset wb_nio.
			 */
			req->wb_nio = 0;
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else /* Error or match */
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (nfs_commit_end(cinfo.mds))
		nfs_direct_write_complete(dreq);
}
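
/*
 * nfs_write_match_verf() above compares the verifier returned by the
 * COMMIT with the one saved in each request at WRITE time; a mismatch
 * means the server rebooted and may have lost the uncommitted data,
 * so those requests are marked for rescheduling instead of released.
 */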

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	spin_lock(&dreq->lock);
	if (dreq->flags != NFS_ODIRECT_DONE)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	LIST_HEAD(reqs);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
	case NFS_ODIRECT_DO_COMMIT:
		nfs_direct_commit_schedule(dreq);
		break;
	case NFS_ODIRECT_RESCHED_WRITES:
		nfs_direct_write_reschedule(dreq);
		break;
	default:
		nfs_direct_write_clear_reqs(dreq);
		nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
		nfs_direct_complete(dreq);
	}
}
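
/*
 * Summary of the deferred-work state machine above:
 *
 *	NFS_ODIRECT_DO_COMMIT      - send a COMMIT for unstable replies
 *	NFS_ODIRECT_RESCHED_WRITES - resend the WRITEs themselves
 *	anything else (incl. DONE) - release requests and complete
 */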

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
	int flags = NFS_ODIRECT_DONE;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
		if (!dreq->flags)
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
		flags = dreq->flags;
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (flags == NFS_ODIRECT_DO_COMMIT) {
			kref_get(&req->wb_kref);
			memcpy(&req->wb_verf, &hdr->verf.verifier,
			       sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->ds_commit_idx);
		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.offset + hdr->args.count -
				  hdr->io_start;
	}
	spin_unlock(&dreq->lock);
}
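
/*
 * In the error path above, good_bytes is recomputed as the request's
 * full extent relative to the header (args.offset + args.count -
 * io_start), so the common NFS code resends the whole range as if it
 * were an ordinary unstable write.
 */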

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * NB: Return the value of the first error return code.  Subsequent
 * errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled by nfs_direct_count_bytes() as the replies complete.
 * Otherwise, if no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos, int ioflags)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, ioflags, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
			      bool swap)
{
	ssize_t result, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		 file, iov_iter_count(iter), (long long) iocb->ki_pos);

	if (swap)
		/* bypass generic checks */
		result = iov_iter_count(iter);
	else
		result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
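	/* "end" is the inclusive index of the last page this write
	 * touches; it bounds the page cache invalidation done below. */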

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;
	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

	if (swap) {
		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
							    FLUSH_STABLE);
	} else {
		nfs_start_io_direct(inode);

		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
							    FLUSH_COND_STABLE);

		if (mapping->nrpages) {
			invalidate_inode_pages2_range(mapping,
						      pos >> PAGE_SHIFT, end);
		}

		nfs_end_io_direct(inode);
	}

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
					      sizeof(struct nfs_direct_req),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}