/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 *
 */
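
/*
 * For illustration only (not part of this file): a minimal user-space
 * sketch of how an application requests the uncached I/O described
 * above.  The file name is hypothetical; it is the O_DIRECT flag on
 * open() that steers subsequent reads and writes through this file.
 *
 *	int fd = open("/mnt/nfs/dbfile", O_RDWR | O_DIRECT);
 *	char buf[8192];
 *	if (fd >= 0)
 *		(void) read(fd, buf, sizeof(buf));
 *
 * As noted above, the buffer and file offset need not be aligned;
 * the NFS client passes unaligned requests through unchanged.
 */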

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_VFS
#define MAX_DIRECTIO_SIZE	(4096UL << PAGE_SHIFT)
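
/*
 * Worked example of the limit above, assuming 4KB pages (PAGE_SHIFT
 * of 12): 4096UL << 12 is 16,777,216, so a single direct I/O request
 * is capped at 16MB, and the page array kmalloc'd by
 * nfs_get_user_pages() below holds at most roughly 4096 pointers
 * (one more when the user buffer is not page-aligned).
 */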

static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);

static kmem_cache_t *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */
	struct list_head	list;		/* nfs_read_data structs */
	struct file *		filp;		/* file descriptor */
	struct kiocb *		iocb;		/* controlling i/o request */
	wait_queue_head_t	wait;		/* wait for i/o completion */
	struct inode *		inode;		/* target file of I/O */
	struct page **		pages;		/* pages in our buffer */
	unsigned int		npages;		/* count of pages */
	atomic_t		complete,	/* i/os we're waiting for */
				count,		/* bytes actually processed */
				error;		/* any reported error */
};

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	struct dentry *dentry = iocb->ki_filp->f_dentry;

	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			dentry->d_name.name, (long long) pos, nr_segs);

	return -EINVAL;
}

static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	/* set an arbitrary limit to prevent type overflow */
	/* XXX: this can probably be as large as INT_MAX */
	if (size > MAX_DIRECTIO_SIZE) {
		*pages = NULL;
		return -EFBIG;
	}

	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;
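	/*
	 * Worked example, assuming 4KB pages: for user_addr 0x10100 and
	 * size 8192, the buffer occupies bytes 0x10100 through 0x120ff,
	 * so the rounded-up end index is 0x130ff >> 12 = 19, the start
	 * index is 0x10100 >> 12 = 16, and page_count is 3 (pages 16,
	 * 17, and 18 of the user address space).
	 */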

	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
		/*
		 * If we got fewer pages than expected from get_user_pages(),
		 * the user buffer runs off the end of a mapping; return EFAULT.
		 */
		if (result >= 0 && result < page_count) {
			nfs_free_user_pages(*pages, result, 0);
			*pages = NULL;
			result = -EFAULT;
		}
	}
	return result;
}

static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (do_dirty && !PageCompound(page))
			set_page_dirty_lock(page);
		page_cache_release(page);
	}
	kfree(pages);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	dreq->iocb = NULL;
	atomic_set(&dreq->count, 0);
	atomic_set(&dreq->error, 0);

	return dreq;
}

static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	int result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_event_interruptible(dreq->wait,
					(atomic_read(&dreq->complete) == 0));

	if (!result)
		result = atomic_read(&dreq->error);
	if (!result)
		result = atomic_read(&dreq->count);

out:
	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 *
 * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
 * can't trust the iocb is still valid here if this is a synchronous
 * request.  If the waiter is woken prematurely, the iocb is long gone.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	nfs_free_user_pages(dreq->pages, dreq->npages, 1);

	if (dreq->iocb) {
		long res = atomic_read(&dreq->error);
		if (!res)
			res = atomic_read(&dreq->count);
		aio_complete(dreq->iocb, res, 0);
	} else
		wake_up(&dreq->wait);

	kref_put(&dreq->kref, nfs_direct_req_release);
}
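
/*
 * Reference-count sketch for the dreq (an observation, not new
 * behavior): nfs_direct_req_alloc() starts the kref at one, and
 * nfs_direct_read_alloc()/nfs_direct_write_alloc() take a second
 * reference, so the dreq is freed only after both the waiter
 * (nfs_direct_wait or nfs_direct_write_wait) and the completion
 * path above have each dropped theirs.
 */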

/*
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
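/*
 * Worked example of the chunking below: for a 100KB (102,400 byte)
 * request with an rsize of 32KB, the loop allocates four
 * nfs_read_data structs to carry 32KB + 32KB + 32KB + 4KB, and
 * dreq->complete starts at 4.
 */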
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int reads = 0;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for (;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		reads++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	kref_get(&dreq->kref);
	atomic_set(&dreq->complete, reads);
	return dreq;
}

static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (likely(task->tk_status >= 0))
		atomic_add(data->res.count, &dreq->count);
	else
		atomic_set(&dreq->error, task->tk_status);

	if (unlikely(atomic_dec_and_test(&dreq->complete)))
		nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};

/*
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t file_offset)
{
	struct file *file = dreq->filp;
	struct inode *inode = file->f_mapping->host;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
			file->private_data;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = user_addr & ~PAGE_MASK;
	do {
		struct nfs_read_data *data;
		size_t bytes;

		bytes = rsize;
		if (count < rsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = file_offset;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		file_offset += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;
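		/*
		 * Worked example of the advance above, assuming 4KB
		 * pages: if pgbase was 0x100 and bytes was 32768,
		 * pgbase becomes 0x8100, curpage advances by 8, and
		 * the mask brings pgbase back to 0x100 for the next
		 * READ in the batch.
		 */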

		count -= bytes;
	} while (count != 0);
}

static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->pages = pages;
	dreq->npages = nr_pages;
	dreq->inode = inode;
	dreq->filp = iocb->ki_filp;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq, user_addr, count, file_offset);
	result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int writes = 0;
	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return NULL;

	list = &dreq->list;
	for (;;) {
		struct nfs_write_data *data = nfs_writedata_alloc(wpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						struct nfs_write_data, pages);
				list_del(&data->pages);
				nfs_writedata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		writes++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}
	kref_get(&dreq->kref);
	atomic_set(&dreq->complete, writes);
	return dreq;
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_write_wait(struct nfs_direct_req *dreq, int intr)
{
	int result = 0;

	if (intr) {
		result = wait_event_interruptible(dreq->wait,
					(atomic_read(&dreq->complete) == 0));
	} else {
		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
	}

	if (!result)
		result = atomic_read(&dreq->error);
	if (!result)
		result = atomic_read(&dreq->count);

	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}

static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = task->tk_status;

	if (nfs_writeback_done(task, data) != 0)
		return;
	/* If the server fell back to an UNSTABLE write, it's an error. */
	if (unlikely(data->res.verf->committed != NFS_FILE_SYNC))
		status = -EIO;

	if (likely(status >= 0))
		atomic_add(data->res.count, &dreq->count);
	else
		atomic_set(&dreq->error, status);

	if (unlikely(atomic_dec_and_test(&dreq->complete)))
		nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_writedata_release,
};

/*
 * For each nfs_write_data struct that was allocated on the list, dispatch
 * an NFS WRITE operation
 *
 * XXX: For now, support only FILE_SYNC writes.  Later we may add
 * support for UNSTABLE + COMMIT.
 */
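/*
 * Background note on the FILE_SYNC choice above: in NFSv3, a FILE_SYNC
 * write means the server commits both the data and the metadata needed
 * to retrieve it to stable storage before replying, so no follow-up
 * COMMIT round trip is required.  That is also why
 * nfs_direct_write_result() treats a reply downgraded to UNSTABLE as
 * -EIO: without COMMIT support here, an unstable reply carries no
 * durability guarantee.
 */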
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, struct inode *inode, struct nfs_open_context *ctx, unsigned long user_addr, size_t count, loff_t file_offset)
{
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = user_addr & ~PAGE_MASK;
	do {
		struct nfs_write_data *data;
		size_t bytes;

		bytes = wsize;
		if (count < wsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = file_offset;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		file_offset += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}

static ssize_t nfs_direct_write(struct inode *inode, struct nfs_open_context *ctx, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_write_alloc(count, NFS_SERVER(inode)->wsize);
	if (!dreq)
		return -ENOMEM;

	dreq->pages = pages;
	dreq->npages = nr_pages;

	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

	nfs_begin_data_update(inode);

	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_write_schedule(dreq, inode, ctx, user_addr, count,
				file_offset);
	result = nfs_direct_write_wait(dreq, clnt->cl_intr);
	rpc_clnt_sigunmask(clnt, &oldset);

	nfs_end_data_update(inode);

	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  So our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	/* count is a size_t; cast so a negative length is actually caught */
	if ((ssize_t) count < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, buf, count))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	page_count = nfs_get_user_pages(READ, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
						pages, page_count);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates; see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = -EINVAL;
	if (!is_sync_kiocb(iocb))
		goto out;

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, buf, count))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	page_count = nfs_get_user_pages(WRITE, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

	retval = nfs_direct_write(inode, ctx, (unsigned long) buf, count,
					pos, pages, page_count);
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, SLAB_RECLAIM_ACCOUNT,
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}
734 | } |