1 /*
2 * linux/fs/nfs/write.c
3 *
4 * Writing file data over NFS.
5 *
6 * We do it like this: When a (user) process wishes to write data to an
7 * NFS file, a write request is allocated that contains the RPC task data
8 * plus some info on the page to be written, and added to the inode's
9 * write chain. If the process writes past the end of the page, an async
10 * RPC call to write the page is scheduled immediately; otherwise, the call
11 * is delayed for a few seconds.
12 *
13 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
14 *
15 * Write requests are kept on the inode's writeback list. Each entry in
16 * that list references the page (portion) to be written. When the
17 * cache timeout has expired, the RPC task is woken up, and tries to
18 * lock the page. As soon as it manages to do so, the request is moved
19 * from the writeback list to the writelock list.
20 *
21 * Note: we must make sure never to confuse the inode passed in the
22 * write_page request with the one in page->inode. As far as I understand
23 * it, these are different when doing a swap-out.
24 *
25 * To understand everything that goes on here and in the NFS read code,
26 * one should be aware that a page is locked in exactly one of the following
27 * cases:
28 *
29 * - A write request is in progress.
30 * - A user process is in generic_file_write/nfs_update_page
31 * - A user process is in generic_file_read
32 *
33 * Also note that because of the way pages are invalidated in
34 * nfs_revalidate_inode, the following assertions hold:
35 *
36 * - If a page is dirty, there will be no read requests (a page will
37 * not be re-read unless invalidated by nfs_revalidate_inode).
38 * - If the page is not uptodate, there will be no pending write
39 * requests, and no process will be in nfs_update_page.
40 *
41 * FIXME: Interaction with the vmscan routines is not optimal yet.
42 * Either vmscan must be made nfs-savvy, or we need a different page
43 * reclaim concept that supports something like FS-independent
44 * buffer_heads with a b_ops-> field.
45 *
46 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
47 */
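
/*
 * Request life cycle, roughly as implemented below:
 *
 *   nfs_updatepage() -> nfs_update_request() creates or extends an
 *   nfs_page; nfs_mark_request_dirty() queues it on nfsi->dirty.
 *   nfs_flush_inode()/nfs_flush_list() send the WRITE calls; replies
 *   that are not NFS_FILE_SYNC park the request on nfsi->commit via
 *   nfs_mark_request_commit(), and nfs_commit_inode() later sends the
 *   COMMIT and retires requests whose verifiers match.
 */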
48
49 #include <linux/types.h>
50 #include <linux/slab.h>
51 #include <linux/mm.h>
52 #include <linux/pagemap.h>
53 #include <linux/file.h>
54 #include <linux/mpage.h>
55 #include <linux/writeback.h>
56
57 #include <linux/sunrpc/clnt.h>
58 #include <linux/nfs_fs.h>
59 #include <linux/nfs_mount.h>
60 #include <linux/nfs_page.h>
61 #include <asm/uaccess.h>
62 #include <linux/smp_lock.h>
63
64 #include "delegation.h"
65 #include "iostat.h"
66
67 #define NFSDBG_FACILITY NFSDBG_PAGECACHE
68
69 #define MIN_POOL_WRITE (32)
70 #define MIN_POOL_COMMIT (4)
71
72 /*
73 * Local function declarations
74 */
75 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
76 struct inode *,
77 struct page *,
78 unsigned int, unsigned int);
79 static int nfs_wait_on_write_congestion(struct address_space *, int);
80 static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
81 static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
82 unsigned int npages, int how);
83 static const struct rpc_call_ops nfs_write_partial_ops;
84 static const struct rpc_call_ops nfs_write_full_ops;
85 static const struct rpc_call_ops nfs_commit_ops;
86
87 static kmem_cache_t *nfs_wdata_cachep;
88 static mempool_t *nfs_wdata_mempool;
89 static mempool_t *nfs_commit_mempool;
90
91 static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
92
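/*
 * Allocate a COMMIT descriptor from the commit mempool. If the request
 * spans more pages than the embedded page_array can describe, a separate
 * page vector is kcalloc()ed; if that fails, the descriptor goes back to
 * the mempool and NULL is returned.
 */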
93 struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
94 {
95 struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
96
97 if (p) {
98 memset(p, 0, sizeof(*p));
99 INIT_LIST_HEAD(&p->pages);
100 if (pagecount <= ARRAY_SIZE(p->page_array))
101 p->pagevec = p->page_array;
102 else {
103 p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
104 if (!p->pagevec) {
105 mempool_free(p, nfs_commit_mempool);
106 p = NULL;
107 }
108 }
109 }
110 return p;
111 }
112
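/*
 * Free a COMMIT descriptor: release any separately allocated page
 * vector, then return the structure to the commit mempool.
 */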
113 void nfs_commit_free(struct nfs_write_data *p)
114 {
115 if (p && (p->pagevec != &p->page_array[0]))
116 kfree(p->pagevec);
117 mempool_free(p, nfs_commit_mempool);
118 }
119
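/*
 * Allocate a WRITE descriptor from the write mempool, mirroring
 * nfs_commit_alloc() above.
 */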
120 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
121 {
122 struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
123
124 if (p) {
125 memset(p, 0, sizeof(*p));
126 INIT_LIST_HEAD(&p->pages);
127 if (pagecount <= ARRAY_SIZE(p->page_array))
128 p->pagevec = p->page_array;
129 else {
130 p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
131 if (!p->pagevec) {
132 mempool_free(p, nfs_wdata_mempool);
133 p = NULL;
134 }
135 }
136 }
137 return p;
138 }
139
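/*
 * Counterpart to nfs_writedata_alloc(): free the page vector if it was
 * allocated separately, then return the descriptor to the write mempool.
 */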
140 void nfs_writedata_free(struct nfs_write_data *p)
141 {
142 if (p && (p->pagevec != &p->page_array[0]))
143 kfree(p->pagevec);
144 mempool_free(p, nfs_wdata_mempool);
145 }
146
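/* RPC ->rpc_release callback: just frees the write descriptor. */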
147 void nfs_writedata_release(void *wdata)
148 {
149 nfs_writedata_free(wdata);
150 }
151
152 /* Adjust the file length if we're writing beyond the end */
153 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
154 {
155 struct inode *inode = page->mapping->host;
156 loff_t end, i_size = i_size_read(inode);
157 unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
158
159 if (i_size > 0 && page->index < end_index)
160 return;
161 end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
162 if (i_size >= end)
163 return;
164 nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
165 i_size_write(inode, end);
166 }
167
168 /* We can set the PG_uptodate flag if we see that a write request
169 * covers the full page.
170 */
171 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
172 {
173 loff_t end_offs;
174
175 if (PageUptodate(page))
176 return;
177 if (base != 0)
178 return;
179 if (count == PAGE_CACHE_SIZE) {
180 SetPageUptodate(page);
181 return;
182 }
183
184 end_offs = i_size_read(page->mapping->host) - 1;
185 if (end_offs < 0)
186 return;
187 /* Is this the last page? */
188 if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
189 return;
190 /* This is the last page: set PG_uptodate if we cover the entire
191 * extent of the data, then zero the rest of the page.
192 */
193 if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
194 memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
195 SetPageUptodate(page);
196 }
197 }
198
199 /*
200 * Write a page synchronously.
201 * Offset is the data offset within the page.
202 */
203 static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
204 struct page *page, unsigned int offset, unsigned int count,
205 int how)
206 {
207 unsigned int wsize = NFS_SERVER(inode)->wsize;
208 int result, written = 0;
209 struct nfs_write_data *wdata;
210
211 wdata = nfs_writedata_alloc(1);
212 if (!wdata)
213 return -ENOMEM;
214
215 wdata->flags = how;
216 wdata->cred = ctx->cred;
217 wdata->inode = inode;
218 wdata->args.fh = NFS_FH(inode);
219 wdata->args.context = ctx;
220 wdata->args.pages = &page;
221 wdata->args.stable = NFS_FILE_SYNC;
222 wdata->args.pgbase = offset;
223 wdata->args.count = wsize;
224 wdata->res.fattr = &wdata->fattr;
225 wdata->res.verf = &wdata->verf;
226
227 dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
228 inode->i_sb->s_id,
229 (long long)NFS_FILEID(inode),
230 count, (long long)(page_offset(page) + offset));
231
232 set_page_writeback(page);
233 nfs_begin_data_update(inode);
234 do {
235 if (count < wsize)
236 wdata->args.count = count;
237 wdata->args.offset = page_offset(page) + wdata->args.pgbase;
238
239 result = NFS_PROTO(inode)->write(wdata);
240
241 if (result < 0) {
242 /* Must mark the page invalid after I/O error */
243 ClearPageUptodate(page);
244 goto io_error;
245 }
246 if (result < wdata->args.count)
247 printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
248 wdata->args.count, result);
249
250 wdata->args.offset += result;
251 wdata->args.pgbase += result;
252 written += result;
253 count -= result;
254 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
255 } while (count);
256 /* Update file length */
257 nfs_grow_file(page, offset, written);
258 /* Set the PG_uptodate flag? */
259 nfs_mark_uptodate(page, offset, written);
260
261 if (PageError(page))
262 ClearPageError(page);
263
264 io_error:
265 nfs_end_data_update(inode);
266 end_page_writeback(page);
267 nfs_writedata_free(wdata);
268 return written ? written : result;
269 }
270
271 static int nfs_writepage_async(struct nfs_open_context *ctx,
272 struct inode *inode, struct page *page,
273 unsigned int offset, unsigned int count)
274 {
275 struct nfs_page *req;
276
277 req = nfs_update_request(ctx, inode, page, offset, count);
278 if (IS_ERR(req))
279 return PTR_ERR(req);
280 /* Update file length */
281 nfs_grow_file(page, offset, count);
282 /* Set the PG_uptodate flag? */
283 nfs_mark_uptodate(page, offset, count);
284 nfs_unlock_request(req);
285 return 0;
286 }
287
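/*
 * Map writeback_control hints onto an RPC flush priority: writeback for
 * page reclaim runs at high priority, periodic kupdate writeback at low
 * priority, everything else at the default.
 */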
288 static int wb_priority(struct writeback_control *wbc)
289 {
290 if (wbc->for_reclaim)
291 return FLUSH_HIGHPRI;
292 if (wbc->for_kupdate)
293 return FLUSH_LOWPRI;
294 return 0;
295 }
296
297 /*
298 * Write an mmapped page to the server.
299 */
300 int nfs_writepage(struct page *page, struct writeback_control *wbc)
301 {
302 struct nfs_open_context *ctx;
303 struct inode *inode = page->mapping->host;
304 unsigned long end_index;
305 unsigned offset = PAGE_CACHE_SIZE;
306 loff_t i_size = i_size_read(inode);
307 int inode_referenced = 0;
308 int priority = wb_priority(wbc);
309 int err;
310
311 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
312 nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
313
314 /*
315 * Note: We need to ensure that we have a reference to the inode
316 * if we are to do asynchronous writes. If not, waiting
317 * in nfs_wait_on_request() may deadlock with clear_inode().
318 *
319 * If igrab() fails here, then it is in any case safe to
320 * call nfs_wb_page(), since there will be no pending writes.
321 */
322         if (igrab(inode) != NULL)
323 inode_referenced = 1;
324 end_index = i_size >> PAGE_CACHE_SHIFT;
325
326 /* Ensure we've flushed out any previous writes */
327 nfs_wb_page_priority(inode, page, priority);
328
329 /* easy case */
330 if (page->index < end_index)
331 goto do_it;
332 /* things got complicated... */
333 offset = i_size & (PAGE_CACHE_SIZE-1);
334
335 /* OK, are we completely out? */
336 err = 0; /* potential race with truncate - ignore */
337 if (page->index >= end_index+1 || !offset)
338 goto out;
339 do_it:
340 ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
341 if (ctx == NULL) {
342 err = -EBADF;
343 goto out;
344 }
345 lock_kernel();
346 if (!IS_SYNC(inode) && inode_referenced) {
347 err = nfs_writepage_async(ctx, inode, page, 0, offset);
348 if (!wbc->for_writepages)
349 nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
350 } else {
351 err = nfs_writepage_sync(ctx, inode, page, 0,
352 offset, priority);
353 if (err >= 0) {
354 if (err != offset)
355 redirty_page_for_writepage(wbc, page);
356 err = 0;
357 }
358 }
359 unlock_kernel();
360 put_nfs_open_context(ctx);
361 out:
362 unlock_page(page);
363 if (inode_referenced)
364 iput(inode);
365 return err;
366 }
367
368 /*
369 * Note: causes nfs_update_request() to block on the assumption
370 * that the writeback is generated due to memory pressure.
371 */
372 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
373 {
374 struct backing_dev_info *bdi = mapping->backing_dev_info;
375 struct inode *inode = mapping->host;
376 int err;
377
378 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
379
380 err = generic_writepages(mapping, wbc);
381 if (err)
382 return err;
383 while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
384 if (wbc->nonblocking)
385 return 0;
386 nfs_wait_on_write_congestion(mapping, 0);
387 }
388 err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
389 if (err < 0)
390 goto out;
391 nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
392 wbc->nr_to_write -= err;
393 if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
394 err = nfs_wait_on_requests(inode, 0, 0);
395 if (err < 0)
396 goto out;
397 }
398 err = nfs_commit_inode(inode, wb_priority(wbc));
399 if (err > 0) {
400 wbc->nr_to_write -= err;
401 err = 0;
402 }
403 out:
404 clear_bit(BDI_write_congested, &bdi->state);
405 wake_up_all(&nfs_write_congestion);
406 return err;
407 }
408
409 /*
410 * Insert a write request into an inode
411 */
412 static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
413 {
414 struct nfs_inode *nfsi = NFS_I(inode);
415 int error;
416
417 error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
418 BUG_ON(error == -EEXIST);
419 if (error)
420 return error;
421 if (!nfsi->npages) {
422 igrab(inode);
423 nfs_begin_data_update(inode);
424 if (nfs_have_delegation(inode, FMODE_WRITE))
425 nfsi->change_attr++;
426 }
427 SetPagePrivate(req->wb_page);
428 nfsi->npages++;
429 atomic_inc(&req->wb_count);
430 return 0;
431 }
432
433 /*
434  * Remove a write request from an inode
435 */
436 static void nfs_inode_remove_request(struct nfs_page *req)
437 {
438 struct inode *inode = req->wb_context->dentry->d_inode;
439 struct nfs_inode *nfsi = NFS_I(inode);
440
441         BUG_ON(!NFS_WBACK_BUSY(req));
442
443 spin_lock(&nfsi->req_lock);
444 ClearPagePrivate(req->wb_page);
445 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
446 nfsi->npages--;
447 if (!nfsi->npages) {
448 spin_unlock(&nfsi->req_lock);
449 nfs_end_data_update(inode);
450 iput(inode);
451 } else
452 spin_unlock(&nfsi->req_lock);
453 nfs_clear_request(req);
454 nfs_release_request(req);
455 }
456
457 /*
458 * Find a request
459 */
460 static inline struct nfs_page *
461 _nfs_find_request(struct inode *inode, unsigned long index)
462 {
463 struct nfs_inode *nfsi = NFS_I(inode);
464 struct nfs_page *req;
465
466 req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
467 if (req)
468 atomic_inc(&req->wb_count);
469 return req;
470 }
471
472 static struct nfs_page *
473 nfs_find_request(struct inode *inode, unsigned long index)
474 {
475 struct nfs_page *req;
476 struct nfs_inode *nfsi = NFS_I(inode);
477
478 spin_lock(&nfsi->req_lock);
479 req = _nfs_find_request(inode, index);
480 spin_unlock(&nfsi->req_lock);
481 return req;
482 }
483
484 /*
485 * Add a request to the inode's dirty list.
486 */
487 static void
488 nfs_mark_request_dirty(struct nfs_page *req)
489 {
490 struct inode *inode = req->wb_context->dentry->d_inode;
491 struct nfs_inode *nfsi = NFS_I(inode);
492
493 spin_lock(&nfsi->req_lock);
494 radix_tree_tag_set(&nfsi->nfs_page_tree,
495 req->wb_index, NFS_PAGE_TAG_DIRTY);
496 nfs_list_add_request(req, &nfsi->dirty);
497 nfsi->ndirty++;
498 spin_unlock(&nfsi->req_lock);
499 inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
500 mark_inode_dirty(inode);
501 }
502
503 /*
504 * Check if a request is dirty
505 */
506 static inline int
507 nfs_dirty_request(struct nfs_page *req)
508 {
509 struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
510 return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
511 }
512
513 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
514 /*
515 * Add a request to the inode's commit list.
516 */
517 static void
518 nfs_mark_request_commit(struct nfs_page *req)
519 {
520 struct inode *inode = req->wb_context->dentry->d_inode;
521 struct nfs_inode *nfsi = NFS_I(inode);
522
523 spin_lock(&nfsi->req_lock);
524 nfs_list_add_request(req, &nfsi->commit);
525 nfsi->ncommit++;
526 spin_unlock(&nfsi->req_lock);
527 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
528 mark_inode_dirty(inode);
529 }
530 #endif
531
532 /*
533 * Wait for a request to complete.
534 *
535  * Interruptible by signals only if mounted with the 'intr' flag.
536 */
537 static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
538 {
539 struct nfs_inode *nfsi = NFS_I(inode);
540 struct nfs_page *req;
541 unsigned long idx_end, next;
542 unsigned int res = 0;
543 int error;
544
545 if (npages == 0)
546 idx_end = ~0;
547 else
548 idx_end = idx_start + npages - 1;
549
550 next = idx_start;
551 while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
552 if (req->wb_index > idx_end)
553 break;
554
555 next = req->wb_index + 1;
556 BUG_ON(!NFS_WBACK_BUSY(req));
557
558 atomic_inc(&req->wb_count);
559 spin_unlock(&nfsi->req_lock);
560 error = nfs_wait_on_request(req);
561 nfs_release_request(req);
562 spin_lock(&nfsi->req_lock);
563 if (error < 0)
564 return error;
565 res++;
566 }
567 return res;
568 }
569
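/* Wrapper that takes nfsi->req_lock around nfs_wait_on_requests_locked(). */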
570 static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
571 {
572 struct nfs_inode *nfsi = NFS_I(inode);
573 int ret;
574
575 spin_lock(&nfsi->req_lock);
576 ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
577 spin_unlock(&nfsi->req_lock);
578 return ret;
579 }
580
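/*
 * Throw away all requests on the list: unlink each one, drop it from
 * its inode, and clear its page writeback state.
 */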
581 static void nfs_cancel_requests(struct list_head *head)
582 {
583 struct nfs_page *req;
584         while (!list_empty(head)) {
585 req = nfs_list_entry(head->next);
586 nfs_list_remove_request(req);
587 nfs_inode_remove_request(req);
588 nfs_clear_page_writeback(req);
589 }
590 }
591
592 /*
593 * nfs_scan_dirty - Scan an inode for dirty requests
594 * @inode: NFS inode to scan
595 * @dst: destination list
596 * @idx_start: lower bound of page->index to scan.
597 * @npages: idx_start + npages sets the upper bound to scan.
598 *
599 * Moves requests from the inode's dirty page list.
600 * The requests are *not* checked to ensure that they form a contiguous set.
601 */
602 static int
603 nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
604 {
605 struct nfs_inode *nfsi = NFS_I(inode);
606 int res = 0;
607
608 if (nfsi->ndirty != 0) {
609 res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
610 nfsi->ndirty -= res;
611 if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
612 printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
613 }
614 return res;
615 }
616
617 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
618 /*
619 * nfs_scan_commit - Scan an inode for commit requests
620 * @inode: NFS inode to scan
621 * @dst: destination list
622 * @idx_start: lower bound of page->index to scan.
623 * @npages: idx_start + npages sets the upper bound to scan.
624 *
625 * Moves requests from the inode's 'commit' request list.
626 * The requests are *not* checked to ensure that they form a contiguous set.
627 */
628 static int
629 nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
630 {
631 struct nfs_inode *nfsi = NFS_I(inode);
632 int res = 0;
633
634 if (nfsi->ncommit != 0) {
635 res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
636 nfsi->ncommit -= res;
637 if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
638 printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
639 }
640 return res;
641 }
642 #else
643 static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
644 {
645 return 0;
646 }
647 #endif
648
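/*
 * Throttle the caller while the backing device is write-congested. If
 * @intr is set, the wait is interruptible under the RPC client's signal
 * mask and may return -ERESTARTSYS; otherwise it sleeps uninterruptibly.
 */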
649 static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
650 {
651 struct backing_dev_info *bdi = mapping->backing_dev_info;
652 DEFINE_WAIT(wait);
653 int ret = 0;
654
655 might_sleep();
656
657 if (!bdi_write_congested(bdi))
658 return 0;
659
660 nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);
661
662 if (intr) {
663 struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
664 sigset_t oldset;
665
666 rpc_clnt_sigmask(clnt, &oldset);
667 prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
668 if (bdi_write_congested(bdi)) {
669 if (signalled())
670 ret = -ERESTARTSYS;
671 else
672 schedule();
673 }
674 rpc_clnt_sigunmask(clnt, &oldset);
675 } else {
676 prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
677 if (bdi_write_congested(bdi))
678 schedule();
679 }
680 finish_wait(&nfs_write_congestion, &wait);
681 return ret;
682 }
683
684
685 /*
686 * Try to update any existing write request, or create one if there is none.
687 * In order to match, the request's credentials must match those of
688 * the calling process.
689 *
690 * Note: Should always be called with the Page Lock held!
691 */
692 static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
693 struct inode *inode, struct page *page,
694 unsigned int offset, unsigned int bytes)
695 {
696 struct nfs_server *server = NFS_SERVER(inode);
697 struct nfs_inode *nfsi = NFS_I(inode);
698 struct nfs_page *req, *new = NULL;
699 unsigned long rqend, end;
700
701 end = offset + bytes;
702
703 if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
704 return ERR_PTR(-ERESTARTSYS);
705 for (;;) {
706 /* Loop over all inode entries and see if we find
707                  * a request for the page we wish to update.
708 */
709 spin_lock(&nfsi->req_lock);
710 req = _nfs_find_request(inode, page->index);
711 if (req) {
712 if (!nfs_lock_request_dontget(req)) {
713 int error;
714 spin_unlock(&nfsi->req_lock);
715 error = nfs_wait_on_request(req);
716 nfs_release_request(req);
717 if (error < 0) {
718 if (new)
719 nfs_release_request(new);
720 return ERR_PTR(error);
721 }
722 continue;
723 }
724 spin_unlock(&nfsi->req_lock);
725 if (new)
726 nfs_release_request(new);
727 break;
728 }
729
730 if (new) {
731 int error;
732 nfs_lock_request_dontget(new);
733 error = nfs_inode_add_request(inode, new);
734 if (error) {
735 spin_unlock(&nfsi->req_lock);
736 nfs_unlock_request(new);
737 return ERR_PTR(error);
738 }
739 spin_unlock(&nfsi->req_lock);
740 nfs_mark_request_dirty(new);
741 return new;
742 }
743 spin_unlock(&nfsi->req_lock);
744
745 new = nfs_create_request(ctx, inode, page, offset, bytes);
746 if (IS_ERR(new))
747 return new;
748 }
749
750 /* We have a request for our page.
751 * If the creds don't match, or the
752 * page addresses don't match,
753 * tell the caller to wait on the conflicting
754 * request.
755 */
756 rqend = req->wb_offset + req->wb_bytes;
757 if (req->wb_context != ctx
758 || req->wb_page != page
759 || !nfs_dirty_request(req)
760 || offset > rqend || end < req->wb_offset) {
761 nfs_unlock_request(req);
762 return ERR_PTR(-EBUSY);
763 }
764
765 /* Okay, the request matches. Update the region */
766 if (offset < req->wb_offset) {
767 req->wb_offset = offset;
768 req->wb_pgbase = offset;
769 req->wb_bytes = rqend - req->wb_offset;
770 }
771
772 if (end > rqend)
773 req->wb_bytes = end - req->wb_offset;
774
775 return req;
776 }
777
778 int nfs_flush_incompatible(struct file *file, struct page *page)
779 {
780 struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
781 struct inode *inode = page->mapping->host;
782 struct nfs_page *req;
783 int status = 0;
784 /*
785 * Look for a request corresponding to this page. If there
786 * is one, and it belongs to another file, we flush it out
787 * before we try to copy anything into the page. Do this
788 * due to the lack of an ACCESS-type call in NFSv2.
789 * Also do the same if we find a request from an existing
790 * dropped page.
791 */
792 req = nfs_find_request(inode, page->index);
793 if (req) {
794 if (req->wb_page != page || ctx != req->wb_context)
795 status = nfs_wb_page(inode, page);
796 nfs_release_request(req);
797 }
798 return (status < 0) ? status : 0;
799 }
800
801 /*
802 * Update and possibly write a cached page of an NFS file.
803 *
804 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
805 * things with a page scheduled for an RPC call (e.g. invalidate it).
806 */
807 int nfs_updatepage(struct file *file, struct page *page,
808 unsigned int offset, unsigned int count)
809 {
810 struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
811 struct inode *inode = page->mapping->host;
812 struct nfs_page *req;
813 int status = 0;
814
815 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
816
817 dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
818 file->f_dentry->d_parent->d_name.name,
819 file->f_dentry->d_name.name, count,
820                 (long long)(page_offset(page) + offset));
821
822 if (IS_SYNC(inode)) {
823 status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
824 if (status > 0) {
825 if (offset == 0 && status == PAGE_CACHE_SIZE)
826 SetPageUptodate(page);
827 return 0;
828 }
829 return status;
830 }
831
832 /* If we're not using byte range locks, and we know the page
833 * is entirely in cache, it may be more efficient to avoid
834 * fragmenting write requests.
835 */
836         if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_flags & O_SYNC)) {
837 loff_t end_offs = i_size_read(inode) - 1;
838 unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
839
840 count += offset;
841 offset = 0;
842 if (unlikely(end_offs < 0)) {
843 /* Do nothing */
844 } else if (page->index == end_index) {
845 unsigned int pglen;
846 pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
847 if (count < pglen)
848 count = pglen;
849 } else if (page->index < end_index)
850 count = PAGE_CACHE_SIZE;
851 }
852
853 /*
854 * Try to find an NFS request corresponding to this page
855 * and update it.
856 * If the existing request cannot be updated, we must flush
857 * it out now.
858 */
859 do {
860 req = nfs_update_request(ctx, inode, page, offset, count);
861 status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
862 if (status != -EBUSY)
863 break;
864 /* Request could not be updated. Flush it out and try again */
865 status = nfs_wb_page(inode, page);
866 } while (status >= 0);
867 if (status < 0)
868 goto done;
869
870 status = 0;
871
872 /* Update file length */
873 nfs_grow_file(page, offset, count);
874 /* Set the PG_uptodate flag? */
875 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
876 nfs_unlock_request(req);
877 done:
878 dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
879 status, (long long)i_size_read(inode));
880 if (status < 0)
881 ClearPageUptodate(page);
882 return status;
883 }
884
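/*
 * Finish writeback of a page: requeue the request if it needs to be
 * rescheduled or committed, otherwise drop it from the inode; finally
 * clear the page's writeback state.
 */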
885 static void nfs_writepage_release(struct nfs_page *req)
886 {
887 end_page_writeback(req->wb_page);
888
889 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
890 if (!PageError(req->wb_page)) {
891 if (NFS_NEED_RESCHED(req)) {
892 nfs_mark_request_dirty(req);
893 goto out;
894 } else if (NFS_NEED_COMMIT(req)) {
895 nfs_mark_request_commit(req);
896 goto out;
897 }
898 }
899 nfs_inode_remove_request(req);
900
901 out:
902 nfs_clear_commit(req);
903 nfs_clear_reschedule(req);
904 #else
905 nfs_inode_remove_request(req);
906 #endif
907 nfs_clear_page_writeback(req);
908 }
909
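/* Translate FLUSH_* flags into the matching rpc_task priority. */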
910 static inline int flush_task_priority(int how)
911 {
912 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
913 case FLUSH_HIGHPRI:
914 return RPC_PRIORITY_HIGH;
915 case FLUSH_LOWPRI:
916 return RPC_PRIORITY_LOW;
917 }
918 return RPC_PRIORITY_NORMAL;
919 }
920
921 /*
922 * Set up the argument/result storage required for the RPC call.
923 */
924 static void nfs_write_rpcsetup(struct nfs_page *req,
925 struct nfs_write_data *data,
926 const struct rpc_call_ops *call_ops,
927 unsigned int count, unsigned int offset,
928 int how)
929 {
930 struct inode *inode;
931 int flags;
932
933 /* Set up the RPC argument and reply structs
934 * NB: take care not to mess about with data->commit et al. */
935
936 data->req = req;
937 data->inode = inode = req->wb_context->dentry->d_inode;
938 data->cred = req->wb_context->cred;
939
940 data->args.fh = NFS_FH(inode);
941 data->args.offset = req_offset(req) + offset;
942 data->args.pgbase = req->wb_pgbase + offset;
943 data->args.pages = data->pagevec;
944 data->args.count = count;
945 data->args.context = req->wb_context;
946
947 data->res.fattr = &data->fattr;
948 data->res.count = count;
949 data->res.verf = &data->verf;
950 nfs_fattr_init(&data->fattr);
951
952 /* Set up the initial task struct. */
953 flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
954 rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
955 NFS_PROTO(inode)->write_setup(data, how);
956
957 data->task.tk_priority = flush_task_priority(how);
958 data->task.tk_cookie = (unsigned long)inode;
959
960 dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
961 data->task.tk_pid,
962 inode->i_sb->s_id,
963 (long long)NFS_FILEID(inode),
964 count,
965 (unsigned long long)data->args.offset);
966 }
967
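/*
 * Fire off a prepared WRITE/COMMIT task. The RPC client's signal mask
 * is applied around rpc_execute() to preserve the mount's 'intr'
 * semantics.
 */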
968 static void nfs_execute_write(struct nfs_write_data *data)
969 {
970 struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
971 sigset_t oldset;
972
973 rpc_clnt_sigmask(clnt, &oldset);
974 lock_kernel();
975 rpc_execute(&data->task);
976 unlock_kernel();
977 rpc_clnt_sigunmask(clnt, &oldset);
978 }
979
980 /*
981 * Generate multiple small requests to write out a single
982 * contiguous dirty area on one page.
983 */
984 static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
985 {
986 struct nfs_page *req = nfs_list_entry(head->next);
987 struct page *page = req->wb_page;
988 struct nfs_write_data *data;
989 unsigned int wsize = NFS_SERVER(inode)->wsize;
990 unsigned int nbytes, offset;
991 int requests = 0;
992 LIST_HEAD(list);
993
994 nfs_list_remove_request(req);
995
996 nbytes = req->wb_bytes;
997 for (;;) {
998 data = nfs_writedata_alloc(1);
999 if (!data)
1000 goto out_bad;
1001 list_add(&data->pages, &list);
1002 requests++;
1003 if (nbytes <= wsize)
1004 break;
1005 nbytes -= wsize;
1006 }
1007 atomic_set(&req->wb_complete, requests);
1008
1009 ClearPageError(page);
1010 set_page_writeback(page);
1011 offset = 0;
1012 nbytes = req->wb_bytes;
1013 do {
1014 data = list_entry(list.next, struct nfs_write_data, pages);
1015 list_del_init(&data->pages);
1016
1017 data->pagevec[0] = page;
1018
1019 if (nbytes > wsize) {
1020 nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
1021 wsize, offset, how);
1022 offset += wsize;
1023 nbytes -= wsize;
1024 } else {
1025 nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
1026 nbytes, offset, how);
1027 nbytes = 0;
1028 }
1029 nfs_execute_write(data);
1030 } while (nbytes != 0);
1031
1032 return 0;
1033
1034 out_bad:
1035 while (!list_empty(&list)) {
1036 data = list_entry(list.next, struct nfs_write_data, pages);
1037 list_del(&data->pages);
1038 nfs_writedata_free(data);
1039 }
1040 nfs_mark_request_dirty(req);
1041 nfs_clear_page_writeback(req);
1042 return -ENOMEM;
1043 }
1044
1045 /*
1046 * Create an RPC task for the given write request and kick it.
1047 * The page must have been locked by the caller.
1048 *
1049 * It may happen that the page we're passed is not marked dirty.
1050 * This is the case if nfs_updatepage detects a conflicting request
1051 * that has been written but not committed.
1052 */
1053 static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
1054 {
1055 struct nfs_page *req;
1056 struct page **pages;
1057 struct nfs_write_data *data;
1058 unsigned int count;
1059
1060 data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
1061 if (!data)
1062 goto out_bad;
1063
1064 pages = data->pagevec;
1065 count = 0;
1066 while (!list_empty(head)) {
1067 req = nfs_list_entry(head->next);
1068 nfs_list_remove_request(req);
1069 nfs_list_add_request(req, &data->pages);
1070 ClearPageError(req->wb_page);
1071 set_page_writeback(req->wb_page);
1072 *pages++ = req->wb_page;
1073 count += req->wb_bytes;
1074 }
1075 req = nfs_list_entry(data->pages.next);
1076
1077 /* Set up the argument struct */
1078 nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
1079
1080 nfs_execute_write(data);
1081 return 0;
1082 out_bad:
1083 while (!list_empty(head)) {
1084 struct nfs_page *req = nfs_list_entry(head->next);
1085 nfs_list_remove_request(req);
1086 nfs_mark_request_dirty(req);
1087 nfs_clear_page_writeback(req);
1088 }
1089 return -ENOMEM;
1090 }
1091
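/*
 * Flush a list of dirty requests: coalesce them into batches of at most
 * wpages requests and hand each batch to nfs_flush_one(), or to
 * nfs_flush_multi() when wsize is smaller than a page. When the batch
 * covers every outstanding request and fits in a single call, it is
 * sent FLUSH_STABLE to spare a separate COMMIT.
 */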
1092 static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
1093 {
1094 LIST_HEAD(one_request);
1095 int (*flush_one)(struct inode *, struct list_head *, int);
1096 struct nfs_page *req;
1097 int wpages = NFS_SERVER(inode)->wpages;
1098 int wsize = NFS_SERVER(inode)->wsize;
1099 int error;
1100
1101 flush_one = nfs_flush_one;
1102 if (wsize < PAGE_CACHE_SIZE)
1103 flush_one = nfs_flush_multi;
1104 /* For single writes, FLUSH_STABLE is more efficient */
1105 if (npages <= wpages && npages == NFS_I(inode)->npages
1106 && nfs_list_entry(head->next)->wb_bytes <= wsize)
1107 how |= FLUSH_STABLE;
1108
1109 do {
1110 nfs_coalesce_requests(head, &one_request, wpages);
1111 req = nfs_list_entry(one_request.next);
1112 error = flush_one(inode, &one_request, how);
1113 if (error < 0)
1114 goto out_err;
1115 } while (!list_empty(head));
1116 return 0;
1117 out_err:
1118 while (!list_empty(head)) {
1119 req = nfs_list_entry(head->next);
1120 nfs_list_remove_request(req);
1121 nfs_mark_request_dirty(req);
1122 nfs_clear_page_writeback(req);
1123 }
1124 return error;
1125 }
1126
1127 /*
1128 * Handle a write reply that flushed part of a page.
1129 */
1130 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
1131 {
1132 struct nfs_write_data *data = calldata;
1133 struct nfs_page *req = data->req;
1134 struct page *page = req->wb_page;
1135
1136 dprintk("NFS: write (%s/%Ld %d@%Ld)",
1137 req->wb_context->dentry->d_inode->i_sb->s_id,
1138 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1139 req->wb_bytes,
1140 (long long)req_offset(req));
1141
1142 if (nfs_writeback_done(task, data) != 0)
1143 return;
1144
1145 if (task->tk_status < 0) {
1146 ClearPageUptodate(page);
1147 SetPageError(page);
1148 req->wb_context->error = task->tk_status;
1149 dprintk(", error = %d\n", task->tk_status);
1150 } else {
1151 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1152 if (data->verf.committed < NFS_FILE_SYNC) {
1153 if (!NFS_NEED_COMMIT(req)) {
1154 nfs_defer_commit(req);
1155 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1156 dprintk(" defer commit\n");
1157 } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
1158 nfs_defer_reschedule(req);
1159 dprintk(" server reboot detected\n");
1160 }
1161 } else
1162 #endif
1163 dprintk(" OK\n");
1164 }
1165
1166 if (atomic_dec_and_test(&req->wb_complete))
1167 nfs_writepage_release(req);
1168 }
1169
1170 static const struct rpc_call_ops nfs_write_partial_ops = {
1171 .rpc_call_done = nfs_writeback_done_partial,
1172 .rpc_release = nfs_writedata_release,
1173 };
1174
1175 /*
1176 * Handle a write reply that flushes a whole page.
1177 *
1178 * FIXME: There is an inherent race with invalidate_inode_pages and
1179 * writebacks since the page->count is kept > 1 for as long
1180 * as the page has a write request pending.
1181 */
1182 static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1183 {
1184 struct nfs_write_data *data = calldata;
1185 struct nfs_page *req;
1186 struct page *page;
1187
1188 if (nfs_writeback_done(task, data) != 0)
1189 return;
1190
1191 /* Update attributes as result of writeback. */
1192 while (!list_empty(&data->pages)) {
1193 req = nfs_list_entry(data->pages.next);
1194 nfs_list_remove_request(req);
1195 page = req->wb_page;
1196
1197 dprintk("NFS: write (%s/%Ld %d@%Ld)",
1198 req->wb_context->dentry->d_inode->i_sb->s_id,
1199 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1200 req->wb_bytes,
1201 (long long)req_offset(req));
1202
1203 if (task->tk_status < 0) {
1204 ClearPageUptodate(page);
1205 SetPageError(page);
1206 req->wb_context->error = task->tk_status;
1207 end_page_writeback(page);
1208 nfs_inode_remove_request(req);
1209 dprintk(", error = %d\n", task->tk_status);
1210 goto next;
1211 }
1212 end_page_writeback(page);
1213
1214 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1215 if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
1216 nfs_inode_remove_request(req);
1217 dprintk(" OK\n");
1218 goto next;
1219 }
1220 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1221 nfs_mark_request_commit(req);
1222 dprintk(" marked for commit\n");
1223 #else
1224 nfs_inode_remove_request(req);
1225 #endif
1226 next:
1227 nfs_clear_page_writeback(req);
1228 }
1229 }
1230
1231 static const struct rpc_call_ops nfs_write_full_ops = {
1232 .rpc_call_done = nfs_writeback_done_full,
1233 .rpc_release = nfs_writedata_release,
1234 };
1235
1236
1237 /*
1238 * This function is called when the WRITE call is complete.
1239 */
1240 int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1241 {
1242 struct nfs_writeargs *argp = &data->args;
1243 struct nfs_writeres *resp = &data->res;
1244 int status;
1245
1246 dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
1247 task->tk_pid, task->tk_status);
1248
1249 /* Call the NFS version-specific code */
1250 status = NFS_PROTO(data->inode)->write_done(task, data);
1251 if (status != 0)
1252 return status;
1253 nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1254
1255 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1256 if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1257 /* We tried a write call, but the server did not
1258 * commit data to stable storage even though we
1259 * requested it.
1260 * Note: There is a known bug in Tru64 < 5.0 in which
1261 * the server reports NFS_DATA_SYNC, but performs
1262 * NFS_FILE_SYNC. We therefore implement this checking
1263 * as a dprintk() in order to avoid filling syslog.
1264 */
1265 static unsigned long complain;
1266
1267 if (time_before(complain, jiffies)) {
1268 dprintk("NFS: faulty NFS server %s:"
1269 " (committed = %d) != (stable = %d)\n",
1270 NFS_SERVER(data->inode)->hostname,
1271 resp->verf->committed, argp->stable);
1272 complain = jiffies + 300 * HZ;
1273 }
1274 }
1275 #endif
1276 /* Is this a short write? */
1277 if (task->tk_status >= 0 && resp->count < argp->count) {
1278 static unsigned long complain;
1279
1280 nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1281
1282 /* Has the server at least made some progress? */
1283 if (resp->count != 0) {
1284 /* Was this an NFSv2 write or an NFSv3 stable write? */
1285 if (resp->verf->committed != NFS_UNSTABLE) {
1286 /* Resend from where the server left off */
1287 argp->offset += resp->count;
1288 argp->pgbase += resp->count;
1289 argp->count -= resp->count;
1290 } else {
1291 /* Resend as a stable write in order to avoid
1292 * headaches in the case of a server crash.
1293 */
1294 argp->stable = NFS_FILE_SYNC;
1295 }
1296 rpc_restart_call(task);
1297 return -EAGAIN;
1298 }
1299 if (time_before(complain, jiffies)) {
1300 printk(KERN_WARNING
1301 "NFS: Server wrote zero bytes, expected %u.\n",
1302 argp->count);
1303 complain = jiffies + 300 * HZ;
1304 }
1305 /* Can't do anything about it except throw an error. */
1306 task->tk_status = -EIO;
1307 }
1308 return 0;
1309 }
1310
1311
1312 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1313 void nfs_commit_release(void *wdata)
1314 {
1315 nfs_commit_free(wdata);
1316 }
1317
1318 /*
1319 * Set up the argument/result storage required for the RPC call.
1320 */
1321 static void nfs_commit_rpcsetup(struct list_head *head,
1322 struct nfs_write_data *data,
1323 int how)
1324 {
1325 struct nfs_page *first;
1326 struct inode *inode;
1327 int flags;
1328
1329 /* Set up the RPC argument and reply structs
1330 * NB: take care not to mess about with data->commit et al. */
1331
1332 list_splice_init(head, &data->pages);
1333 first = nfs_list_entry(data->pages.next);
1334 inode = first->wb_context->dentry->d_inode;
1335
1336 data->inode = inode;
1337 data->cred = first->wb_context->cred;
1338
1339 data->args.fh = NFS_FH(data->inode);
1340 /* Note: we always request a commit of the entire inode */
1341 data->args.offset = 0;
1342 data->args.count = 0;
1343 data->res.count = 0;
1344 data->res.fattr = &data->fattr;
1345 data->res.verf = &data->verf;
1346 nfs_fattr_init(&data->fattr);
1347
1348 /* Set up the initial task struct. */
1349 flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1350 rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
1351 NFS_PROTO(inode)->commit_setup(data, how);
1352
1353 data->task.tk_priority = flush_task_priority(how);
1354 data->task.tk_cookie = (unsigned long)inode;
1355
1356 dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
1357 }
1358
1359 /*
1360 * Commit dirty pages
1361 */
1362 static int
1363 nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1364 {
1365 struct nfs_write_data *data;
1366 struct nfs_page *req;
1367
1368 data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);
1369
1370 if (!data)
1371 goto out_bad;
1372
1373 /* Set up the argument struct */
1374 nfs_commit_rpcsetup(head, data, how);
1375
1376 nfs_execute_write(data);
1377 return 0;
1378 out_bad:
1379 while (!list_empty(head)) {
1380 req = nfs_list_entry(head->next);
1381 nfs_list_remove_request(req);
1382 nfs_mark_request_commit(req);
1383 nfs_clear_page_writeback(req);
1384 }
1385 return -ENOMEM;
1386 }
1387
1388 /*
1389 * COMMIT call returned
1390 */
1391 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1392 {
1393 struct nfs_write_data *data = calldata;
1394 struct nfs_page *req;
1395
1396 dprintk("NFS: %4d nfs_commit_done (status %d)\n",
1397 task->tk_pid, task->tk_status);
1398
1399 /* Call the NFS version-specific code */
1400 if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
1401 return;
1402
1403 while (!list_empty(&data->pages)) {
1404 req = nfs_list_entry(data->pages.next);
1405 nfs_list_remove_request(req);
1406 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1407
1408 dprintk("NFS: commit (%s/%Ld %d@%Ld)",
1409 req->wb_context->dentry->d_inode->i_sb->s_id,
1410 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1411 req->wb_bytes,
1412 (long long)req_offset(req));
1413 if (task->tk_status < 0) {
1414 req->wb_context->error = task->tk_status;
1415 nfs_inode_remove_request(req);
1416 dprintk(", error = %d\n", task->tk_status);
1417 goto next;
1418 }
1419
1420 /* Okay, COMMIT succeeded, apparently. Check the verifier
1421 * returned by the server against all stored verfs. */
1422 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1423 /* We have a match */
1424 nfs_inode_remove_request(req);
1425 dprintk(" OK\n");
1426 goto next;
1427 }
1428 /* We have a mismatch. Write the page again */
1429 dprintk(" mismatch\n");
1430 nfs_mark_request_dirty(req);
1431 next:
1432 nfs_clear_page_writeback(req);
1433 }
1434 }
1435
1436 static const struct rpc_call_ops nfs_commit_ops = {
1437 .rpc_call_done = nfs_commit_done,
1438 .rpc_release = nfs_commit_release,
1439 };
1440 #else
1441 static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1442 {
1443 return 0;
1444 }
1445 #endif
1446
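/*
 * Scan the inode for dirty requests in the given range and flush them.
 * Returns the number of requests found, or a negative errno if the
 * flush itself failed.
 */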
1447 static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
1448 unsigned int npages, int how)
1449 {
1450 struct nfs_inode *nfsi = NFS_I(inode);
1451 LIST_HEAD(head);
1452 int res;
1453
1454 spin_lock(&nfsi->req_lock);
1455 res = nfs_scan_dirty(inode, &head, idx_start, npages);
1456 spin_unlock(&nfsi->req_lock);
1457 if (res) {
1458 int error = nfs_flush_list(inode, &head, res, how);
1459 if (error < 0)
1460 return error;
1461 }
1462 return res;
1463 }
1464
1465 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
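/*
 * Gather the inode's entire commit list and send a COMMIT for it.
 * Returns the number of requests committed, or a negative errno.
 */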
1466 int nfs_commit_inode(struct inode *inode, int how)
1467 {
1468 struct nfs_inode *nfsi = NFS_I(inode);
1469 LIST_HEAD(head);
1470 int res;
1471
1472 spin_lock(&nfsi->req_lock);
1473 res = nfs_scan_commit(inode, &head, 0, 0);
1474 spin_unlock(&nfsi->req_lock);
1475 if (res) {
1476 int error = nfs_commit_list(inode, &head, how);
1477 if (error < 0)
1478 return error;
1479 }
1480 return res;
1481 }
1482 #endif
1483
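/*
 * Flush and wait for everything outstanding in the given range: wait for
 * in-flight requests, flush (or, with FLUSH_INVALIDATE, cancel) dirty
 * pages, then commit unstable writes, looping until the range is clean.
 * FLUSH_NOCOMMIT skips the COMMIT step.
 */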
1484 int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
1485 unsigned int npages, int how)
1486 {
1487 struct nfs_inode *nfsi = NFS_I(inode);
1488 LIST_HEAD(head);
1489 int nocommit = how & FLUSH_NOCOMMIT;
1490 int pages, ret;
1491
1492 how &= ~FLUSH_NOCOMMIT;
1493 spin_lock(&nfsi->req_lock);
1494 do {
1495 ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
1496 if (ret != 0)
1497 continue;
1498 pages = nfs_scan_dirty(inode, &head, idx_start, npages);
1499 if (pages != 0) {
1500 spin_unlock(&nfsi->req_lock);
1501 if (how & FLUSH_INVALIDATE)
1502 nfs_cancel_requests(&head);
1503 else
1504 ret = nfs_flush_list(inode, &head, pages, how);
1505 spin_lock(&nfsi->req_lock);
1506 continue;
1507 }
1508 if (nocommit)
1509 break;
1510 pages = nfs_scan_commit(inode, &head, idx_start, npages);
1511 if (pages == 0)
1512 break;
1513 if (how & FLUSH_INVALIDATE) {
1514 spin_unlock(&nfsi->req_lock);
1515 nfs_cancel_requests(&head);
1516 spin_lock(&nfsi->req_lock);
1517 continue;
1518 }
1519 pages += nfs_scan_commit(inode, &head, 0, 0);
1520 spin_unlock(&nfsi->req_lock);
1521 ret = nfs_commit_list(inode, &head, how);
1522 spin_lock(&nfsi->req_lock);
1523 } while (ret >= 0);
1524 spin_unlock(&nfsi->req_lock);
1525 return ret;
1526 }
1527
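/*
 * Create the slab cache and mempools backing the write and commit
 * descriptors; called at NFS module initialisation.
 */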
1528 int __init nfs_init_writepagecache(void)
1529 {
1530 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1531 sizeof(struct nfs_write_data),
1532 0, SLAB_HWCACHE_ALIGN,
1533 NULL, NULL);
1534 if (nfs_wdata_cachep == NULL)
1535 return -ENOMEM;
1536
1537 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1538 nfs_wdata_cachep);
1539 if (nfs_wdata_mempool == NULL)
1540 return -ENOMEM;
1541
1542 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1543 nfs_wdata_cachep);
1544 if (nfs_commit_mempool == NULL)
1545 return -ENOMEM;
1546
1547 return 0;
1548 }
1549
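/* Tear down the mempools and slab cache created above. */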
1550 void nfs_destroy_writepagecache(void)
1551 {
1552 mempool_destroy(nfs_commit_mempool);
1553 mempool_destroy(nfs_wdata_mempool);
1554 if (kmem_cache_destroy(nfs_wdata_cachep))
1555 printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
1556 }
1557