/*
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
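/*
 * In short, the life cycle of a request below is: nfs_update_request()
 * attaches an nfs_page to the page and the inode's dirty list;
 * nfs_flush_list() turns dirty requests into WRITE RPCs (whole pages via
 * nfs_flush_one(), sub-page chunks via nfs_flush_multi()); replies that
 * came back merely UNSTABLE are moved to the commit list, and
 * nfs_commit_list() batches them into a single COMMIT call whose
 * verifier is checked in nfs_commit_done().
 */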
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
static void nfs_mark_request_dirty(struct nfs_page *req);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}
void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}
static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}
void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			atomic_inc(&req->wb_count);
	}
	return req;
}
static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct nfs_page *req = NULL;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;

	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(req_lock);
	return req;
}
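/*
 * Both lookup helpers above return the request with its wb_count
 * reference bumped, so every caller that gets a non-NULL result must
 * drop the reference with nfs_release_request() once it is done.
 */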
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	if (count != PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
	SetPageUptodate(page);
}
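/*
 * The nfs_page_length() check above means "count covers everything in
 * this page up to i_size"; when that is a partial last page, the bytes
 * beyond EOF are explicitly zeroed before the page is declared
 * uptodate, so a later read cannot see stale data past the end.
 */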
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * Find an associated nfs write request, and prepare to flush it out.
 * Returns 1 if there was no write request, or if the request was
 * already tagged by nfs_set_page_dirty. Returns 0 if the request
 * was not tagged.
 * May also return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_mark_flush(struct page *page)
{
	struct nfs_page *req;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
	int ret;

	spin_lock(req_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(req_lock);
			return 1;
		}
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request_dontget() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(req_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(req_lock);
	}
	spin_unlock(req_lock);
	if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
		nfs_mark_request_dirty(req);
		set_page_writeback(page);
	}
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfs_unlock_request(req);
	return ret;
}
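/*
 * nfs_writepage_locked() below calls this twice: once to flush any
 * request that already covers the page, and once more after
 * nfs_writepage_setup() so that the freshly updated request is also
 * tagged and put under writeback.
 */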
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned offset;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	err = nfs_page_mark_flush(page);
	if (err <= 0)
		goto out;
	err = 0;
	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
	if (err != 0)
		goto out;
	err = nfs_page_mark_flush(page);
	if (err > 0)
		err = 0;
out:
	if (!wbc->for_writepages)
		nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
	return err;
}
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return err;
}
/*
 * Note: causes nfs_update_request() to block on the assumption
 *	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0)
		err = 0;
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	congestion_end(WRITE);
	return err;
}
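/*
 * The BDI_write_congested bit above acts as a simple gate: only one
 * flusher may push this mapping at a time, non-blocking callers bail
 * out immediately, and everyone parked on nfs_write_congestion is
 * woken once the flush and commit have finished.
 */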
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
	mark_inode_dirty(inode);
}
static void
nfs_redirty_request(struct nfs_page *req)
{
	clear_bit(PG_FLUSHING, &req->wb_flags);
	__set_page_dirty_nobuffers(req->wb_page);
}
/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	return test_bit(PG_FLUSHING, &req->wb_flags) == 0;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	mark_inode_dirty(inode);
}
#endif
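/*
 * Pages on the commit list have been written to the server but only to
 * unstable storage, which is why they are accounted as NR_UNSTABLE_NFS
 * rather than NR_FILE_DIRTY; the counter is dropped again when the
 * request is committed or cancelled.
 */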
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long		idx_end, next;
	unsigned int		res = 0;
	int			error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	spin_lock(&nfsi->req_lock);
	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;
	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}
static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif
static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page		*req, *new = NULL;
	unsigned long		rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = page->mapping->host;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) +offset));

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_redirty_request(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);
out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
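/*
 * For example, with wsize = 4096 a single dirty 16K page is split into
 * four partial requests of 4096 bytes at offsets 0, 4096, 8192 and
 * 12288; wb_complete is preset to 4 so that nfs_writepage_release() is
 * only called once the last of the four replies has come back.
 */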
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;
	unsigned int		count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page	*req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	struct page		*page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long	complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long	complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
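/*
 * Short-write example: if argp->count was 16384 but the server only
 * accepted resp->count = 4096 of a stable write, the call is restarted
 * with offset and pgbase advanced by 4096 and count reduced to 12288,
 * so the retry picks up exactly where the server left off.
 */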
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page		*first;
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data	*data;
	struct nfs_page		*req;

	data = nfs_commit_alloc();
	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
	next:
		nfs_clear_page_writeback(req);
	}
}
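/*
 * The verifier comparison above is what detects a server reboot between
 * the unstable WRITE and the COMMIT: a rebooted server returns a new
 * write verifier, the memcmp() fails, and the page is simply redirtied
 * and written again rather than being dropped.
 */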
static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct nfs_inode *nfsi = NFS_I(mapping->host);
	LIST_HEAD(head);
	long res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(mapping, wbc, &head);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(mapping->host, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	unsigned long idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			unsigned long l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(unsigned long)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		wbc->pages_skipped = 0;
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(mapping, wbc, &head);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE) {
				nfs_cancel_dirty_list(&head);
				ret = pages;
			} else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (wbc->pages_skipped != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0) {
			if (wbc->pages_skipped != 0)
				continue;
			break;
		}
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = generic_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
		.for_writepages = 1,
	};
	int ret;

	if (!(how & FLUSH_NOWRITEPAGE)) {
		ret = generic_writepages(mapping, &wbc);
		if (ret < 0)
			goto out;
	}
	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
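/*
 * FLUSH_STABLE is used here because the caller is typically about to
 * read the page: a stable write leaves nothing behind on the commit
 * list, so the read can proceed as soon as the WRITE reply arrives.
 */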
int nfs_set_page_dirty(struct page *page)
{
	struct nfs_page *req;

	req = nfs_page_find_request(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
		nfs_release_request(req);
	}
	return __set_page_dirty_nobuffers(page);
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}