/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
/*
 * Local function declarations
 */
static struct nfs_page *nfs_update_request(struct nfs_open_context *,
					   struct page *,
					   unsigned int, unsigned int);
static void nfs_mark_request_dirty(struct nfs_page *req);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			atomic_inc(&req->wb_count);
	}
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct nfs_page *req = NULL;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;

	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(req_lock);
	return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset + count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	if (count != PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
	SetPageUptodate(page);
}
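/*
 * Example of the zero-fill above: with a 4096-byte page and a file
 * whose size ends 1000 bytes into that page, nfs_page_length() is 1000.
 * A write of exactly 1000 bytes at base 0 covers every valid byte, so
 * the remaining 3096 bytes past EOF are cleared and the whole page can
 * safely be marked PG_uptodate without reading from the server.
 */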
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
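/*
 * Worked example: nfs_congestion_kb is in kilobytes while the per-server
 * writeback counter below is in pages, hence the shift by (PAGE_SHIFT - 10).
 * With 4 KiB pages and nfs_congestion_kb = 32768 (32 MiB), the backing
 * device is marked congested once more than 8192 pages are in flight and
 * uncongested again only below 6144 pages: the OFF threshold is 75% of
 * the ON threshold, which gives the transition some hysteresis.
 */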
static void nfs_set_page_writeback(struct page *page)
{
	if (!test_set_page_writeback(page)) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		if (atomic_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH)
			set_bdi_congested(&nfss->backing_dev_info, WRITE);
	}
}
static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
		congestion_end(WRITE);
	}
}
/*
 * Find an associated nfs write request, and prepare to flush it out.
 * Returns 1 if there was no write request, or if the request was
 * already tagged by nfs_set_page_dirty. Returns 0 if the request
 * was not tagged.
 * May also return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_mark_flush(struct page *page)
{
	struct nfs_page *req;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
	int ret;

	spin_lock(req_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(req_lock);
			return 1;
		}
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request_dontget() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(req_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(req_lock);
	}
	spin_unlock(req_lock);
	if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
		nfs_mark_request_dirty(req);
		nfs_set_page_writeback(page);
	}
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfs_unlock_request(req);
	return ret;
}
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned offset;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	err = nfs_page_mark_flush(page);
	if (err <= 0)
		goto out;
	err = 0;
	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
	if (err != 0)
		goto out;
	err = nfs_page_mark_flush(page);
	if (err > 0)
		err = 0;
out:
	if (!wbc->for_writepages)
		nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc));
	return err;
}
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return err;
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	err = 0;
out:
	return err;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
}
static void
nfs_redirty_request(struct nfs_page *req)
{
	clear_bit(PG_FLUSHING, &req->wb_flags);
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	return test_bit(PG_FLUSHING, &req->wb_flags) == 0;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
#endif
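/*
 * Requests parked on the commit list above were answered with an
 * UNSTABLE write: the server has acknowledged the data but may still
 * hold it only in volatile storage. They are accounted as
 * NR_UNSTABLE_NFS, and the inode is marked I_DIRTY_DATASYNC so the VFS
 * calls back later to send the COMMIT that makes the data durable.
 */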
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}
static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif
static int nfs_wait_on_write_congestion(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(inode, NFSIOS_CONGESTIONWAIT);

	do {
		struct rpc_clnt *clnt = NFS_CLIENT(inode);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		ret = congestion_wait_interruptible(WRITE, HZ/10);
		rpc_clnt_sigunmask(clnt, &oldset);
		if (ret == -ERESTARTSYS)
			break;
		ret = 0;
	} while (bdi_write_congested(bdi));

	return ret;
}
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page *nfs_update_request(struct nfs_open_context *ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page	*req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(mapping))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;

			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
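/*
 * Example of the merge above: an existing dirty request covering bytes
 * 0-511 of the page (wb_offset = 0, wb_bytes = 512) and a new write of
 * 1024 bytes at offset 256 overlap, so instead of creating a second
 * request the existing one is widened: wb_offset stays 0 and wb_bytes
 * becomes end - wb_offset = 1280.
 */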
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		nfs_set_pageerror(page);
	return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{
	nfs_end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_redirty_request(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);
out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated write call "
		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}
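/*
 * The wire offset above is assembled in two steps: req_offset(req)
 * (defined in linux/nfs_page.h) converts the request's page index and
 * page-base offset into an absolute file position, and the extra
 * "offset" parameter then shifts within the request when a single page
 * is split across several smaller RPCs by nfs_flush_multi() below.
 */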
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
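/*
 * Example: this path is only taken when wsize < PAGE_CACHE_SIZE. On a
 * machine with 64 KiB pages talking to a server with a 32 KiB wsize,
 * one fully dirty page is carved into two RPCs of 32 KiB at offsets 0
 * and 32768, and wb_complete is set to 2 so nfs_writepage_release()
 * runs only after both replies have arrived.
 */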
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page	*req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}
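/*
 * The FLUSH_STABLE upgrade above is a round-trip optimisation: when
 * everything outstanding fits in a single WRITE, asking the server for
 * a stable (FILE_SYNC) write costs no extra RPC, whereas an unstable
 * write would have to be followed by a separate COMMIT call.
 */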
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page	*req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		nfs_set_pageerror(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page	*req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			nfs_set_pageerror(page);
			req->wb_context->error = task->tk_status;
			nfs_end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		nfs_end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
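/*
 * Short-write example for the resend logic above: if we asked for
 * argp->count = 16384 bytes and a stable write only stored
 * resp->count = 4096, the arguments are advanced by 4096 (offset and
 * pgbase) and shrunk to 12288 bytes before rpc_restart_call(). An
 * unstable short write is instead replayed from the start as
 * NFS_FILE_SYNC, so a server crash cannot lose the earlier part.
 */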
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
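/*
 * The write verifier checked in the completion path below changes
 * whenever the server reboots (and with it loses its uncommitted,
 * unstable data). If the verifier stored at WRITE time no longer
 * matches the one in the COMMIT reply, the pages may have been lost on
 * the server and must be written out again.
 */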
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page	*req;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct nfs_inode *nfsi = NFS_I(mapping->host);
	LIST_HEAD(head);
	long res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(mapping, wbc, &head);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(mapping->host, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	unsigned long idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			unsigned long l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(unsigned long)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		wbc->pages_skipped = 0;
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(mapping, wbc, &head);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE) {
				nfs_cancel_dirty_list(&head);
				ret = pages;
			} else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (wbc->pages_skipped != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0) {
			if (wbc->pages_skipped != 0)
				continue;
			break;
		}
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = generic_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
		.for_writepages = 1,
	};
	int ret;

	if (!(how & FLUSH_NOWRITEPAGE)) {
		ret = generic_writepages(mapping, &wbc);
		if (ret < 0)
			goto out;
	}
	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
int nfs_set_page_dirty(struct page *page)
{
	struct nfs_page *req;

	req = nfs_page_find_request(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
		nfs_release_request(req);
	}
	return __set_page_dirty_nobuffers(page);
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11584k
	 * 256MB:   16384k
	 * 512MB:   23168k
	 *   1GB:   32768k
	 *   2GB:   46336k
	 *   4GB:   65536k
	 *   8GB:   92672k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}
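/*
 * Sizing example for the formula above, assuming 4 KiB pages: with
 * 1 GiB of RAM, totalram_pages is 262144, int_sqrt() of that is 512,
 * and 16 * 512 = 8192 shifted left by (PAGE_SHIFT - 10) = 2 gives the
 * 32768k of the 1GB row in the table. The 256 MiB cap only takes
 * effect on machines with roughly 64 GiB of memory or more.
 */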
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}