/*
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
/*
 * Local function declarations
 */
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
				  struct inode *inode, int ioflags);
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
struct nfs_write_data *nfs_commitdata_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
void nfs_commit_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
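/*
 * Typical pairing for the helpers above (an illustrative sketch, not a
 * call site in this file):
 *
 *	struct nfs_write_data *data = nfs_writedata_alloc(npages);
 *	if (data == NULL)
 *		return -ENOMEM;
 *	...
 *	nfs_writedata_free(data);
 *
 * For requests of at most ARRAY_SIZE(p->page_array) pages the page vector
 * is the array embedded in struct nfs_write_data itself, so the common
 * small-I/O case costs nothing beyond the mempool allocation.
 */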
void nfs_writedata_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}
static void nfs_writedata_release(struct nfs_write_data *wdata)
{
	put_lseg(wdata->lseg);
	put_nfs_open_context(wdata->args.context);
	nfs_writedata_free(wdata);
}
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			kref_get(&req->wb_kref);
	}
	return req;
}
static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(&inode->i_lock);
	return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (i_size > 0 && page->index < end_index)
		goto out;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	SetPageUptodate(page);
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI | FLUSH_STABLE;
	if (wbc->for_kupdate || wbc->for_background)
		return FLUSH_LOWPRI | FLUSH_COND_STABLE;
	return FLUSH_COND_STABLE;
}
/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
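/*
 * Worked example for the thresholds above (illustrative numbers, assuming
 * nfs_congestion_kb = 65536, i.e. 64MB, and PAGE_SHIFT = 12, i.e. 4K pages):
 *
 *	NFS_CONGESTION_ON_THRESH  = 65536 >> (12 - 10)   = 16384 pages
 *	NFS_CONGESTION_OFF_THRESH = 16384 - (16384 >> 2) = 12288 pages
 *
 * so the bdi is marked congested once ~16k pages are in flight, and is
 * only uncongested again after the count drops below ~12k, giving the
 * on/off transitions some hysteresis.
 */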
static int nfs_set_page_writeback(struct page *page)
{
	int ret = test_set_page_writeback(page);

	if (!ret) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		page_cache_get(page);
		if (atomic_long_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH) {
			set_bdi_congested(&nfss->backing_dev_info,
						BLK_RW_ASYNC);
		}
	}
	return ret;
}
static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	page_cache_release(page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL)
			break;
		if (nfs_set_page_tag_locked(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_set_page_tag_locked() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		if (!nonblock)
			ret = nfs_wait_on_request(req);
		else
			ret = -EAGAIN;
		nfs_release_request(req);
		if (ret != 0)
			return ERR_PTR(ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
	return req;
}
/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page, bool nonblock)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_find_and_lock_request(page, nonblock);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = nfs_set_page_writeback(page);
	BUG_ON(ret != 0);
	BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));

	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
		ret = pgio->pg_error;
	}
out:
	return ret;
}
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
	struct inode *inode = page->mapping->host;
	int ret;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	nfs_pageio_cond_complete(pgio, page->index);
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
	}
	return ret;
}
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
	err = nfs_do_writepage(page, wbc, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}
static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	unlock_page(page);
	return ret;
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	unsigned long *bitlock = &NFS_I(inode)->flags;
	struct nfs_pageio_descriptor pgio;
	int err;

	/* Stop dirtying of new pages while we sync */
	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		goto out_err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_FLUSHING);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_preload(GFP_NOFS);
	if (error != 0)
		goto out;

	/* Lock the request! */
	nfs_lock_request_dontget(req);

	spin_lock(&inode->i_lock);
	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error);
	if (!nfsi->npages) {
		igrab(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	set_bit(PG_MAPPED, &req->wb_flags);
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
				NFS_PAGE_TAG_LOCKED);
	spin_unlock(&inode->i_lock);
	radix_tree_preload_end();
out:
	return error;
}
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&inode->i_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	clear_bit(PG_MAPPED, &req->wb_flags);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&inode->i_lock);
		iput(inode);
	} else
		spin_unlock(&inode->i_lock);
	nfs_release_request(req);
}
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
	__mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	set_bit(PG_CLEAN, &(req)->wb_flags);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index,
			NFS_PAGE_TAG_COMMIT);
	nfsi->ncommit++;
	spin_unlock(&inode->i_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
static int
nfs_clear_request_commit(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
		dec_zone_page_state(page, NR_UNSTABLE_NFS);
		dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
		return 1;
	}
	return 0;
}
static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	if (data->verf.committed == NFS_DATA_SYNC)
		return data->lseg == NULL;
	else
		return data->verf.committed != NFS_FILE_SYNC;
}
static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		nfs_mark_request_commit(req);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
		nfs_mark_request_dirty(req);
		return 1;
	}
	return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline int
nfs_clear_request_commit(struct nfs_page *req)
{
	return 0;
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	return 0;
}
#endif
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int
nfs_need_commit(struct nfs_inode *nfsi)
{
	return radix_tree_tagged(&nfsi->nfs_page_tree, NFS_PAGE_TAG_COMMIT);
}
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	if (!nfs_need_commit(nfsi))
		return 0;

	ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
	if (ret > 0)
		nfsi->ncommit -= ret;
	if (nfs_need_commit(NFS_I(inode)))
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
#else
static inline int nfs_need_commit(struct nfs_inode *nfsi)
{
	return 0;
}

static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	return 0;
}
#endif
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;

	end = offset + bytes;
	spin_lock(&inode->i_lock);

	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL)
			goto out_unlock;

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
		if (offset > rqend
		    || end < req->wb_offset)
			goto out_flushme;

		if (nfs_set_page_tag_locked(req))
			break;

		/* The request is locked, so wait and then retry */
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
	}

	if (nfs_clear_request_commit(req) &&
	    radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
				 req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
		NFS_I(inode)->ncommit--;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	spin_unlock(&inode->i_lock);
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}
/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page	*req;
	int error;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, inode, page, offset, bytes);
	if (IS_ERR(req))
		goto out;
	error = nfs_inode_add_request(inode, req);
	if (error != 0) {
		nfs_release_request(req);
		req = ERR_PTR(error);
	}
out:
	return req;
}
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	nfs_mark_request_dirty(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_mark_request_dirty(req);
	nfs_clear_page_tag_locked(req);
	return 0;
}
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx ||
			req->wb_lock_context->lockowner != current->files ||
			req->wb_lock_context->pid != current->tgid;
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	return PageUptodate(page) &&
		!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode	*inode = page->mapping->host;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	/* If we're not using byte range locks, and we know the page
	 * is up to date, it may be more efficient to extend the write
	 * to cover the entire page in order to avoid fragmentation
	 * inefficiencies.
	 */
	if (nfs_write_pageuptodate(page, inode) &&
			inode->i_flock == NULL &&
			!(file->f_flags & O_DSYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
			status, (long long)i_size_read(inode));
	return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
		nfs_inode_remove_request(req);
	nfs_clear_page_tag_locked(req);
	nfs_end_page_writeback(page);
}
static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
int nfs_initiate_write(struct nfs_write_data *data,
		       struct rpc_clnt *clnt,
		       const struct rpc_call_ops *call_ops,
		       int how)
{
	struct inode *inode = data->inode;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &data->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
		.priority = priority,
	};
	int ret = 0;

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->write_setup(data, &msg);

	dprintk("NFS: %5u initiated write call "
		"(req %s/%lld, %u bytes @ offset %llu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		data->args.count,
		(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_write);
/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		struct pnfs_layout_segment *lseg,
		int how)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->path.dentry->d_inode;
	data->cred = req->wb_context->cred;
	data->lseg = get_lseg(lseg);

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;
	data->args.stable  = NFS_UNSTABLE;
	if (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
		data->args.stable = NFS_DATA_SYNC;
		if (!nfs_need_commit(NFS_I(inode)))
			data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	if (data->lseg &&
	    (pnfs_try_to_write_data(data, call_ops, how) == PNFS_ATTEMPTED))
		return 0;

	return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
}
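/*
 * A note on the stable-write selection above: when the caller asked for
 * a stable write (FLUSH_STABLE or FLUSH_COND_STABLE) and nothing else on
 * this inode is waiting for a COMMIT, requesting NFS_FILE_SYNC up front
 * lets the client skip the separate COMMIT round trip entirely;
 * otherwise NFS_DATA_SYNC is requested and the pages go through the
 * commit path like any other unstable write.
 */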
/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	nfs_mark_request_dirty(req);
	nfs_clear_page_tag_locked(req);
	nfs_end_page_writeback(page);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
{
	struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(desc->pg_inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	struct pnfs_layout_segment *lseg;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
	     desc->pg_count > wsize))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	nbytes = desc->pg_count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	BUG_ON(desc->pg_lseg);
	lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
	ClearPageError(page);
	offset = 0;
	nbytes = desc->pg_count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < wsize)
			wsize = nbytes;
		ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					  wsize, offset, lseg, desc->pg_ioflags);
		if (ret == 0)
			ret = ret2;
		offset += wsize;
		nbytes -= wsize;
	} while (nbytes != 0);

	put_lseg(lseg);
	desc->pg_lseg = NULL;
	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_redirty_request(req);
	return -ENOMEM;
}
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;
	struct list_head *head = &desc->pg_list;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	int ret;

	data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
						      desc->pg_count));
	if (!data) {
		while (!list_empty(head)) {
			req = nfs_list_entry(head->next);
			nfs_list_remove_request(req);
			nfs_redirty_request(req);
		}
		ret = -ENOMEM;
		goto out;
	}
	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);
	if ((!lseg) && list_is_singular(&data->pages))
		lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	ret = nfs_write_rpcsetup(req, data, &nfs_write_full_ops, desc->pg_count, 0, lseg, desc->pg_ioflags);
out:
	put_lseg(lseg); /* Cleans any gotten in ->pg_test */
	desc->pg_lseg = NULL;
	return ret;
}
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	size_t wsize = NFS_SERVER(inode)->wsize;

	pnfs_pageio_init_write(pgio, inode);

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
	else
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;

	dprintk("NFS: %5u write(%s/%lld %d@%lld)",
		task->tk_pid,
		data->req->wb_context->path.dentry->d_inode->i_sb->s_id,
		(long long)
		NFS_FILEID(data->req->wb_context->path.dentry->d_inode),
		data->req->wb_bytes, (long long)req_offset(data->req));

	nfs_writeback_done(task, data);
}
static void nfs_writeback_release_partial(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0) {
		nfs_set_pageerror(page);
		nfs_context_set_write_error(req->wb_context, status);
		dprintk(", error = %d\n", status);
		goto out;
	}

	if (nfs_write_need_commit(data)) {
		struct inode *inode = page->mapping->host;

		spin_lock(&inode->i_lock);
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
		spin_unlock(&inode->i_lock);
	} else
		dprintk(" OK\n");

out:
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
	nfs_writedata_release(calldata);
}
#if defined(CONFIG_NFS_V4_1)
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;

	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res, 1, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs_write_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writeback_release_partial,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;

	nfs_writeback_done(task, data);
}
static void nfs_writeback_release_full(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	int status = data->task.tk_status;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);
		struct page *page = req->wb_page;

		nfs_list_remove_request(req);

		dprintk("NFS: %5u write (%s/%lld %d@%lld)",
			data->task.tk_pid,
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (status < 0) {
			nfs_set_pageerror(page);
			nfs_context_set_write_error(req->wb_context, status);
			dprintk(", error = %d\n", status);
			goto remove_request;
		}

		if (nfs_write_need_commit(data)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req);
			dprintk(" marked for commit\n");
			goto next;
		}
		dprintk(" OK\n");
remove_request:
		nfs_inode_remove_request(req);
next:
		nfs_clear_page_tag_locked(req);
		nfs_end_page_writeback(page);
	}
	nfs_writedata_release(calldata);
}
static const struct rpc_call_ops nfs_write_full_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writeback_release_full,
};
/*
 * This function is called when the WRITE call is complete.
 */
void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	struct nfs_server	*server = NFS_SERVER(data->inode);
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long	complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				server->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long	complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				data->mds_offset += resp->count;
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			nfs_restart_rpc(task, server->nfs_client);
			return;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
	int ret;

	if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
		return 1;
	if (!may_wait)
		return 0;
	ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
	return (ret < 0) ? ret : 1;
}
static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
	clear_bit(NFS_INO_COMMIT, &nfsi->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}
static void nfs_commitdata_release(void *data)
{
	struct nfs_write_data *wdata = data;

	put_nfs_open_context(wdata->args.context);
	nfs_commit_free(wdata);
}
static int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
			const struct rpc_call_ops *call_ops,
			int how)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
		.priority = priority,
	};

	/* Set up the initial task struct. */
	NFS_PROTO(data->inode)->commit_setup(data, &msg);

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_init_commit(struct nfs_write_data *data,
			    struct list_head *head)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = first->wb_context->path.dentry->d_inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;
	data->mds_ops     = &nfs_commit_ops;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->args.context = get_nfs_open_context(first->wb_context);
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
static void nfs_retry_commit(struct list_head *page_list)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
			     BDI_RECLAIMABLE);
		nfs_clear_page_tag_locked(req);
	}
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data	*data;

	data = nfs_commitdata_alloc();
	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_init_commit(data, head);
	return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
 out_bad:
	nfs_retry_commit(head);
	nfs_commit_clear_lock(NFS_I(inode));
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
}
static void nfs_commit_release(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	int status = data->task.tk_status;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		nfs_clear_request_commit(req);

		dprintk("NFS: commit (%s/%lld %d@%lld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
	nfs_commit_clear_lock(NFS_I(data->inode));
	nfs_commitdata_release(calldata);
}
static const struct rpc_call_ops nfs_commit_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	int may_wait = how & FLUSH_SYNC;
	int res;

	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
	if (res <= 0)
		goto out_mark_dirty;
	spin_lock(&inode->i_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&inode->i_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
		if (!may_wait)
			goto out_mark_dirty;
		error = wait_on_bit(&NFS_I(inode)->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (error < 0)
			return error;
	} else
		nfs_commit_clear_lock(NFS_I(inode));
	return res;
	/* Note: If we exit without ensuring that the commit is complete,
	 * we must mark the inode as dirty. Otherwise, future calls to
	 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
	 * that the data is on the disk.
	 */
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return res;
}
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (nfsi->ncommit <= (nfsi->npages >> 1))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = nfs_commit_inode(inode, flags);
	if (ret >= 0) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (ret < wbc->nr_to_write)
				wbc->nr_to_write -= ret;
			else
				wbc->nr_to_write = 0;
		}
		return 0;
	}
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
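/*
 * Example of the heuristic above (illustrative numbers): with
 * nfsi->npages = 100 outstanding pages and nfsi->ncommit = 40 awaiting
 * commit, a WB_SYNC_NONE flush skips the COMMIT (40 <= 100 >> 1) and
 * just redirties the inode; only once more than half of the pages are
 * commit-pending does it send an asynchronous COMMIT (flags = 0) rather
 * than waiting for the reply.
 */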
#else
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	return 0;
}
#endif
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return nfs_commit_unstable_pages(inode, wbc);
}
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return sync_inode(inode, &wbc);
}
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		wait_on_page_writeback(page);
		req = nfs_page_find_request(page);
		if (req == NULL)
			break;
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret < 0)
			break;
	}
	return ret;
}
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
	return 0;
out_error:
	return ret;
}
#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page)
{
	struct nfs_page *req;
	int ret;

	nfs_fscache_release_page(page, GFP_KERNEL);

	req = nfs_find_and_lock_request(page, false);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = migrate_page(mapping, newpage, page);
	if (!req)
		goto out;
	if (ret)
		goto out_unlock;
	page_cache_get(newpage);
	spin_lock(&mapping->host->i_lock);
	req->wb_page = newpage;
	SetPagePrivate(newpage);
	set_page_private(newpage, (unsigned long)req);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	spin_unlock(&mapping->host->i_lock);
	page_cache_release(page);
out_unlock:
	nfs_clear_page_tag_locked(req);
out:
	return ret;
}
#endif
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}
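/*
 * Worked example of the sizing above (illustrative, 4K pages): on a 1GB
 * machine totalram_pages = 262144, so
 *
 *	nfs_congestion_kb = (16 * sqrt(262144)) << (12 - 10)
 *	                  = (16 * 512) << 2 = 32768 KB = 32MB
 *
 * and the 256MB cap only takes effect on machines with roughly 64GB of
 * RAM or more.
 */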
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}