/*
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_common_ops;
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);
struct nfs_rw_header *nfs_writehdr_alloc(void)
{
	struct nfs_rw_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);

	if (p) {
		struct nfs_pgio_header *hdr = &p->header;

		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&hdr->pages);
		INIT_LIST_HEAD(&hdr->rpc_list);
		spin_lock_init(&hdr->lock);
		atomic_set(&hdr->refcnt, 0);
		hdr->verf = &p->verf;
	}
	return p;
}
EXPORT_SYMBOL_GPL(nfs_writehdr_alloc);
static struct nfs_pgio_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
						 unsigned int pagecount)
{
	struct nfs_pgio_data *data, *prealloc;

	prealloc = &container_of(hdr, struct nfs_rw_header, header)->rpc_data;
	if (prealloc->header == NULL)
		data = prealloc;
	else
		data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	if (nfs_pgarray_set(&data->pages, pagecount)) {
		data->header = hdr;
		atomic_inc(&hdr->refcnt);
	} else {
		if (data != prealloc)
			kfree(data);
		data = NULL;
	}
out:
	return data;
}
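/*
 * Note on the allocation scheme above: the first nfs_pgio_data for a
 * header comes from the rpc_data area preallocated inside struct
 * nfs_rw_header, so the common single-RPC case costs no extra
 * allocation; only additional descriptors (e.g. when one page must be
 * split into several smaller WRITEs) fall back to kzalloc().
 */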
void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	struct nfs_rw_header *whdr = container_of(hdr, struct nfs_rw_header, header);
	mempool_free(whdr, nfs_wdata_mempool);
}
EXPORT_SYMBOL_GPL(nfs_writehdr_free);

void nfs_writedata_release(struct nfs_pgio_data *wdata)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct nfs_rw_header *write_header = container_of(hdr, struct nfs_rw_header, header);

	put_nfs_open_context(wdata->args.context);
	if (wdata->pages.pagevec != wdata->pages.page_array)
		kfree(wdata->pages.pagevec);
	if (wdata == &write_header->rpc_data) {
		wdata->header = NULL;
		wdata = NULL;
	}
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	/* Note: we only free the rpc_task after callbacks are done.
	 * See the comment in rpc_free_task() for why
	 */
	kfree(wdata);
}
EXPORT_SYMBOL_GPL(nfs_writedata_release);
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
static struct nfs_page *
nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page))
		req = (struct nfs_page *)page_private(page);
	else if (unlikely(PageSwapCache(page))) {
		struct nfs_page *freq, *t;

		/* Linearly search the commit list for the correct req */
		list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
			if (freq->wb_page == page) {
				req = freq;
				break;
			}
		}
	}

	if (req)
		kref_get(&req->wb_kref);

	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(NFS_I(inode), page);
	spin_unlock(&inode->i_lock);
	return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page_file_mapping(page)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (i_size > 0 && page_file_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	SetPageUptodate(page);
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI | FLUSH_STABLE;
	if (wbc->for_kupdate || wbc->for_background)
		return FLUSH_LOWPRI | FLUSH_COND_STABLE;
	return FLUSH_COND_STABLE;
}

/*
 * NFS congestion control
 */
int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
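/*
 * Illustrative note on the thresholds above: they implement simple
 * hysteresis.  The bdi is marked congested once more than
 * NFS_CONGESTION_ON_THRESH pages are under writeback, and uncongested
 * only after the count falls below 3/4 of that.  With 4K pages
 * (PAGE_SHIFT-10 == 2), nfs_congestion_kb = 65536 gives an ON
 * threshold of 16384 pages and an OFF threshold of 12288 pages.
 */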
static void nfs_set_page_writeback(struct page *page)
{
	struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
	int ret = test_set_page_writeback(page);

	WARN_ON_ONCE(ret != 0);

	if (atomic_long_inc_return(&nfss->writeback) >
			NFS_CONGESTION_ON_THRESH) {
		set_bdi_congested(&nfss->backing_dev_info,
					BLK_RW_ASYNC);
	}
}

static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(NFS_I(inode), page);
		if (req == NULL)
			break;
		if (nfs_lock_request(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		if (!nonblock)
			ret = nfs_wait_on_request(req);
		else
			ret = -EAGAIN;
		nfs_release_request(req);
		if (ret != 0)
			return ERR_PTR(ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
	return req;
}
/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page, bool nonblock)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_find_and_lock_request(page, nonblock);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	ret = 0;
	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
		ret = pgio->pg_error;
	}
out:
	return ret;
}
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
	struct inode *inode = page_file_mapping(page)->host;
	int ret;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	nfs_pageio_cond_complete(pgio, page_file_index(page));
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
	}
	return ret;
}
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
				false, &nfs_async_write_completion_ops);
	err = nfs_do_writepage(page, wbc, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	unlock_page(page);
	return ret;
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	unsigned long *bitlock = &NFS_I(inode)->flags;
	struct nfs_pageio_descriptor pgio;
	int err;

	/* Stop dirtying of new pages while we sync */
	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		goto out_err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
				&nfs_async_write_completion_ops);
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_FLUSHING);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}
/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	/* Lock the request! */
	nfs_lock_request(req);

	spin_lock(&inode->i_lock);
	if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		inode->i_version++;
	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
	nfsi->npages++;
	kref_get(&req->wb_kref);
	spin_unlock(&inode->i_lock);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	if (likely(!PageSwapCache(req->wb_page))) {
		set_page_private(req->wb_page, 0);
		ClearPagePrivate(req->wb_page);
		clear_bit(PG_MAPPED, &req->wb_flags);
	}
	nfsi->npages--;
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
}
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
			    struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &(req)->wb_flags);
	spin_lock(cinfo->lock);
	nfs_list_add_request(req, dst);
	cinfo->mds->ncommit++;
	spin_unlock(cinfo->lock);
	if (!cinfo->dreq) {
		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
			     BDI_RECLAIMABLE);
		__mark_inode_dirty(req->wb_context->dentry->d_inode,
				   I_DIRTY_DATASYNC);
	}
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	cinfo->mds->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->lock = &inode->i_lock;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);
/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo))
		return;
	nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
	dec_zone_page_state(page, NR_UNSTABLE_NFS);
	dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
}
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct inode *inode = req->wb_context->dentry->d_inode;
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			spin_lock(cinfo.lock);
			nfs_request_remove_commit_list(req, &cinfo);
			spin_unlock(cinfo.lock);
		}
		nfs_clear_page_commit(req->wb_page);
	}
}

static inline
int nfs_write_need_commit(struct nfs_pgio_data *data)
{
	if (data->verf.committed == NFS_DATA_SYNC)
		return data->header->lseg == NULL;
	return data->verf.committed != NFS_FILE_SYNC;
}
#else
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
}

void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo)
{
}

static void
nfs_clear_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_pgio_data *data)
{
	return 0;
}

#endif
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			nfs_set_pageerror(req->wb_page);
			nfs_context_set_write_error(req->wb_context, hdr->error);
			goto remove_req;
		}
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			nfs_mark_request_dirty(req);
			goto next;
		}
		if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			memcpy(&req->wb_verf, &hdr->verf->verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_unlock_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_release_request(req);
	}
out:
	hdr->release(hdr);
}
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
static unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return cinfo->mds->ncommit;
}

/* cinfo->lock held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		if (!nfs_lock_request(req))
			continue;
		kref_get(&req->wb_kref);
		if (cond_resched_lock(cinfo->lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req, cinfo);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
	}
	return ret;
}
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	spin_lock(cinfo->lock);
	if (cinfo->mds->ncommit > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	spin_unlock(cinfo->lock);
	return ret;
}

#else
static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return 0;
}

int nfs_scan_commit(struct inode *inode, struct list_head *dst,
		    struct nfs_commit_info *cinfo)
{
	return 0;
}
#endif
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;

	end = offset + bytes;
	spin_lock(&inode->i_lock);

	for (;;) {
		req = nfs_page_find_request_locked(NFS_I(inode), page);
		if (req == NULL)
			goto out_unlock;

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
		if (offset > rqend
		    || end < req->wb_offset)
			goto out_flushme;

		if (nfs_lock_request(req))
			break;

		/* The request is locked, so wait and then retry */
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	spin_unlock(&inode->i_lock);
	if (req)
		nfs_clear_request_commit(req);
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}
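/*
 * Example for the coalescing above: if a request already covers bytes
 * 0-2047 of the page and the caller dirties bytes 1024-3071, the two
 * ranges overlap and the existing request is simply widened to cover
 * 0-3071; no second RPC is queued.  Only a non-contiguous update (say
 * bytes 3072-4095 against a request for 0-1023) forces the old
 * request to be flushed out first.
 */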
/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page	*req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, inode, page, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page || req->wb_context != ctx;
		if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
			do_flush |= l_ctx->lockowner.l_owner != current->files
				|| l_ctx->lockowner.l_pid != current->tgid;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page_file_mapping(page)->host, page);
	} while (status == 0);
	return status;
}
/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_key_timeout_notify(auth, ctx->cred);
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
{
	return rpcauth_cred_key_to_expire(ctx->cred);
}
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
		return false;
out:
	return PageUptodate(page) != 0;
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
			inode->i_flock->fl_end == OFFSET_MAX &&
			inode->i_flock->fl_type != F_RDLCK))
		return 1;
	return 0;
}
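/*
 * Example for nfs_can_extend_write(): a process appends 100 bytes to a
 * page that is already up to date and whole-file locked (or
 * delegated).  Rather than sending a 100-byte WRITE, nfs_updatepage()
 * below widens the request to the full cached page, which keeps the
 * client's request stream and the server-side file layout less
 * fragmented.
 */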
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode	*inode = page_file_mapping(page)->host;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));

	if (nfs_can_extend_write(file, page, inode)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(page);
	else
		__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
			status, (long long)i_size_read(inode));
	return status;
}
static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
int nfs_initiate_write(struct rpc_clnt *clnt,
		       struct nfs_pgio_data *data,
		       const struct rpc_call_ops *call_ops,
		       int how, int flags)
{
	struct inode *inode = data->header->inode;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->header->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &data->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};
	int ret = 0;

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->write_setup(data, &msg);

	dprintk("NFS: %5u initiated write call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode),
		data->args.count,
		(unsigned long long)data->args.offset);

	nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
				 &task_setup_data.rpc_client, &msg, data);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_write);
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_pgio_data *data,
		unsigned int count, unsigned int offset,
		int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = data->header->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->args.fh     = NFS_FH(data->header->inode);
	data->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	data->mds_offset = data->args.offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pages.pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;
	data->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
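/*
 * A note on the stable flag set up above, following NFSv3 WRITE
 * semantics: NFS_UNSTABLE data may be cached by the server and needs a
 * later COMMIT, while NFS_FILE_SYNC data must reach stable storage
 * before the reply.  FLUSH_COND_STABLE upgrades to NFS_FILE_SYNC only
 * when no other requests are waiting for a commit, since a COMMIT
 * would otherwise have to be sent anyway.
 */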
static int nfs_do_write(struct nfs_pgio_data *data,
		const struct rpc_call_ops *call_ops,
		int how)
{
	struct inode *inode = data->header->inode;

	return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
}

static int nfs_do_multiple_writes(struct list_head *head,
		const struct rpc_call_ops *call_ops,
		int how)
{
	struct nfs_pgio_data *data;
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

		data = list_first_entry(head, struct nfs_pgio_data, list);
		list_del_init(&data->list);

		ret2 = nfs_do_write(data, call_ops, how);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}
/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	nfs_unlock_request(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
};
static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	while (!list_empty(&hdr->rpc_list)) {
		struct nfs_pgio_data *data = list_first_entry(&hdr->rpc_list,
				struct nfs_pgio_data, list);
		list_del(&data->list);
		nfs_writedata_release(data);
	}
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
			   struct nfs_pgio_header *hdr)
{
	struct nfs_page *req = hdr->req;
	struct page *page = req->wb_page;
	struct nfs_pgio_data *data;
	size_t wsize = desc->pg_bsize, nbytes;
	unsigned int offset;
	int requests = 0;
	struct nfs_commit_info cinfo;

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
	     desc->pg_count > wsize))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	offset = 0;
	nbytes = desc->pg_count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(hdr, 1);
		if (!data) {
			nfs_flush_error(desc, hdr);
			return -ENOMEM;
		}
		data->pages.pagevec[0] = page;
		nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
		list_add(&data->list, &hdr->rpc_list);
		requests++;
		nbytes -= len;
		offset += len;
	} while (nbytes != 0);
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &hdr->pages);
	desc->pg_rpc_callops = &nfs_write_common_ops;
	return 0;
}
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
			 struct nfs_pgio_header *hdr)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_pgio_data	*data;
	struct list_head *head = &desc->pg_list;
	struct nfs_commit_info cinfo;

	data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
							   desc->pg_count));
	if (!data) {
		nfs_flush_error(desc, hdr);
		return -ENOMEM;
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = data->pages.pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);
		*pages++ = req->wb_page;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
	list_add(&data->list, &hdr->rpc_list);
	desc->pg_rpc_callops = &nfs_write_common_ops;
	return 0;
}
int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	if (desc->pg_bsize < PAGE_CACHE_SIZE)
		return nfs_flush_multi(desc, hdr);
	return nfs_flush_one(desc, hdr);
}
EXPORT_SYMBOL_GPL(nfs_generic_flush);

static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_rw_header *whdr;
	struct nfs_pgio_header *hdr;
	int ret;

	whdr = nfs_writehdr_alloc();
	if (!whdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		return -ENOMEM;
	}
	hdr = &whdr->header;
	nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_flush(desc, hdr);
	if (ret == 0)
		ret = nfs_do_multiple_writes(&hdr->rpc_list,
					     desc->pg_rpc_callops,
					     desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}

static const struct nfs_pageio_ops nfs_pageio_write_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_writepages,
};
void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			   struct inode *inode, int ioflags, bool force_mds,
			   const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pageio_write_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pageio_write_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	int err;

	err = NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
	if (err)
		rpc_exit(task, err);
}

void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;

	nfs_writeback_done(task, data);
}
static void nfs_writeback_release_common(void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	struct nfs_pgio_header *hdr = data->header;
	int status = data->task.tk_status;

	if ((status >= 0) && nfs_write_need_commit(data)) {
		spin_lock(&hdr->lock);
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
			; /* Do nothing */
		else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
			memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf));
		else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf)))
			set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
		spin_unlock(&hdr->lock);
	}
	nfs_writedata_release(data);
}
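/*
 * The verifier handling above keeps one write verifier per header: the
 * first completed sub-request stores its verifier, and any later
 * sub-request whose verifier differs (the server may have rebooted in
 * between) flags NFS_IOHDR_NEED_RESCHED so the pages are rewritten
 * rather than merely committed.
 */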
static const struct rpc_call_ops nfs_write_common_ops = {
	.rpc_call_prepare = nfs_write_prepare,
	.rpc_call_done = nfs_writeback_done_common,
	.rpc_release = nfs_writeback_release_common,
};
/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}
/*
 * This function is called when the WRITE call is complete.
 */
void nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data)
{
	struct nfs_pgio_args	*argp = &data->args;
	struct nfs_pgio_res	*resp = &data->res;
	struct inode		*inode = data->header->inode;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, data);
	if (status != 0)
		return;
	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	if (task->tk_status < 0) {
		nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
		return;
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode))
		nfs_mark_for_revalidate(inode);

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(data->header, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}
		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			data->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		rpc_restart_call_prepare(task);
	}
}
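/*
 * Short-write example for the code above: if 64K was requested but the
 * server wrote only 16K, the argument struct is advanced by 16K
 * (offset, pgbase, count) and the task restarted, so the remaining 48K
 * is retransmitted.  An unstable short write is instead resent in full
 * as NFS_FILE_SYNC, to avoid juggling partial verifiers across a
 * possible server crash.
 */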
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
	int ret;

	if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
		return 1;
	if (!may_wait)
		return 0;
	ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
	return (ret < 0) ? ret : 1;
}

static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
	clear_bit(NFS_INO_COMMIT, &nfsi->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};
	/* Set up the initial task struct. */
	NFS_PROTO(data->inode)->commit_setup(data, &msg);

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
		NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);
/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = first->wb_context->dentry->d_inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;
	data->lseg	  = lseg; /* reference transferred */
	data->mds_ops     = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq	  = cinfo->dreq;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->context     = get_nfs_open_context(first->wb_context);
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);
void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo);
		if (!cinfo->dreq) {
			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
			dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
				     BDI_RECLAIMABLE);
		}
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data	*data;

	data = nfs_commitdata_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
				   how, 0);
 out_bad:
	nfs_retry_commit(head, NULL, cinfo);
	cinfo->completion_ops->error_cleanup(NFS_I(inode));
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data	*data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
}
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	struct nfs_page	*req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		nfs_clear_page_commit(req->wb_page);

		dprintk("NFS: commit (%s/%llu %d@%lld)",
			req->wb_context->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	next:
		nfs_unlock_and_release_request(req);
	}
	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_commit_clear_lock(NFS_I(data->inode));
}
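/*
 * The memcmp() against each stored verifier above is what gives COMMIT
 * its crash consistency: every request remembers the verifier returned
 * by its WRITE, and if the COMMIT reply carries a different one the
 * server must have rebooted and may have lost the unstable data, so
 * the page is redirtied and written again.
 */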
static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.error_cleanup = nfs_commit_clear_lock,
};
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}

int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int res;

	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
	if (res <= 0)
		goto out_mark_dirty;
	nfs_init_cinfo_from_inode(&cinfo, inode);
	res = nfs_scan_commit(inode, &head, &cinfo);
	if (res) {
		int error;

		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
		if (error < 0)
			return error;
		if (!may_wait)
			goto out_mark_dirty;
		error = wait_on_bit(&NFS_I(inode)->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (error < 0)
			return error;
	} else
		nfs_commit_clear_lock(NFS_I(inode));
	return res;
	/* Note: If we exit without ensuring that the commit is complete,
	 *	 we must mark the inode as dirty. Otherwise, future calls to
	 *	 sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
	 *	 that the data is on the disk.
	 */
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return res;
}
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	/* no commits means nothing needs to be done */
	if (!nfsi->commit_info.ncommit)
		return ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = nfs_commit_inode(inode, flags);
	if (ret >= 0) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (ret < wbc->nr_to_write)
				wbc->nr_to_write -= ret;
			else
				wbc->nr_to_write = 0;
		}
		return 0;
	}
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
#else
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	return 0;
}
#endif

int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return nfs_commit_unstable_pages(inode, wbc);
}
EXPORT_SYMBOL_GPL(nfs_write_inode);
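/*
 * nfs_write_inode() above is wired up as the NFS super_operations
 * ->write_inode hook, so the VFS writeback machinery calls it after
 * flushing an inode's dirty pages; this is the point at which pages
 * written UNSTABLE get turned into COMMIT calls.
 */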
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = sync_inode(inode, &wbc);

	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	for (;;) {
		wait_on_page_writeback(page);
		req = nfs_page_find_request(page);
		if (req == NULL)
			break;
		if (nfs_lock_request(req)) {
			nfs_clear_request_commit(req);
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_and_release_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret < 0)
			break;
	}
	return ret;
}
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}
#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_rw_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}
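/*
 * Worked example for the scaling above, assuming 4K pages: a machine
 * with 1GB of RAM has totalram_pages = 262144, so nfs_congestion_kb =
 * (16 * int_sqrt(262144)) << 2 = 16 * 512 * 4 = 32768k, matching the
 * table in the comment.
 */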
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}