1 /*
2 * linux/fs/nfs/write.c
3 *
4 * Write file data over NFS.
5 *
6 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7 */
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 #include <linux/migrate.h>
17
18 #include <linux/sunrpc/clnt.h>
19 #include <linux/nfs_fs.h>
20 #include <linux/nfs_mount.h>
21 #include <linux/nfs_page.h>
22 #include <linux/backing-dev.h>
23 #include <linux/export.h>
24 #include <linux/freezer.h>
25 #include <linux/wait.h>
26 #include <linux/iversion.h>
27
28 #include <linux/uaccess.h>
29 #include <linux/sched/mm.h>
30
31 #include "delegation.h"
32 #include "internal.h"
33 #include "iostat.h"
34 #include "nfs4_fs.h"
35 #include "fscache.h"
36 #include "pnfs.h"
37
38 #include "nfstrace.h"
39
40 #define NFSDBG_FACILITY NFSDBG_PAGECACHE
41
42 #define MIN_POOL_WRITE (32)
43 #define MIN_POOL_COMMIT (4)
44
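/*
 * A reference-counted completion callback: the final nfs_io_completion_put()
 * invokes ->complete(->data) and frees the structure (see
 * nfs_io_completion_release() below).
 */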
45 struct nfs_io_completion {
46 void (*complete)(void *data);
47 void *data;
48 struct kref refcount;
49 };
50
51 /*
52 * Local function declarations
53 */
54 static void nfs_redirty_request(struct nfs_page *req);
55 static const struct rpc_call_ops nfs_commit_ops;
56 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
57 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
58 static const struct nfs_rw_ops nfs_rw_write_ops;
59 static void nfs_clear_request_commit(struct nfs_page *req);
60 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
61 struct inode *inode);
62 static struct nfs_page *
63 nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
64 struct page *page);
65
66 static struct kmem_cache *nfs_wdata_cachep;
67 static mempool_t *nfs_wdata_mempool;
68 static struct kmem_cache *nfs_cdata_cachep;
69 static mempool_t *nfs_commit_mempool;
70
71 struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
72 {
73 struct nfs_commit_data *p;
74
75 if (never_fail)
76 p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
77 else {
78 /* It is OK to do some reclaim, but not safe to wait
79 * for anything to be returned to the pool.
80 * mempool_alloc() cannot handle that particular combination,
81 * so we need two separate attempts.
82 */
83 p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
84 if (!p)
85 p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
86 __GFP_NOWARN | __GFP_NORETRY);
87 if (!p)
88 return NULL;
89 }
90
91 memset(p, 0, sizeof(*p));
92 INIT_LIST_HEAD(&p->pages);
93 return p;
94 }
95 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
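/*
 * Illustrative call sequence (mirroring nfs_commit_list() below): a caller
 * that cannot tolerate allocation failure passes never_fail = true and
 * relies on the mempool guarantee:
 *
 *	data = nfs_commitdata_alloc(true);
 *	nfs_init_commit(data, head, NULL, cinfo);
 *	nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
 *			    data->mds_ops, how, 0);
 */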
96
97 void nfs_commit_free(struct nfs_commit_data *p)
98 {
99 mempool_free(p, nfs_commit_mempool);
100 }
101 EXPORT_SYMBOL_GPL(nfs_commit_free);
102
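/* mempool_alloc() with GFP_NOIO may sleep but will not fail here, hence
 * no NULL check before the memset() below */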
103 static struct nfs_pgio_header *nfs_writehdr_alloc(void)
104 {
105 struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
106
107 memset(p, 0, sizeof(*p));
108 p->rw_mode = FMODE_WRITE;
109 return p;
110 }
111
112 static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
113 {
114 mempool_free(hdr, nfs_wdata_mempool);
115 }
116
117 static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
118 {
119 return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
120 }
121
122 static void nfs_io_completion_init(struct nfs_io_completion *ioc,
123 void (*complete)(void *), void *data)
124 {
125 ioc->complete = complete;
126 ioc->data = data;
127 kref_init(&ioc->refcount);
128 }
129
130 static void nfs_io_completion_release(struct kref *kref)
131 {
132 struct nfs_io_completion *ioc = container_of(kref,
133 struct nfs_io_completion, refcount);
134 ioc->complete(ioc->data);
135 kfree(ioc);
136 }
137
138 static void nfs_io_completion_get(struct nfs_io_completion *ioc)
139 {
140 if (ioc != NULL)
141 kref_get(&ioc->refcount);
142 }
143
144 static void nfs_io_completion_put(struct nfs_io_completion *ioc)
145 {
146 if (ioc != NULL)
147 kref_put(&ioc->refcount, nfs_io_completion_release);
148 }
149
150 static struct nfs_page *
151 nfs_page_private_request(struct page *page)
152 {
153 if (!PagePrivate(page))
154 return NULL;
155 return (struct nfs_page *)page_private(page);
156 }
157
158 /*
159 * nfs_page_find_private_request - find head request associated with @page
160 *
161 * takes the mapping's private_lock internally; callers need not hold any locks.
162 *
163 * returns matching head request with reference held, or NULL if not found.
164 */
165 static struct nfs_page *
166 nfs_page_find_private_request(struct page *page)
167 {
168 struct address_space *mapping = page_file_mapping(page);
169 struct nfs_page *req;
170
171 if (!PagePrivate(page))
172 return NULL;
173 spin_lock(&mapping->private_lock);
174 req = nfs_page_private_request(page);
175 if (req) {
176 WARN_ON_ONCE(req->wb_head != req);
177 kref_get(&req->wb_kref);
178 }
179 spin_unlock(&mapping->private_lock);
180 return req;
181 }
182
183 static struct nfs_page *
184 nfs_page_find_swap_request(struct page *page)
185 {
186 struct inode *inode = page_file_mapping(page)->host;
187 struct nfs_inode *nfsi = NFS_I(inode);
188 struct nfs_page *req = NULL;
189 if (!PageSwapCache(page))
190 return NULL;
191 mutex_lock(&nfsi->commit_mutex);
192 if (PageSwapCache(page)) {
193 req = nfs_page_search_commits_for_head_request_locked(nfsi,
194 page);
195 if (req) {
196 WARN_ON_ONCE(req->wb_head != req);
197 kref_get(&req->wb_kref);
198 }
199 }
200 mutex_unlock(&nfsi->commit_mutex);
201 return req;
202 }
203
204 /*
205 * nfs_page_find_head_request - find head request associated with @page
206 *
207 * returns matching head request with reference held, or NULL if not found.
208 */
209 static struct nfs_page *nfs_page_find_head_request(struct page *page)
210 {
211 struct nfs_page *req;
212
213 req = nfs_page_find_private_request(page);
214 if (!req)
215 req = nfs_page_find_swap_request(page);
216 return req;
217 }
218
219 /* Adjust the file length if we're writing beyond the end */
220 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
221 {
222 struct inode *inode = page_file_mapping(page)->host;
223 loff_t end, i_size;
224 pgoff_t end_index;
225
226 spin_lock(&inode->i_lock);
227 i_size = i_size_read(inode);
228 end_index = (i_size - 1) >> PAGE_SHIFT;
229 if (i_size > 0 && page_index(page) < end_index)
230 goto out;
231 end = page_file_offset(page) + ((loff_t)offset+count);
232 if (i_size >= end)
233 goto out;
234 i_size_write(inode, end);
235 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
236 nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
237 out:
238 spin_unlock(&inode->i_lock);
239 }
240
241 /* A writeback failed: mark the page as bad, and invalidate the page cache */
242 static void nfs_set_pageerror(struct address_space *mapping)
243 {
244 nfs_zap_mapping(mapping->host, mapping);
245 }
246
247 /*
248 * nfs_page_group_search_locked
249 * @head - head request of page group
250 * @page_offset - offset into page
251 *
252 * Search page group with head @head to find a request that contains the
253 * page offset @page_offset.
254 *
255 * Returns a pointer to the first matching nfs request, or NULL if no
256 * match is found.
257 *
258 * Must be called with the page group lock held
259 */
260 static struct nfs_page *
261 nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
262 {
263 struct nfs_page *req;
264
265 req = head;
266 do {
267 if (page_offset >= req->wb_pgbase &&
268 page_offset < (req->wb_pgbase + req->wb_bytes))
269 return req;
270
271 req = req->wb_this_page;
272 } while (req != head);
273
274 return NULL;
275 }
276
277 /*
278 * nfs_page_group_covers_page
279 * @req - head request of page group
280 *
281 * Returns true if the page group with head @req covers the whole page,
282 * false otherwise.
283 */
284 static bool nfs_page_group_covers_page(struct nfs_page *req)
285 {
286 struct nfs_page *tmp;
287 unsigned int pos = 0;
288 unsigned int len = nfs_page_length(req->wb_page);
289
290 nfs_page_group_lock(req);
291
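	/* walk the contiguous subrequest ranges forward from page offset 0;
	 * the loop below ends when a gap is found or the page is covered */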
292 for (;;) {
293 tmp = nfs_page_group_search_locked(req->wb_head, pos);
294 if (!tmp)
295 break;
296 pos = tmp->wb_pgbase + tmp->wb_bytes;
297 }
298
299 nfs_page_group_unlock(req);
300 return pos >= len;
301 }
302
303 /* We can set the PG_uptodate flag if we see that a write request
304 * covers the full page.
305 */
306 static void nfs_mark_uptodate(struct nfs_page *req)
307 {
308 if (PageUptodate(req->wb_page))
309 return;
310 if (!nfs_page_group_covers_page(req))
311 return;
312 SetPageUptodate(req->wb_page);
313 }
314
315 static int wb_priority(struct writeback_control *wbc)
316 {
317 int ret = 0;
318
319 if (wbc->sync_mode == WB_SYNC_ALL)
320 ret = FLUSH_COND_STABLE;
321 return ret;
322 }
323
324 /*
325 * NFS congestion control
326 */
327
328 int nfs_congestion_kb;
329
330 #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
331 #define NFS_CONGESTION_OFF_THRESH \
332 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
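/*
 * Hysteresis: the bdi is marked congested once per-server writeback pages
 * exceed the "on" threshold, and uncongested only after they fall below
 * the "off" threshold (3/4 of "on"). nfs_congestion_kb is sized at module
 * init and is tunable via sysctl.
 */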
333
334 static void nfs_set_page_writeback(struct page *page)
335 {
336 struct inode *inode = page_file_mapping(page)->host;
337 struct nfs_server *nfss = NFS_SERVER(inode);
338 int ret = test_set_page_writeback(page);
339
340 WARN_ON_ONCE(ret != 0);
341
342 if (atomic_long_inc_return(&nfss->writeback) >
343 NFS_CONGESTION_ON_THRESH)
344 set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
345 }
346
347 static void nfs_end_page_writeback(struct nfs_page *req)
348 {
349 struct inode *inode = page_file_mapping(req->wb_page)->host;
350 struct nfs_server *nfss = NFS_SERVER(inode);
351 bool is_done;
352
353 is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
354 nfs_unlock_request(req);
355 if (!is_done)
356 return;
357
358 end_page_writeback(req->wb_page);
359 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
360 clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
361 }
362
363 /*
364 * nfs_unroll_locks - unlock all newly locked requests in the page group
365 *
366 * this is a helper function for nfs_lock_and_join_requests
367 *
368 * @inode - inode associated with the request page group
369 * @head - head request of page group, must be holding head lock
370 * @req - request at which to stop unrolling; its lock is left untouched
371 *
372 * relinquishes the locks grabbed so far in the current pass by
373 * unlocking and releasing every locked subrequest between @head
374 * and @req. Returns nothing; the head lock itself is kept.
375 *
376 */
377 static void
378 nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
379 struct nfs_page *req)
380 {
381 struct nfs_page *tmp;
382
383 /* relinquish all the locks successfully grabbed this run */
384 for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
385 if (!kref_read(&tmp->wb_kref))
386 continue;
387 nfs_unlock_and_release_request(tmp);
388 }
389 }
390
391 /*
392 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
393 *
394 * @destroy_list - request list (using wb_this_page) terminated by @old_head
395 * @old_head - the old head of the list
396 *
397 * All subrequests must be locked and removed from all lists, so at this point
398 * they are only "active" in this function, and possibly in nfs_wait_on_request
399 * with a reference held by some other context.
400 */
401 static void
402 nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
403 struct nfs_page *old_head,
404 struct inode *inode)
405 {
406 while (destroy_list) {
407 struct nfs_page *subreq = destroy_list;
408
409 destroy_list = (subreq->wb_this_page == old_head) ?
410 NULL : subreq->wb_this_page;
411
412 WARN_ON_ONCE(old_head != subreq->wb_head);
413
414 /* make sure old group is not used */
415 subreq->wb_this_page = subreq;
416
417 clear_bit(PG_REMOVE, &subreq->wb_flags);
418
419 /* Note: races with nfs_page_group_destroy() */
420 if (!kref_read(&subreq->wb_kref)) {
421 /* Check if we raced with nfs_page_group_destroy() */
422 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
423 nfs_free_request(subreq);
424 continue;
425 }
426
427 subreq->wb_head = subreq;
428
429 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
430 nfs_release_request(subreq);
431 atomic_long_dec(&NFS_I(inode)->nrequests);
432 }
433
434 /* subreq is now totally disconnected from page group or any
435 * write / commit lists. last chance to wake any waiters */
436 nfs_unlock_and_release_request(subreq);
437 }
438 }
439
440 /*
441 * nfs_lock_and_join_requests - join all subreqs to the head req and return
442 * a locked reference, cancelling any pending
443 * operations for this page.
444 *
445 * @page - the page used to lookup the "page group" of nfs_page structures
446 *
447 * This function joins all sub requests to the head request by first
448 * locking all requests in the group, cancelling any pending operations
449 * and finally updating the head request to cover the whole range covered by
450 * the (former) group. All subrequests are removed from any write or commit
451 * lists, unlinked from the group and destroyed.
452 *
453 * Returns a locked, referenced pointer to the head request - which after
454 * this call is guaranteed to be the only request associated with the page.
455 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
456 * error was encountered.
457 */
458 static struct nfs_page *
459 nfs_lock_and_join_requests(struct page *page)
460 {
461 struct inode *inode = page_file_mapping(page)->host;
462 struct nfs_page *head, *subreq;
463 struct nfs_page *destroy_list = NULL;
464 unsigned int total_bytes;
465 int ret;
466
467 try_again:
468 /*
469 * A reference is taken only on the head request which acts as a
470 * reference to the whole page group - the group will not be destroyed
471 * until the head reference is released.
472 */
473 head = nfs_page_find_head_request(page);
474 if (!head)
475 return NULL;
476
477 /* lock the page head first in order to avoid an ABBA inefficiency */
478 if (!nfs_lock_request(head)) {
479 ret = nfs_wait_on_request(head);
480 nfs_release_request(head);
481 if (ret < 0)
482 return ERR_PTR(ret);
483 goto try_again;
484 }
485
486 /* Ensure that nobody removed the request before we locked it */
487 if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
488 nfs_unlock_and_release_request(head);
489 goto try_again;
490 }
491
492 ret = nfs_page_group_lock(head);
493 if (ret < 0)
494 goto release_request;
495
496 /* lock each request in the page group */
497 total_bytes = head->wb_bytes;
498 for (subreq = head->wb_this_page; subreq != head;
499 subreq = subreq->wb_this_page) {
500
501 if (!kref_get_unless_zero(&subreq->wb_kref)) {
502 if (subreq->wb_offset == head->wb_offset + total_bytes)
503 total_bytes += subreq->wb_bytes;
504 continue;
505 }
506
507 while (!nfs_lock_request(subreq)) {
508 /*
509 * Unlock page to allow nfs_page_group_sync_on_bit()
510 * to succeed
511 */
512 nfs_page_group_unlock(head);
513 ret = nfs_wait_on_request(subreq);
514 if (!ret)
515 ret = nfs_page_group_lock(head);
516 if (ret < 0) {
517 nfs_unroll_locks(inode, head, subreq);
518 nfs_release_request(subreq);
519 goto release_request;
520 }
521 }
522 /*
523 * Subrequests are always contiguous, non-overlapping
524 * and in order - but may be repeated (mirrored writes).
525 */
526 if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
527 /* keep track of how many bytes this group covers */
528 total_bytes += subreq->wb_bytes;
529 } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
530 ((subreq->wb_offset + subreq->wb_bytes) >
531 (head->wb_offset + total_bytes)))) {
532 nfs_page_group_unlock(head);
533 nfs_unroll_locks(inode, head, subreq);
534 nfs_unlock_and_release_request(subreq);
535 ret = -EIO;
536 goto release_request;
537 }
538 }
539
540 /* Now that all requests are locked, make sure they aren't on any list.
541 * Commit list removal accounting is done after locks are dropped */
542 subreq = head;
543 do {
544 nfs_clear_request_commit(subreq);
545 subreq = subreq->wb_this_page;
546 } while (subreq != head);
547
548 /* unlink subrequests from head, destroy them later */
549 if (head->wb_this_page != head) {
550 /* destroy list will be terminated by head */
551 destroy_list = head->wb_this_page;
552 head->wb_this_page = head;
553
554 /* change head request to cover whole range that
555 * the former page group covered */
556 head->wb_bytes = total_bytes;
557 }
558
559 /* Postpone destruction of this request */
560 if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
561 set_bit(PG_INODE_REF, &head->wb_flags);
562 kref_get(&head->wb_kref);
563 atomic_long_inc(&NFS_I(inode)->nrequests);
564 }
565
566 nfs_page_group_unlock(head);
567
568 nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
569
570 /* Did we lose a race with nfs_inode_remove_request()? */
571 if (!(PagePrivate(page) || PageSwapCache(page))) {
572 nfs_unlock_and_release_request(head);
573 return NULL;
574 }
575
576 /* still holds ref on head from nfs_page_find_head_request
577 * and still has lock on head from lock loop */
578 return head;
579
580 release_request:
581 nfs_unlock_and_release_request(head);
582 return ERR_PTR(ret);
583 }
584
585 static void nfs_write_error_remove_page(struct nfs_page *req)
586 {
587 nfs_end_page_writeback(req);
588 generic_error_remove_page(page_file_mapping(req->wb_page),
589 req->wb_page);
590 nfs_release_request(req);
591 }
592
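/*
 * 0 and signal-related errors are never treated as fatal on the server;
 * everything else is deferred to nfs_error_is_fatal() (errors such as
 * -EIO; see its definition in internal.h).
 */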
593 static bool
594 nfs_error_is_fatal_on_server(int err)
595 {
596 switch (err) {
597 case 0:
598 case -ERESTARTSYS:
599 case -EINTR:
600 return false;
601 }
602 return nfs_error_is_fatal(err);
603 }
604
605 /*
606 * Find an associated nfs write request, and prepare to flush it out.
607 * May return an error if the user signalled nfs_wait_on_request().
608 */
609 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
610 struct page *page)
611 {
612 struct nfs_page *req;
613 int ret = 0;
614
615 req = nfs_lock_and_join_requests(page);
616 if (!req)
617 goto out;
618 ret = PTR_ERR(req);
619 if (IS_ERR(req))
620 goto out;
621
622 nfs_set_page_writeback(page);
623 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
624
625 ret = req->wb_context->error;
626 /* If there is a fatal error that covers this write, just exit */
627 if (nfs_error_is_fatal_on_server(ret))
628 goto out_launder;
629
630 ret = 0;
631 if (!nfs_pageio_add_request(pgio, req)) {
632 ret = pgio->pg_error;
633 /*
634 * Remove the problematic req upon fatal errors on the server
635 */
636 if (nfs_error_is_fatal(ret)) {
637 nfs_context_set_write_error(req->wb_context, ret);
638 if (nfs_error_is_fatal_on_server(ret))
639 goto out_launder;
640 } else
641 ret = -EAGAIN;
642 nfs_redirty_request(req);
643 } else
644 nfs_add_stats(page_file_mapping(page)->host,
645 NFSIOS_WRITEPAGES, 1);
646 out:
647 return ret;
648 out_launder:
649 nfs_write_error_remove_page(req);
650 return ret;
651 }
652
653 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
654 struct nfs_pageio_descriptor *pgio)
655 {
656 int ret;
657
658 nfs_pageio_cond_complete(pgio, page_index(page));
659 ret = nfs_page_async_flush(pgio, page);
660 if (ret == -EAGAIN) {
661 redirty_page_for_writepage(wbc, page);
662 ret = 0;
663 }
664 return ret;
665 }
666
667 /*
668 * Write an mmapped page to the server.
669 */
670 static int nfs_writepage_locked(struct page *page,
671 struct writeback_control *wbc)
672 {
673 struct nfs_pageio_descriptor pgio;
674 struct inode *inode = page_file_mapping(page)->host;
675 int err;
676
677 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
678 nfs_pageio_init_write(&pgio, inode, 0,
679 false, &nfs_async_write_completion_ops);
680 err = nfs_do_writepage(page, wbc, &pgio);
681 nfs_pageio_complete(&pgio);
682 if (err < 0)
683 return err;
684 if (pgio.pg_error < 0)
685 return pgio.pg_error;
686 return 0;
687 }
688
689 int nfs_writepage(struct page *page, struct writeback_control *wbc)
690 {
691 int ret;
692
693 ret = nfs_writepage_locked(page, wbc);
694 unlock_page(page);
695 return ret;
696 }
697
698 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
699 {
700 int ret;
701
702 ret = nfs_do_writepage(page, wbc, data);
703 unlock_page(page);
704 return ret;
705 }
706
707 static void nfs_io_completion_commit(void *inode)
708 {
709 nfs_commit_inode(inode, 0);
710 }
711
712 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
713 {
714 struct inode *inode = mapping->host;
715 struct nfs_pageio_descriptor pgio;
716 struct nfs_io_completion *ioc;
717 unsigned int pflags = memalloc_nofs_save();
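	/* scope NOFS: allocations made during writeback must not recurse
	 * into filesystem reclaim; restored before returning below */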
718 int err;
719
720 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
721
722 ioc = nfs_io_completion_alloc(GFP_NOFS);
723 if (ioc)
724 nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
725
726 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
727 &nfs_async_write_completion_ops);
728 pgio.pg_io_completion = ioc;
729 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
730 nfs_pageio_complete(&pgio);
731 nfs_io_completion_put(ioc);
732
733 memalloc_nofs_restore(pflags);
734
735 if (err < 0)
736 goto out_err;
737 err = pgio.pg_error;
738 if (err < 0)
739 goto out_err;
740 return 0;
741 out_err:
742 return err;
743 }
744
745 /*
746 * Insert a write request into an inode
747 */
748 static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
749 {
750 struct address_space *mapping = page_file_mapping(req->wb_page);
751 struct nfs_inode *nfsi = NFS_I(inode);
752
753 WARN_ON_ONCE(req->wb_this_page != req);
754
755 /* Lock the request! */
756 nfs_lock_request(req);
757
758 /*
759 * Swap-space should not get truncated. Hence no need to plug the race
760 * with invalidate/truncate.
761 */
762 spin_lock(&mapping->private_lock);
763 if (!nfs_have_writebacks(inode) &&
764 NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
765 inode_inc_iversion_raw(inode);
766 if (likely(!PageSwapCache(req->wb_page))) {
767 set_bit(PG_MAPPED, &req->wb_flags);
768 SetPagePrivate(req->wb_page);
769 set_page_private(req->wb_page, (unsigned long)req);
770 }
771 spin_unlock(&mapping->private_lock);
772 atomic_long_inc(&nfsi->nrequests);
773 /* this is a head request for a page group - mark it as having an
774 * extra reference so sub groups can follow suit.
775 * This flag also informs the pgio layer when to bump nrequests when
776 * adding subrequests. */
777 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
778 kref_get(&req->wb_kref);
779 }
780
781 /*
782 * Remove a write request from an inode
783 */
784 static void nfs_inode_remove_request(struct nfs_page *req)
785 {
786 struct address_space *mapping = page_file_mapping(req->wb_page);
787 struct inode *inode = mapping->host;
788 struct nfs_inode *nfsi = NFS_I(inode);
789 struct nfs_page *head;
790
791 atomic_long_dec(&nfsi->nrequests);
792 if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
793 head = req->wb_head;
794
795 spin_lock(&mapping->private_lock);
796 if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
797 set_page_private(head->wb_page, 0);
798 ClearPagePrivate(head->wb_page);
799 clear_bit(PG_MAPPED, &head->wb_flags);
800 }
801 spin_unlock(&mapping->private_lock);
802 }
803
804 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
805 nfs_release_request(req);
806 }
807
808 static void
809 nfs_mark_request_dirty(struct nfs_page *req)
810 {
811 if (req->wb_page)
812 __set_page_dirty_nobuffers(req->wb_page);
813 }
814
815 /*
816 * nfs_page_search_commits_for_head_request_locked
817 *
818 * Search through commit lists on @inode for the head request for @page.
819 * Must be called while holding NFS_I(inode)->commit_mutex.
820 *
821 * Returns the head request if found, or NULL if not found.
822 */
823 static struct nfs_page *
824 nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
825 struct page *page)
826 {
827 struct nfs_page *freq, *t;
828 struct nfs_commit_info cinfo;
829 struct inode *inode = &nfsi->vfs_inode;
830
831 nfs_init_cinfo_from_inode(&cinfo, inode);
832
833 /* search through pnfs commit lists */
834 freq = pnfs_search_commit_reqs(inode, &cinfo, page);
835 if (freq)
836 return freq->wb_head;
837
838 /* Linearly search the commit list for the correct request */
839 list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
840 if (freq->wb_page == page)
841 return freq->wb_head;
842 }
843
844 return NULL;
845 }
846
847 /**
848 * nfs_request_add_commit_list_locked - add request to a commit list
849 * @req: pointer to a struct nfs_page
850 * @dst: commit list head
851 * @cinfo: holds list lock and accounting info
852 *
853 * This sets the PG_CLEAN bit, updates the cinfo count of the
854 * number of outstanding requests requiring a commit, as well as
855 * the MM page stats.
856 *
857 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
858 * nfs_page lock.
859 */
860 void
861 nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
862 struct nfs_commit_info *cinfo)
863 {
864 set_bit(PG_CLEAN, &req->wb_flags);
865 nfs_list_add_request(req, dst);
866 atomic_long_inc(&cinfo->mds->ncommit);
867 }
868 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
869
870 /**
871 * nfs_request_add_commit_list - add request to a commit list
872 * @req: pointer to a struct nfs_page
873 * @cinfo: holds list lock and accounting info
874 *
875 * This sets the PG_CLEAN bit, updates the cinfo count of the
876 * number of outstanding requests requiring a commit, as well as
877 * the MM page stats.
878 *
879 * The caller must _not_ hold NFS_I(cinfo->inode)->commit_mutex, but must
880 * be holding the nfs_page lock.
881 */
882 void
883 nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
884 {
885 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
886 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
887 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
888 if (req->wb_page)
889 nfs_mark_page_unstable(req->wb_page, cinfo);
890 }
891 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
892
893 /**
894 * nfs_request_remove_commit_list - Remove request from a commit list
895 * @req: pointer to a nfs_page
896 * @cinfo: holds list lock and accounting info
897 *
898 * This clears the PG_CLEAN bit, and updates the cinfo count of the
899 * number of outstanding requests requiring a commit.
900 * It does not update the MM page stats.
901 *
902 * The caller _must_ hold NFS_I(cinfo->inode)->commit_mutex and the nfs_page lock.
903 */
904 void
905 nfs_request_remove_commit_list(struct nfs_page *req,
906 struct nfs_commit_info *cinfo)
907 {
908 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
909 return;
910 nfs_list_remove_request(req);
911 atomic_long_dec(&cinfo->mds->ncommit);
912 }
913 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
914
915 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
916 struct inode *inode)
917 {
918 cinfo->inode = inode;
919 cinfo->mds = &NFS_I(inode)->commit_info;
920 cinfo->ds = pnfs_get_ds_info(inode);
921 cinfo->dreq = NULL;
922 cinfo->completion_ops = &nfs_commit_completion_ops;
923 }
924
925 void nfs_init_cinfo(struct nfs_commit_info *cinfo,
926 struct inode *inode,
927 struct nfs_direct_req *dreq)
928 {
929 if (dreq)
930 nfs_init_cinfo_from_dreq(cinfo, dreq);
931 else
932 nfs_init_cinfo_from_inode(cinfo, inode);
933 }
934 EXPORT_SYMBOL_GPL(nfs_init_cinfo);
935
936 /*
937 * Add a request to the inode's commit list.
938 */
939 void
940 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
941 struct nfs_commit_info *cinfo, u32 ds_commit_idx)
942 {
943 if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
944 return;
945 nfs_request_add_commit_list(req, cinfo);
946 }
947
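/* undo the "unstable page" accounting added when the request was put on
 * a commit list (see nfs_mark_page_unstable()) */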
948 static void
949 nfs_clear_page_commit(struct page *page)
950 {
951 dec_node_page_state(page, NR_UNSTABLE_NFS);
952 dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
953 WB_RECLAIMABLE);
954 }
955
956 /* Called holding the request lock on @req */
957 static void
958 nfs_clear_request_commit(struct nfs_page *req)
959 {
960 if (test_bit(PG_CLEAN, &req->wb_flags)) {
961 struct inode *inode = d_inode(req->wb_context->dentry);
962 struct nfs_commit_info cinfo;
963
964 nfs_init_cinfo_from_inode(&cinfo, inode);
965 mutex_lock(&NFS_I(inode)->commit_mutex);
966 if (!pnfs_clear_request_commit(req, &cinfo)) {
967 nfs_request_remove_commit_list(req, &cinfo);
968 }
969 mutex_unlock(&NFS_I(inode)->commit_mutex);
970 nfs_clear_page_commit(req->wb_page);
971 }
972 }
973
974 int nfs_write_need_commit(struct nfs_pgio_header *hdr)
975 {
976 if (hdr->verf.committed == NFS_DATA_SYNC)
977 return hdr->lseg == NULL;
978 return hdr->verf.committed != NFS_FILE_SYNC;
979 }
980
981 static void nfs_async_write_init(struct nfs_pgio_header *hdr)
982 {
983 nfs_io_completion_get(hdr->io_completion);
984 }
985
986 static void nfs_write_completion(struct nfs_pgio_header *hdr)
987 {
988 struct nfs_commit_info cinfo;
989 unsigned long bytes = 0;
990
991 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
992 goto out;
993 nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
994 while (!list_empty(&hdr->pages)) {
995 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
996
997 bytes += req->wb_bytes;
998 nfs_list_remove_request(req);
999 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
1000 (hdr->good_bytes < bytes)) {
1001 nfs_set_pageerror(page_file_mapping(req->wb_page));
1002 nfs_context_set_write_error(req->wb_context, hdr->error);
1003 goto remove_req;
1004 }
1005 if (nfs_write_need_commit(hdr)) {
1006 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
1007 nfs_mark_request_commit(req, hdr->lseg, &cinfo,
1008 hdr->pgio_mirror_idx);
1009 goto next;
1010 }
1011 remove_req:
1012 nfs_inode_remove_request(req);
1013 next:
1014 nfs_end_page_writeback(req);
1015 nfs_release_request(req);
1016 }
1017 out:
1018 nfs_io_completion_put(hdr->io_completion);
1019 hdr->release(hdr);
1020 }
1021
1022 unsigned long
1023 nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
1024 {
1025 return atomic_long_read(&cinfo->mds->ncommit);
1026 }
1027
1028 /* NFS_I(cinfo->inode)->commit_mutex held by caller */
1029 int
1030 nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
1031 struct nfs_commit_info *cinfo, int max)
1032 {
1033 struct nfs_page *req, *tmp;
1034 int ret = 0;
1035
1036 restart:
1037 list_for_each_entry_safe(req, tmp, src, wb_list) {
1038 kref_get(&req->wb_kref);
1039 if (!nfs_lock_request(req)) {
1040 int status;
1041
1042 /* Prevent deadlock with nfs_lock_and_join_requests */
1043 if (!list_empty(dst)) {
1044 nfs_release_request(req);
1045 continue;
1046 }
1047 /* Ensure we make progress to prevent livelock */
1048 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1049 status = nfs_wait_on_request(req);
1050 nfs_release_request(req);
1051 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1052 if (status < 0)
1053 break;
1054 goto restart;
1055 }
1056 nfs_request_remove_commit_list(req, cinfo);
1057 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
1058 nfs_list_add_request(req, dst);
1059 ret++;
1060 if ((ret == max) && !cinfo->dreq)
1061 break;
1062 cond_resched();
1063 }
1064 return ret;
1065 }
1066 EXPORT_SYMBOL_GPL(nfs_scan_commit_list);
1067
1068 /*
1069 * nfs_scan_commit - Scan an inode for commit requests
1070 * @inode: NFS inode to scan
1071 * @dst: mds destination list
1072 * @cinfo: mds and ds lists of reqs ready to commit
1073 *
1074 * Moves requests from the inode's 'commit' request list.
1075 * The requests are *not* checked to ensure that they form a contiguous set.
1076 */
1077 int
1078 nfs_scan_commit(struct inode *inode, struct list_head *dst,
1079 struct nfs_commit_info *cinfo)
1080 {
1081 int ret = 0;
1082
1083 if (!atomic_long_read(&cinfo->mds->ncommit))
1084 return 0;
1085 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1086 if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
1087 const int max = INT_MAX;
1088
1089 ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
1090 cinfo, max);
1091 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
1092 }
1093 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1094 return ret;
1095 }
1096
1097 /*
1098 * Search for an existing write request, and attempt to update
1099 * it to reflect a new dirty region on a given page.
1100 *
1101 * If the attempt fails, then the existing request is flushed out
1102 * to disk.
1103 */
1104 static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
1105 struct page *page,
1106 unsigned int offset,
1107 unsigned int bytes)
1108 {
1109 struct nfs_page *req;
1110 unsigned int rqend;
1111 unsigned int end;
1112 int error;
1113
1114 end = offset + bytes;
1115
1116 req = nfs_lock_and_join_requests(page);
1117 if (IS_ERR_OR_NULL(req))
1118 return req;
1119
1120 rqend = req->wb_offset + req->wb_bytes;
1121 /*
1122 * Tell the caller to flush out the request if
1123 * the offsets are non-contiguous.
1124 * Note: nfs_flush_incompatible() will already
1125 * have flushed out requests having wrong owners.
1126 */
1127 if (offset > rqend || end < req->wb_offset)
1128 goto out_flushme;
1129
1130 /* Okay, the request matches. Update the region */
1131 if (offset < req->wb_offset) {
1132 req->wb_offset = offset;
1133 req->wb_pgbase = offset;
1134 }
1135 if (end > rqend)
1136 req->wb_bytes = end - req->wb_offset;
1137 else
1138 req->wb_bytes = rqend - req->wb_offset;
1139 return req;
1140 out_flushme:
1141 /*
1142 * Note: we mark the request dirty here because
1143 * nfs_lock_and_join_requests() cannot preserve
1144 * commit flags, so we have to replay the write.
1145 */
1146 nfs_mark_request_dirty(req);
1147 nfs_unlock_and_release_request(req);
1148 error = nfs_wb_page(inode, page);
1149 return (error < 0) ? ERR_PTR(error) : NULL;
1150 }
1151
1152 /*
1153 * Try to update an existing write request, or create one if there is none.
1154 *
1155 * Note: Should always be called with the Page Lock held to prevent races
1156 * if we have to add a new request. Also assumes that the caller has
1157 * already called nfs_flush_incompatible() if necessary.
1158 */
1159 static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
1160 struct page *page, unsigned int offset, unsigned int bytes)
1161 {
1162 struct inode *inode = page_file_mapping(page)->host;
1163 struct nfs_page *req;
1164
1165 req = nfs_try_to_update_request(inode, page, offset, bytes);
1166 if (req != NULL)
1167 goto out;
1168 req = nfs_create_request(ctx, page, NULL, offset, bytes);
1169 if (IS_ERR(req))
1170 goto out;
1171 nfs_inode_add_request(inode, req);
1172 out:
1173 return req;
1174 }
1175
1176 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
1177 unsigned int offset, unsigned int count)
1178 {
1179 struct nfs_page *req;
1180
1181 req = nfs_setup_write_request(ctx, page, offset, count);
1182 if (IS_ERR(req))
1183 return PTR_ERR(req);
1184 /* Update file length */
1185 nfs_grow_file(page, offset, count);
1186 nfs_mark_uptodate(req);
1187 nfs_mark_request_dirty(req);
1188 nfs_unlock_and_release_request(req);
1189 return 0;
1190 }
1191
1192 int nfs_flush_incompatible(struct file *file, struct page *page)
1193 {
1194 struct nfs_open_context *ctx = nfs_file_open_context(file);
1195 struct nfs_lock_context *l_ctx;
1196 struct file_lock_context *flctx = file_inode(file)->i_flctx;
1197 struct nfs_page *req;
1198 int do_flush, status;
1199 /*
1200 * Look for a request corresponding to this page. If there
1201 * is one, and it belongs to another file, we flush it out
1202 * before we try to copy anything into the page. Do this
1203 * due to the lack of an ACCESS-type call in NFSv2.
1204 * Also do the same if we find a request from an existing
1205 * dropped page.
1206 */
1207 do {
1208 req = nfs_page_find_head_request(page);
1209 if (req == NULL)
1210 return 0;
1211 l_ctx = req->wb_lock_context;
1212 do_flush = req->wb_page != page ||
1213 !nfs_match_open_context(req->wb_context, ctx);
1214 if (l_ctx && flctx &&
1215 !(list_empty_careful(&flctx->flc_posix) &&
1216 list_empty_careful(&flctx->flc_flock))) {
1217 do_flush |= l_ctx->lockowner != current->files;
1218 }
1219 nfs_release_request(req);
1220 if (!do_flush)
1221 return 0;
1222 status = nfs_wb_page(page_file_mapping(page)->host, page);
1223 } while (status == 0);
1224 return status;
1225 }
1226
1227 /*
1228 * Avoid buffered writes when an open context credential's key would
1229 * expire soon.
1230 *
1231 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
1232 *
1233 * Returns 0 and sets a credential flag which triggers the inode to flush
1234 * and performs NFS_FILE_SYNC writes if the key will expire within
1235 * RPC_KEY_EXPIRE_TIMEO.
1236 */
1237 int
1238 nfs_key_timeout_notify(struct file *filp, struct inode *inode)
1239 {
1240 struct nfs_open_context *ctx = nfs_file_open_context(filp);
1241
1242 if (nfs_ctx_key_to_expire(ctx, inode) &&
1243 !ctx->ll_cred)
1244 /* Already expired! */
1245 return -EACCES;
1246 return 0;
1247 }
1248
1249 /*
1250 * Test if the open context credential key is marked to expire soon.
1251 */
1252 bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
1253 {
1254 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
1255 struct rpc_cred *cred = ctx->ll_cred;
1256 struct auth_cred acred = {
1257 .cred = ctx->cred,
1258 };
1259
1260 if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
1261 put_rpccred(cred);
1262 ctx->ll_cred = NULL;
1263 cred = NULL;
1264 }
1265 if (!cred)
1266 cred = auth->au_ops->lookup_cred(auth, &acred, 0);
1267 if (!cred || IS_ERR(cred))
1268 return true;
1269 ctx->ll_cred = cred;
1270 return !!(cred->cr_ops->crkey_timeout &&
1271 cred->cr_ops->crkey_timeout(cred));
1272 }
1273
1274 /*
1275 * If the page cache is marked as unsafe or invalid, then we can't rely on
1276 * the PageUptodate() flag. In this case, we will need to turn off
1277 * write optimisations that depend on the page contents being correct.
1278 */
1279 static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
1280 {
1281 struct nfs_inode *nfsi = NFS_I(inode);
1282
1283 if (nfs_have_delegated_attributes(inode))
1284 goto out;
1285 if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
1286 return false;
1287 smp_rmb();
1288 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
1289 return false;
1290 out:
1291 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1292 return false;
1293 return PageUptodate(page) != 0;
1294 }
1295
1296 static bool
1297 is_whole_file_wrlock(struct file_lock *fl)
1298 {
1299 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
1300 fl->fl_type == F_WRLCK;
1301 }
1302
1303 /* If we know the page is up to date, and we're not using byte range locks (or
1304 * if we have the whole file locked for writing), it may be more efficient to
1305 * extend the write to cover the entire page in order to avoid fragmentation
1306 * inefficiencies.
1307 *
1308 * If the file is opened for synchronous writes then we can just skip the rest
1309 * of the checks.
1310 */
1311 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
1312 {
1313 int ret;
1314 struct file_lock_context *flctx = inode->i_flctx;
1315 struct file_lock *fl;
1316
1317 if (file->f_flags & O_DSYNC)
1318 return 0;
1319 if (!nfs_write_pageuptodate(page, inode))
1320 return 0;
1321 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
1322 return 1;
1323 if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
1324 list_empty_careful(&flctx->flc_posix)))
1325 return 1;
1326
1327 /* Check to see if there are whole file write locks */
1328 ret = 0;
1329 spin_lock(&flctx->flc_lock);
1330 if (!list_empty(&flctx->flc_posix)) {
1331 fl = list_first_entry(&flctx->flc_posix, struct file_lock,
1332 fl_list);
1333 if (is_whole_file_wrlock(fl))
1334 ret = 1;
1335 } else if (!list_empty(&flctx->flc_flock)) {
1336 fl = list_first_entry(&flctx->flc_flock, struct file_lock,
1337 fl_list);
1338 if (fl->fl_type == F_WRLCK)
1339 ret = 1;
1340 }
1341 spin_unlock(&flctx->flc_lock);
1342 return ret;
1343 }
1344
1345 /*
1346 * Update and possibly write a cached page of an NFS file.
1347 *
1348 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
1349 * things with a page scheduled for an RPC call (e.g. invalidate it).
1350 */
1351 int nfs_updatepage(struct file *file, struct page *page,
1352 unsigned int offset, unsigned int count)
1353 {
1354 struct nfs_open_context *ctx = nfs_file_open_context(file);
1355 struct address_space *mapping = page_file_mapping(page);
1356 struct inode *inode = mapping->host;
1357 int status = 0;
1358
1359 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
1360
1361 dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
1362 file, count, (long long)(page_file_offset(page) + offset));
1363
1364 if (!count)
1365 goto out;
1366
1367 if (nfs_can_extend_write(file, page, inode)) {
1368 count = max(count + offset, nfs_page_length(page));
1369 offset = 0;
1370 }
1371
1372 status = nfs_writepage_setup(ctx, page, offset, count);
1373 if (status < 0)
1374 nfs_set_pageerror(mapping);
1375 else
1376 __set_page_dirty_nobuffers(page);
1377 out:
1378 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
1379 status, (long long)i_size_read(inode));
1380 return status;
1381 }
1382
1383 static int flush_task_priority(int how)
1384 {
1385 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
1386 case FLUSH_HIGHPRI:
1387 return RPC_PRIORITY_HIGH;
1388 case FLUSH_LOWPRI:
1389 return RPC_PRIORITY_LOW;
1390 }
1391 return RPC_PRIORITY_NORMAL;
1392 }
1393
1394 static void nfs_initiate_write(struct nfs_pgio_header *hdr,
1395 struct rpc_message *msg,
1396 const struct nfs_rpc_ops *rpc_ops,
1397 struct rpc_task_setup *task_setup_data, int how)
1398 {
1399 int priority = flush_task_priority(how);
1400
1401 task_setup_data->priority = priority;
1402 rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
1403 trace_nfs_initiate_write(hdr->inode, hdr->io_start, hdr->good_bytes,
1404 hdr->args.stable);
1405 }
1406
1407 /* If an nfs_flush_* function fails, it should remove reqs from @head and
1408 * call this on each, which will prepare them to be retried on the next
1409 * writeback using standard NFS.
1410 */
1411 static void nfs_redirty_request(struct nfs_page *req)
1412 {
1413 nfs_mark_request_dirty(req);
1414 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1415 nfs_end_page_writeback(req);
1416 nfs_release_request(req);
1417 }
1418
1419 static void nfs_async_write_error(struct list_head *head, int error)
1420 {
1421 struct nfs_page *req;
1422
1423 while (!list_empty(head)) {
1424 req = nfs_list_entry(head->next);
1425 nfs_list_remove_request(req);
1426 if (nfs_error_is_fatal(error)) {
1427 nfs_context_set_write_error(req->wb_context, error);
1428 if (nfs_error_is_fatal_on_server(error)) {
1429 nfs_write_error_remove_page(req);
1430 continue;
1431 }
1432 }
1433 nfs_redirty_request(req);
1434 }
1435 }
1436
1437 static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
1438 {
1439 nfs_async_write_error(&hdr->pages, 0);
1440 filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
1441 hdr->args.offset + hdr->args.count - 1);
1442 }
1443
1444 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1445 .init_hdr = nfs_async_write_init,
1446 .error_cleanup = nfs_async_write_error,
1447 .completion = nfs_write_completion,
1448 .reschedule_io = nfs_async_write_reschedule_io,
1449 };
1450
1451 void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1452 struct inode *inode, int ioflags, bool force_mds,
1453 const struct nfs_pgio_completion_ops *compl_ops)
1454 {
1455 struct nfs_server *server = NFS_SERVER(inode);
1456 const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
1457
1458 #ifdef CONFIG_NFS_V4_1
1459 if (server->pnfs_curr_ld && !force_mds)
1460 pg_ops = server->pnfs_curr_ld->pg_write_ops;
1461 #endif
1462 nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
1463 server->wsize, ioflags);
1464 }
1465 EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
1466
1467 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1468 {
1469 struct nfs_pgio_mirror *mirror;
1470
1471 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
1472 pgio->pg_ops->pg_cleanup(pgio);
1473
1474 pgio->pg_ops = &nfs_pgio_rw_ops;
1475
1476 nfs_pageio_stop_mirroring(pgio);
1477
1478 mirror = &pgio->pg_mirrors[0];
1479 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1480 }
1481 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1482
1483
1484 void nfs_commit_prepare(struct rpc_task *task, void *calldata)
1485 {
1486 struct nfs_commit_data *data = calldata;
1487
1488 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
1489 }
1490
1491 /*
1492 * Special version of should_remove_suid() that ignores capabilities.
1493 */
1494 static int nfs_should_remove_suid(const struct inode *inode)
1495 {
1496 umode_t mode = inode->i_mode;
1497 int kill = 0;
1498
1499 /* suid always must be killed */
1500 if (unlikely(mode & S_ISUID))
1501 kill = ATTR_KILL_SUID;
1502
1503 /*
1504 * sgid without any exec bits is just a mandatory locking mark; leave
1505 * it alone. If some exec bits are set, it's a real sgid; kill it.
1506 */
1507 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1508 kill |= ATTR_KILL_SGID;
1509
1510 if (unlikely(kill && S_ISREG(mode)))
1511 return kill;
1512
1513 return 0;
1514 }
1515
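/*
 * Sanity-check the post-op size after a write that may have extended the
 * file: a reported size smaller than our cached i_size has
 * NFS_ATTR_FATTR_SIZE dropped, so the attribute update cannot shrink
 * the file behind our back.
 */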
1516 static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1517 struct nfs_fattr *fattr)
1518 {
1519 struct nfs_pgio_args *argp = &hdr->args;
1520 struct nfs_pgio_res *resp = &hdr->res;
1521 u64 size = argp->offset + resp->count;
1522
1523 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1524 fattr->size = size;
1525 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1526 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1527 return;
1528 }
1529 if (size != fattr->size)
1530 return;
1531 /* Set attribute barrier */
1532 nfs_fattr_set_barrier(fattr);
1533 /* ...and update size */
1534 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1535 }
1536
1537 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1538 {
1539 struct nfs_fattr *fattr = &hdr->fattr;
1540 struct inode *inode = hdr->inode;
1541
1542 spin_lock(&inode->i_lock);
1543 nfs_writeback_check_extend(hdr, fattr);
1544 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
1545 spin_unlock(&inode->i_lock);
1546 }
1547 EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
1548
1549 /*
1550 * This function is called when the WRITE call is complete.
1551 */
1552 static int nfs_writeback_done(struct rpc_task *task,
1553 struct nfs_pgio_header *hdr,
1554 struct inode *inode)
1555 {
1556 int status;
1557
1558 /*
1559 * ->write_done will attempt to use post-op attributes to detect
1560 * conflicting writes by other clients. A strict interpretation
1561 * of close-to-open would allow us to continue caching even if
1562 * another writer had changed the file, but some applications
1563 * depend on tighter cache coherency when writing.
1564 */
1565 status = NFS_PROTO(inode)->write_done(task, hdr);
1566 if (status != 0)
1567 return status;
1568
1569 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
1570 trace_nfs_writeback_done(inode, task->tk_status,
1571 hdr->args.offset, hdr->res.verf);
1572
1573 if (hdr->res.verf->committed < hdr->args.stable &&
1574 task->tk_status >= 0) {
1575 /* We tried a write call, but the server did not
1576 * commit data to stable storage even though we
1577 * requested it.
1578 * Note: There is a known bug in Tru64 < 5.0 in which
1579 * the server reports NFS_DATA_SYNC, but performs
1580 * NFS_FILE_SYNC. We therefore implement this checking
1581 * as a dprintk() in order to avoid filling syslog.
1582 */
1583 static unsigned long complain;
1584
1585 /* Note this will print the MDS for a DS write */
1586 if (time_before(complain, jiffies)) {
1587 dprintk("NFS: faulty NFS server %s:"
1588 " (committed = %d) != (stable = %d)\n",
1589 NFS_SERVER(inode)->nfs_client->cl_hostname,
1590 hdr->res.verf->committed, hdr->args.stable);
1591 complain = jiffies + 300 * HZ;
1592 }
1593 }
1594
1595 /* Deal with the suid/sgid bit corner case */
1596 if (nfs_should_remove_suid(inode)) {
1597 spin_lock(&inode->i_lock);
1598 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
1599 spin_unlock(&inode->i_lock);
1600 }
1601 return 0;
1602 }
1603
1604 /*
1605 * This function is called when the WRITE call is complete.
1606 */
1607 static void nfs_writeback_result(struct rpc_task *task,
1608 struct nfs_pgio_header *hdr)
1609 {
1610 struct nfs_pgio_args *argp = &hdr->args;
1611 struct nfs_pgio_res *resp = &hdr->res;
1612
1613 if (resp->count < argp->count) {
1614 static unsigned long complain;
1615
1616 /* This is a short write! */
1617 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);
1618
1619 /* Has the server at least made some progress? */
1620 if (resp->count == 0) {
1621 if (time_before(complain, jiffies)) {
1622 printk(KERN_WARNING
1623 "NFS: Server wrote zero bytes, expected %u.\n",
1624 argp->count);
1625 complain = jiffies + 300 * HZ;
1626 }
1627 nfs_set_pgio_error(hdr, -EIO, argp->offset);
1628 task->tk_status = -EIO;
1629 return;
1630 }
1631
1632 /* For non-RPC-based layout drivers, retry-through-MDS */
1633 if (!task->tk_ops) {
1634 hdr->pnfs_error = -EAGAIN;
1635 return;
1636 }
1637
1638 /* Was this an NFSv2 write or an NFSv3 stable write? */
1639 if (resp->verf->committed != NFS_UNSTABLE) {
1640 /* Resend from where the server left off */
1641 hdr->mds_offset += resp->count;
1642 argp->offset += resp->count;
1643 argp->pgbase += resp->count;
1644 argp->count -= resp->count;
1645 } else {
1646 /* Resend as a stable write in order to avoid
1647 * headaches in the case of a server crash.
1648 */
1649 argp->stable = NFS_FILE_SYNC;
1650 }
1651 rpc_restart_call_prepare(task);
1652 }
1653 }
1654
1655 static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
1656 {
1657 return wait_var_event_killable(&cinfo->rpcs_out,
1658 !atomic_read(&cinfo->rpcs_out));
1659 }
1660
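/*
 * cinfo->mds->rpcs_out counts in-flight commit batches; nfs_commit_end()
 * wakes wait_on_commit() waiters once the count drops to zero.
 */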
1661 static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
1662 {
1663 atomic_inc(&cinfo->rpcs_out);
1664 }
1665
1666 static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
1667 {
1668 if (atomic_dec_and_test(&cinfo->rpcs_out))
1669 wake_up_var(&cinfo->rpcs_out);
1670 }
1671
1672 void nfs_commitdata_release(struct nfs_commit_data *data)
1673 {
1674 put_nfs_open_context(data->context);
1675 nfs_commit_free(data);
1676 }
1677 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1678
1679 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1680 const struct nfs_rpc_ops *nfs_ops,
1681 const struct rpc_call_ops *call_ops,
1682 int how, int flags)
1683 {
1684 struct rpc_task *task;
1685 int priority = flush_task_priority(how);
1686 struct rpc_message msg = {
1687 .rpc_argp = &data->args,
1688 .rpc_resp = &data->res,
1689 .rpc_cred = data->cred,
1690 };
1691 struct rpc_task_setup task_setup_data = {
1692 .task = &data->task,
1693 .rpc_client = clnt,
1694 .rpc_message = &msg,
1695 .callback_ops = call_ops,
1696 .callback_data = data,
1697 .workqueue = nfsiod_workqueue,
1698 .flags = RPC_TASK_ASYNC | flags,
1699 .priority = priority,
1700 };
1701 /* Set up the initial task struct. */
1702 nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
1703 trace_nfs_initiate_commit(data);
1704
1705 dprintk("NFS: initiated commit call\n");
1706
1707 task = rpc_run_task(&task_setup_data);
1708 if (IS_ERR(task))
1709 return PTR_ERR(task);
1710 if (how & FLUSH_SYNC)
1711 rpc_wait_for_completion_task(task);
1712 rpc_put_task(task);
1713 return 0;
1714 }
1715 EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1716
1717 static loff_t nfs_get_lwb(struct list_head *head)
1718 {
1719 loff_t lwb = 0;
1720 struct nfs_page *req;
1721
1722 list_for_each_entry(req, head, wb_list)
1723 if (lwb < (req_offset(req) + req->wb_bytes))
1724 lwb = req_offset(req) + req->wb_bytes;
1725
1726 return lwb;
1727 }
1728
1729 /*
1730 * Set up the argument/result storage required for the RPC call.
1731 */
1732 void nfs_init_commit(struct nfs_commit_data *data,
1733 struct list_head *head,
1734 struct pnfs_layout_segment *lseg,
1735 struct nfs_commit_info *cinfo)
1736 {
1737 struct nfs_page *first = nfs_list_entry(head->next);
1738 struct inode *inode = d_inode(first->wb_context->dentry);
1739
1740 /* Set up the RPC argument and reply structs
1741 * NB: take care not to mess about with data->commit et al. */
1742
1743 list_splice_init(head, &data->pages);
1744
1745 data->inode = inode;
1746 data->cred = first->wb_context->cred;
1747 data->lseg = lseg; /* reference transferred */
1748 /* only set lwb for pnfs commit */
1749 if (lseg)
1750 data->lwb = nfs_get_lwb(&data->pages);
1751 data->mds_ops = &nfs_commit_ops;
1752 data->completion_ops = cinfo->completion_ops;
1753 data->dreq = cinfo->dreq;
1754
1755 data->args.fh = NFS_FH(data->inode);
1756 /* Note: we always request a commit of the entire inode */
1757 data->args.offset = 0;
1758 data->args.count = 0;
1759 data->context = get_nfs_open_context(first->wb_context);
1760 data->res.fattr = &data->fattr;
1761 data->res.verf = &data->verf;
1762 nfs_fattr_init(&data->fattr);
1763 }
1764 EXPORT_SYMBOL_GPL(nfs_init_commit);
1765
1766 void nfs_retry_commit(struct list_head *page_list,
1767 struct pnfs_layout_segment *lseg,
1768 struct nfs_commit_info *cinfo,
1769 u32 ds_commit_idx)
1770 {
1771 struct nfs_page *req;
1772
1773 while (!list_empty(page_list)) {
1774 req = nfs_list_entry(page_list->next);
1775 nfs_list_remove_request(req);
1776 nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
1777 if (!cinfo->dreq)
1778 nfs_clear_page_commit(req->wb_page);
1779 nfs_unlock_and_release_request(req);
1780 }
1781 }
1782 EXPORT_SYMBOL_GPL(nfs_retry_commit);
1783
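/* a commit that must be rescheduled is retried by simply redirtying the
 * page, so a later writeback pass replays the write */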
1784 static void
1785 nfs_commit_resched_write(struct nfs_commit_info *cinfo,
1786 struct nfs_page *req)
1787 {
1788 __set_page_dirty_nobuffers(req->wb_page);
1789 }
1790
1791 /*
1792 * Commit dirty pages
1793 */
1794 static int
1795 nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1796 struct nfs_commit_info *cinfo)
1797 {
1798 struct nfs_commit_data *data;
1799
1800 /* another commit raced with us */
1801 if (list_empty(head))
1802 return 0;
1803
1804 data = nfs_commitdata_alloc(true);
1805
1806 /* Set up the argument struct */
1807 nfs_init_commit(data, head, NULL, cinfo);
1808 atomic_inc(&cinfo->mds->rpcs_out);
1809 return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
1810 data->mds_ops, how, 0);
1811 }
1812
1813 /*
1814 * COMMIT call returned
1815 */
1816 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1817 {
1818 struct nfs_commit_data *data = calldata;
1819
1820 dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1821 task->tk_pid, task->tk_status);
1822
1823 /* Call the NFS version-specific code */
1824 NFS_PROTO(data->inode)->commit_done(task, data);
1825 trace_nfs_commit_done(data);
1826 }
1827
1828 static void nfs_commit_release_pages(struct nfs_commit_data *data)
1829 {
1830 struct nfs_page *req;
1831 int status = data->task.tk_status;
1832 struct nfs_commit_info cinfo;
1833 struct nfs_server *nfss;
1834
1835 while (!list_empty(&data->pages)) {
1836 req = nfs_list_entry(data->pages.next);
1837 nfs_list_remove_request(req);
1838 if (req->wb_page)
1839 nfs_clear_page_commit(req->wb_page);
1840
1841 dprintk("NFS: commit (%s/%llu %d@%lld)",
1842 req->wb_context->dentry->d_sb->s_id,
1843 (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
1844 req->wb_bytes,
1845 (long long)req_offset(req));
1846 if (status < 0) {
1847 nfs_context_set_write_error(req->wb_context, status);
1848 if (req->wb_page)
1849 nfs_inode_remove_request(req);
1850 dprintk_cont(", error = %d\n", status);
1851 goto next;
1852 }
1853
1854 /* Okay, COMMIT succeeded, apparently. Check the verifier
1855 * returned by the server against all stored verfs. */
1856 if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
1857 /* We have a match */
1858 if (req->wb_page)
1859 nfs_inode_remove_request(req);
1860 dprintk_cont(" OK\n");
1861 goto next;
1862 }
1863 /* We have a mismatch. Write the page again */
1864 dprintk_cont(" mismatch\n");
1865 nfs_mark_request_dirty(req);
1866 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
1867 next:
1868 nfs_unlock_and_release_request(req);
1869 /* Latency breaker */
1870 cond_resched();
1871 }
1872 nfss = NFS_SERVER(data->inode);
1873 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
1874 clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);
1875
1876 nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1877 nfs_commit_end(cinfo.mds);
1878 }
1879
1880 static void nfs_commit_release(void *calldata)
1881 {
1882 struct nfs_commit_data *data = calldata;
1883
1884 data->completion_ops->completion(data);
1885 nfs_commitdata_release(calldata);
1886 }
1887
1888 static const struct rpc_call_ops nfs_commit_ops = {
1889 .rpc_call_prepare = nfs_commit_prepare,
1890 .rpc_call_done = nfs_commit_done,
1891 .rpc_release = nfs_commit_release,
1892 };
1893
1894 static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1895 .completion = nfs_commit_release_pages,
1896 .resched_write = nfs_commit_resched_write,
1897 };
1898
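/*
 * Try to commit through the pNFS layout driver first; fall back to a
 * COMMIT against the MDS if the layout driver did not attempt it.
 */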
1899 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1900 int how, struct nfs_commit_info *cinfo)
1901 {
1902 int status;
1903
1904 status = pnfs_commit_list(inode, head, how, cinfo);
1905 if (status == PNFS_NOT_ATTEMPTED)
1906 status = nfs_commit_list(inode, head, how, cinfo);
1907 return status;
1908 }
1909
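/*
 * Scan the inode for requests that need committing and send them off,
 * repeating only when a scan returns the INT_MAX maximum, i.e. there
 * may be more work to do. For non-blocking writeback, the number of
 * scanned requests is charged against wbc->nr_to_write. If FLUSH_SYNC
 * is set, wait for the outstanding commits to complete before
 * returning.
 */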
1910 static int __nfs_commit_inode(struct inode *inode, int how,
1911 struct writeback_control *wbc)
1912 {
1913 LIST_HEAD(head);
1914 struct nfs_commit_info cinfo;
1915 int may_wait = how & FLUSH_SYNC;
1916 int ret, nscan;
1917
1918 nfs_init_cinfo_from_inode(&cinfo, inode);
1919 nfs_commit_begin(cinfo.mds);
1920 for (;;) {
1921 ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
1922 if (ret <= 0)
1923 break;
1924 ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
1925 if (ret < 0)
1926 break;
1927 ret = 0;
1928 if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
1929 if (nscan < wbc->nr_to_write)
1930 wbc->nr_to_write -= nscan;
1931 else
1932 wbc->nr_to_write = 0;
1933 }
1934 if (nscan < INT_MAX)
1935 break;
1936 cond_resched();
1937 }
1938 nfs_commit_end(cinfo.mds);
1939 if (ret || !may_wait)
1940 return ret;
1941 return wait_on_commit(cinfo.mds);
1942 }
1943
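/**
 * nfs_commit_inode - Commit the unstable writes of an inode
 * @inode: inode whose outstanding writes should be committed
 * @how: commit flags; FLUSH_SYNC makes the call wait for the COMMITs
 *
 * A typical synchronous caller passes FLUSH_SYNC, as nfs_wb_all()
 * does below.
 */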
1944 int nfs_commit_inode(struct inode *inode, int how)
1945 {
1946 return __nfs_commit_inode(inode, how, NULL);
1947 }
1948 EXPORT_SYMBOL_GPL(nfs_commit_inode);
1949
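/*
 * Commit an inode's unstable writes on behalf of the VFS writeback
 * code. For a non-blocking flush, bail out early if there is nothing
 * to commit or if writeback is still in flight, and do not wait for
 * the COMMIT replies. The inode is marked dirty again whenever
 * commits or commit RPCs remain outstanding.
 */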
1950 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1951 {
1952 struct nfs_inode *nfsi = NFS_I(inode);
1953 int flags = FLUSH_SYNC;
1954 int ret = 0;
1955
1956 if (wbc->sync_mode == WB_SYNC_NONE) {
1957 /* no commits means nothing needs to be done */
1958 if (!atomic_long_read(&nfsi->commit_info.ncommit))
1959 goto check_requests_outstanding;
1960
1961 /* Don't commit yet if this is a non-blocking flush and there
1962 * is still writeback outstanding against this mapping.
1963 */
1964 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1965 goto out_mark_dirty;
1966
1967 /* don't wait for the COMMIT response */
1968 flags = 0;
1969 }
1970
1971 ret = __nfs_commit_inode(inode, flags, wbc);
1972 if (!ret) {
1973 if (flags & FLUSH_SYNC)
1974 return 0;
1975 } else if (atomic_long_read(&nfsi->commit_info.ncommit))
1976 goto out_mark_dirty;
1977
1978 check_requests_outstanding:
1979 if (!atomic_read(&nfsi->commit_info.rpcs_out))
1980 return ret;
1981 out_mark_dirty:
1982 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1983 return ret;
1984 }
1985 EXPORT_SYMBOL_GPL(nfs_write_inode);
1986
1987 /*
1988 * Wrapper for filemap_write_and_wait_range()
1989 *
1990 * Needed for pNFS in order to ensure data becomes visible to the
1991 * client.
1992 */
1993 int nfs_filemap_write_and_wait_range(struct address_space *mapping,
1994 loff_t lstart, loff_t lend)
1995 {
1996 int ret;
1997
1998 ret = filemap_write_and_wait_range(mapping, lstart, lend);
1999 if (ret == 0)
2000 ret = pnfs_sync_inode(mapping->host, true);
2001 return ret;
2002 }
2003 EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);
2004
2005 /*
2006 * Write back and commit all dirty data for the inode.
2007 */
2008 int nfs_wb_all(struct inode *inode)
2009 {
2010 int ret;
2011
2012 trace_nfs_writeback_inode_enter(inode);
2013
2014 ret = filemap_write_and_wait(inode->i_mapping);
2015 if (ret)
2016 goto out;
2017 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2018 if (ret < 0)
2019 goto out;
2020 pnfs_sync_inode(inode, true);
2021 ret = 0;
2022
2023 out:
2024 trace_nfs_writeback_inode_exit(inode, ret);
2025 return ret;
2026 }
2027 EXPORT_SYMBOL_GPL(nfs_wb_all);
2028
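/*
 * Cancel all dirty requests attached to a page, typically because the
 * page is being invalidated. Any joined head request is removed from
 * the inode and released without being written back.
 */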
2029 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
2030 {
2031 struct nfs_page *req;
2032 int ret = 0;
2033
2034 wait_on_page_writeback(page);
2035
2036 /* blocking call to cancel all requests and join to a single (head)
2037 * request */
2038 req = nfs_lock_and_join_requests(page);
2039
2040 if (IS_ERR(req)) {
2041 ret = PTR_ERR(req);
2042 } else if (req) {
2043 /* all requests from this page have been cancelled by
2044 * nfs_lock_and_join_requests, so just remove the head
2045 * request from the inode / page_private pointer and
2046 * release it */
2047 nfs_inode_remove_request(req);
2048 nfs_unlock_and_release_request(req);
2049 }
2050
2051 return ret;
2052 }
2053
2054 /*
2055 * Write back all requests on one page - we do this before reading it.
2056 */
2057 int nfs_wb_page(struct inode *inode, struct page *page)
2058 {
2059 loff_t range_start = page_file_offset(page);
2060 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
2061 struct writeback_control wbc = {
2062 .sync_mode = WB_SYNC_ALL,
2063 .nr_to_write = 0,
2064 .range_start = range_start,
2065 .range_end = range_end,
2066 };
2067 int ret;
2068
2069 trace_nfs_writeback_page_enter(inode);
2070
2071 for (;;) {
2072 wait_on_page_writeback(page);
2073 if (clear_page_dirty_for_io(page)) {
2074 ret = nfs_writepage_locked(page, &wbc);
2075 if (ret < 0)
2076 goto out_error;
2077 continue;
2078 }
2079 ret = 0;
2080 if (!PagePrivate(page))
2081 break;
2082 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2083 if (ret < 0)
2084 goto out_error;
2085 }
2086 out_error:
2087 trace_nfs_writeback_page_exit(inode, ret);
2088 return ret;
2089 }
2090
2091 #ifdef CONFIG_MIGRATION
2092 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
2093 struct page *page, enum migrate_mode mode)
2094 {
2095 /*
2096 * If PagePrivate is set, then the page is currently associated with
2097 * an in-progress read or write request. Don't try to migrate it.
2098 *
2099 * FIXME: we could do this in principle, but we'll need a way to ensure
2100 * that we can safely release the inode reference while holding
2101 * the page lock.
2102 */
2103 if (PagePrivate(page))
2104 return -EBUSY;
2105
2106 if (!nfs_fscache_release_page(page, GFP_KERNEL))
2107 return -EBUSY;
2108
2109 return migrate_page(mapping, newpage, page, mode);
2110 }
2111 #endif
2112
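/*
 * Set up the slab caches and mempools for write and commit headers.
 * The mempools guarantee forward progress under memory pressure: at
 * least MIN_POOL_WRITE pageio headers and MIN_POOL_COMMIT commit
 * structures can always be allocated.
 */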
2113 int __init nfs_init_writepagecache(void)
2114 {
2115 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
2116 sizeof(struct nfs_pgio_header),
2117 0, SLAB_HWCACHE_ALIGN,
2118 NULL);
2119 if (nfs_wdata_cachep == NULL)
2120 return -ENOMEM;
2121
2122 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
2123 nfs_wdata_cachep);
2124 if (nfs_wdata_mempool == NULL)
2125 goto out_destroy_write_cache;
2126
2127 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
2128 sizeof(struct nfs_commit_data),
2129 0, SLAB_HWCACHE_ALIGN,
2130 NULL);
2131 if (nfs_cdata_cachep == NULL)
2132 goto out_destroy_write_mempool;
2133
2134 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
2135 nfs_cdata_cachep);
2136 if (nfs_commit_mempool == NULL)
2137 goto out_destroy_commit_cache;
2138
2139 /*
2140 * NFS congestion size, scaled to the available memory.
2141 *
2142 * 64MB: 8192k
2143 * 128MB: 11585k
2144 * 256MB: 16384k
2145 * 512MB: 23170k
2146 * 1GB: 32768k
2147 * 2GB: 46340k
2148 * 4GB: 65536k
2149 * 8GB: 92681k
2150 * 16GB: 131072k
2151 *
2152 * This allows larger machines to have larger/more transfers.
2153 * Limit the default to 256M.
2154 */
2155 nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
2156 if (nfs_congestion_kb > 256*1024)
2157 nfs_congestion_kb = 256*1024;
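/*
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12): with 1GB of
 * RAM, totalram_pages() is 262144, int_sqrt() of that is 512,
 * 16 * 512 == 8192, and shifting left by (12 - 10) gives the 32768k
 * shown in the table above.
 */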
2158
2159 return 0;
2160
2161 out_destroy_commit_cache:
2162 kmem_cache_destroy(nfs_cdata_cachep);
2163 out_destroy_write_mempool:
2164 mempool_destroy(nfs_wdata_mempool);
2165 out_destroy_write_cache:
2166 kmem_cache_destroy(nfs_wdata_cachep);
2167 return -ENOMEM;
2168 }
2169
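/* Tear down the pools and caches in the reverse order of creation */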
2170 void nfs_destroy_writepagecache(void)
2171 {
2172 mempool_destroy(nfs_commit_mempool);
2173 kmem_cache_destroy(nfs_cdata_cachep);
2174 mempool_destroy(nfs_wdata_mempool);
2175 kmem_cache_destroy(nfs_wdata_cachep);
2176 }
2177
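/* Write-side hooks plugged into the generic NFS pageio machinery */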
2178 static const struct nfs_rw_ops nfs_rw_write_ops = {
2179 .rw_alloc_header = nfs_writehdr_alloc,
2180 .rw_free_header = nfs_writehdr_free,
2181 .rw_done = nfs_writeback_done,
2182 .rw_result = nfs_writeback_result,
2183 .rw_initiate = nfs_initiate_write,
2184 };