/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"
/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
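/*
 * Note: __set_page_dirty_nobuffers() is used here because AFS attaches no
 * buffer_heads to its pages; the record of which part of a page is dirty
 * is kept in page->private instead (see afs_write_begin() below).
 */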
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		/* Beyond EOF there is nothing to fetch; just clear the area
		 * being prepared.
		 */
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	_leave(" = %d", ret);
	return ret;
}
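/*
 * Note on afs_fill_page(): there are two cases.  A write entirely beyond
 * the current EOF has no server data behind it, so the region is simply
 * zeroed in the pagecache; otherwise a single-page afs_read request is
 * built and afs_fetch_data() pulls the existing data from the server so
 * that a partial overwrite doesn't lose the rest of the page.
 */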
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}
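/*
 * The dirty-range bookkeeping used above packs both bounds of the modified
 * region into page->private: the "from" offset occupies the low
 * AFS_PRIV_SHIFT bits and the "to" offset the bits above them.  For
 * example, dirtying bytes 512..1024 of a page is recorded roughly as:
 *
 *	priv = (1024UL << AFS_PRIV_SHIFT) | 512;
 *	f = priv & AFS_PRIV_MAX;	// 512
 *	t = priv >> AFS_PRIV_SHIFT;	// 1024
 *
 * which is why the BUILD_BUG_ON() above insists that page->private is at
 * least 8 bytes wide on configurations with pages bigger than 32KiB.
 */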
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}
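/*
 * The i_size update above uses a check/lock/recheck pattern: i_size is
 * read once without the lock as a cheap test, then reread under
 * vnode->wb_lock so that two racing writers cannot each act on a stale
 * size and move EOF backwards.
 */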
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
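/*
 * afs_redirty_pages() and afs_kill_pages() are the two failure paths for a
 * store operation: recoverable errors (quota, key problems) put the pages
 * back on the dirty list to be retried, whereas unrecoverable errors clear
 * PG_uptodate and remove the pages entirely; in both cases the error is
 * also reported through mapping_set_error() by the caller.
 */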
/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}
/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_status_cb *scb;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
	if (!scb)
		return -ENOMEM;

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	kfree(scb);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
		afs_dataversion_t data_version = vnode->status.data_version + 1;

		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break,
					&data_version, scb);
		if (fc.ac.error == 0)
			afs_pages_written_back(vnode, first, last);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}
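/*
 * Note the key-rotation logic in afs_store_data(): the vnode keeps a list
 * of keys (vnode->wb_keys) belonging to processes that have written to the
 * file, and a store that fails with a permission- or key-related error
 * (-EACCES, -EPERM, -ENOKEY, ...) resumes the walk at the next key rather
 * than failing the writeback outright.
 */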
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
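/*
 * The expansion loop above grows the write rightwards from the primary
 * page in batches of up to ARRAY_SIZE(pages) pages, stopping at the first
 * page that is missing, not immediately lockable, clean, already under
 * writeback or (unless AFS_VNODE_NEW_CONTENT is set) not dirtied
 * contiguously with its predecessor, and caps the whole write at 65536
 * pages.
 */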
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}
/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}
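/*
 * For range_cyclic writeback the region helper is invoked twice: once from
 * mapping->writeback_index to the end of the file and, if there is still
 * quota left in wbc->nr_to_write, again from the start of the file up to
 * the old index, so that repeated calls rotate through the whole mapping.
 */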
/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}
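/*
 * Unlike the write path, a page dirtied through a shared mmap() gives no
 * indication of how much of it will be modified, so afs_page_mkwrite()
 * records the full page ([0, PAGE_SIZE)) as the dirty region in
 * page->private.
 */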
/*
 * Prune the keys cached for writeback.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}
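/*
 * Note that laundering writes back only the range recorded in
 * page->private (or the whole page if no range was recorded) and does so
 * synchronously, since the page is about to be invalidated and cannot be
 * left dirty.
 */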