// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"
/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	/* A write entirely beyond the current EOF needs no fetch: just zero
	 * the part of the page being prepared.
	 */
	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}
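/* Worked example (assuming PAGE_SIZE == 4096): a 100-byte write at file
 * offset 0x1000 of a 0x1800-byte file does not cover a whole page, so
 * afs_write_begin() below calls afs_fill_page(vnode, key, 0x1000, 4096, page)
 * to fetch the surrounding bytes from the server before the caller copies its
 * 100 bytes over them.
 */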
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}
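/* Worked example of the dirty-region encoding used above (assuming 4KiB
 * pages, i.e. AFS_PRIV_SHIFT == 16 and AFS_PRIV_MAX == 0xffff): a write
 * covering bytes 0x100-0x8ff of a page is recorded as
 *
 *	priv = (0x900UL << AFS_PRIV_SHIFT) | 0x100;
 *
 * with the exclusive "to" offset in the high half of page->private and the
 * "from" offset in the low half.
 */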
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}
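/* Note that afs_write_end() checks i_size twice: once optimistically without
 * vnode->wb_lock, then again under the lock, so that two racing writers
 * cannot move i_size backwards between the check and the update.
 */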
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
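/* Batching example for the loop above (assuming PAGEVEC_SIZE == 15): killing
 * pages 0x10-0x3f (48 pages) takes four passes of 15, 15, 15 and 3 pages,
 * with `first` advanced past each batch before the pagevec is released.
 */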
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
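/* afs_redirty_pages() mirrors afs_kill_pages() but calls
 * redirty_page_for_writepage() instead of discarding the page, so the data is
 * retried by a later writeback pass rather than lost.
 */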
/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}
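/* Once the server has acknowledged the store, each page's dirty-region record
 * in page->private is cleared and its writeback bit ended; afs_prune_wb_keys()
 * then discards any writeback keys that no longer have dirty or in-flight
 * pages to serve.
 */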
/*
 * Store a contiguous run of pages back to the server.
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_status_cb *scb;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
	if (!scb)
		return -ENOMEM;

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	kfree(scb);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
		afs_dataversion_t data_version = vnode->status.data_version + 1;

		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break,
					&data_version, scb);
		if (fc.ac.error == 0)
			afs_pages_written_back(vnode, first, last);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}
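/* Byte-accounting example for the n_store_bytes update above (assuming 4KiB
 * pages): storing pages 2-4 with offset == 0x100 in the first page and
 * to == 0x800 in the last gives
 *
 *	(4 * 4096 + 0x800) - (2 * 4096 + 0x100) = 0x2700 bytes.
 */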
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
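/* The scan above probes at most ARRAY_SIZE(pages) (i.e. 8) candidate pages
 * per find_get_pages_contig() call and stops once 65536 pages have been
 * gathered (256MiB with 4KiB pages), bounding the size of any single
 * FS.StoreData RPC.
 */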
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}
/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}
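/* Note that only one dirty page is looked up per iteration (the batch size
 * passed to find_get_pages_range_tag() is 1); afs_write_back_from_locked_page()
 * then expands the write around that page itself.
 */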
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}
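/* Example of the range_cyclic case above: with writeback_index == 0x80, the
 * first pass covers pages 0x80 onwards; if quota (nr_to_write) remains and no
 * error occurred, a second pass wraps round to cover pages 0-0x80 before
 * writeback_index is updated for the next cycle.
 */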
/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}
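/* For mkwrite the entire page is treated as dirty: with 4KiB pages the record
 * set above is priv = (4096UL << AFS_PRIV_SHIFT) | 0, i.e. from == 0 and
 * to == PAGE_SIZE.
 */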
/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
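/* Unused keys are moved to a local graveyard list while vnode->wb_lock is
 * held and only released once the lock has been dropped, keeping the final
 * afs_put_wb_key() calls out of the locked section.
 */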
/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, t, f);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}