// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

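/*
 * Note on dirty-region tracking (a sketch; the real helpers live in
 * internal.h): each dirty page carries an encoding of the modified byte
 * range in page->private.  afs_page_dirty(from, to) is taken here to pack
 * the two offsets into the one unsigned long, with afs_page_dirty_from()
 * and afs_page_dirty_to() unpacking them again.  On ppc32 with 64K pages a
 * full page offset does not fit in half a long, so the encoding is assumed
 * to shed low-order resolution rather than overflow.
 */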

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

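	/* Beyond the EOF there is nothing on the server, so just clear the
	 * requested region of the page.
	 */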
	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

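	/* The region overlaps data that exists on the server, so read the
	 * page in first lest a partial overwrite destroy it.
	 */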
	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		ASSERTCMP(f, <=, t);
	}

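	/* Illustrative values: with an existing dirty region of f=0..t=100,
	 * a new write of 100..200 abuts it and can be merged, whereas one at
	 * 300..400 would leave a hole and must trigger a flush first -
	 * unless the file is being filled locally (AFS_VNODE_NEW_CONTENT),
	 * in which case the hole may be written back too.
	 */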
	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned int f, from = pos & (PAGE_SIZE - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
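		/* Recheck under the callback lock: another writer may have
		 * extended the file further in the meantime.
		 */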
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

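	/* Extend any dirty region already recorded in page->private to cover
	 * this write, or attach a fresh encoding if the page was clean.
	 */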
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(priv);
		t = afs_page_dirty_to(priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
				     page->index, priv);
	} else {
		priv = afs_page_dirty(from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
				     page->index, priv);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

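		/* Strip the dirty-region word from each page;
		 * detach_page_private() also drops the ref that
		 * attach_page_private() took on behalf of the encoding.
		 */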
		for (loop = 0; loop < count; loop++) {
			priv = (unsigned long)detach_page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
				(op->store.first * PAGE_SIZE + op->store.first_offset),
				&afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to, bool laundering)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

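	/* A permission-class failure may just mean the key we used has gone
	 * bad, so rotate to the next cached writeback key and reissue the
	 * operation.
	 */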
	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = afs_page_dirty_from(priv);
	to = afs_page_dirty_to(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	end = (loff_t)last * PAGE_SIZE + to;
	i_size = i_size_read(&vnode->vfs_inode);

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
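	/* Trim the end offset back to the EOF position within the final page
	 * so that we don't store data beyond the current file size.
	 */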
	if (end > i_size)
		to = i_size & ~PAGE_MASK;

	ret = afs_store_data(mapping, first, last, offset, to, false);
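
	/* Redirty the pages on errors that may be transient or resolvable
	 * with different credentials, but discard them outright on hard
	 * failures.
	 */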
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

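	/* Pull one dirty page at a time from the mapping;
	 * afs_write_back_from_locked_page() then expands the write around it
	 * as far as it usefully can.
	 */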
	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

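	/* For cyclic writeback, start from the remembered index and, if the
	 * quota allows, wrap around to cover the start of the mapping too.
	 */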
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

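	/* A write through the mapping may touch any part of the page, so
	 * record the whole of it as dirty; afs_page_dirty_mmapped() is
	 * assumed to flag the encoding so that later invalidation knows the
	 * region came from a fault.
	 */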
	priv = afs_page_dirty(0, PAGE_SIZE);
	priv = afs_page_dirty_mmapped(priv);
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  Unused keys are discarded under
 * vnode->wb_lock and then released outside it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

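	/* Write the dirty region out with the 'laundering' flag set, which
	 * stops the success handler from treating this as ordinary writeback
	 * completion (the page is not in the writeback state here).
	 */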
	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(priv);
			t = afs_page_dirty_to(priv);
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t, true);
	}

	priv = (unsigned long)detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}