]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - fs/afs/file.c
afs: Introduce a file-private data record
[mirror_ubuntu-bionic-kernel.git] / fs / afs / file.c
1 /* AFS filesystem file handling
2 *
3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/fs.h>
16 #include <linux/pagemap.h>
17 #include <linux/writeback.h>
18 #include <linux/gfp.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include "internal.h"
21
22 static int afs_readpage(struct file *file, struct page *page);
23 static void afs_invalidatepage(struct page *page, unsigned int offset,
24 unsigned int length);
25 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
26 static int afs_launder_page(struct page *page);
27
28 static int afs_readpages(struct file *filp, struct address_space *mapping,
29 struct list_head *pages, unsigned nr_pages);
30
/*
 * File operations for AFS regular files: generic VFS helpers carry the
 * read/seek/mmap/splice paths, while open/flush/release and the locking
 * hooks are AFS-specific (open attaches a key, release discards it).
 */
const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.flush		= afs_flush,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};
44
/*
 * Inode operations for AFS regular files: attribute get/set, permission
 * checking and xattr listing are all handled by AFS-specific routines.
 */
const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
	.listxattr	= afs_listxattr,
};
51
/*
 * Address space operations for AFS file mappings.  The read side
 * (readpage/readpages) is implemented in this file; the write-back side
 * (write_begin/write_end/writepage(s), laundering, dirtying) lives in
 * the AFS write-path code declared in internal.h.
 */
const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};
64
65 /*
66 * open an AFS file or directory and attach a key to it
67 */
68 int afs_open(struct inode *inode, struct file *file)
69 {
70 struct afs_vnode *vnode = AFS_FS_I(inode);
71 struct afs_file *af;
72 struct key *key;
73 int ret;
74
75 _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
76
77 key = afs_request_key(vnode->volume->cell);
78 if (IS_ERR(key)) {
79 ret = PTR_ERR(key);
80 goto error;
81 }
82
83 af = kzalloc(sizeof(*af), GFP_KERNEL);
84 if (!af) {
85 ret = -ENOMEM;
86 goto error_key;
87 }
88
89 ret = afs_validate(vnode, key);
90 if (ret < 0)
91 goto error_af;
92
93 af->key = key;
94 file->private_data = af;
95 _leave(" = 0");
96 return 0;
97
98 error_af:
99 kfree(af);
100 error_key:
101 key_put(key);
102 error:
103 _leave(" = %d", ret);
104 return ret;
105 }
106
107 /*
108 * release an AFS file or directory and discard its key
109 */
110 int afs_release(struct inode *inode, struct file *file)
111 {
112 struct afs_vnode *vnode = AFS_FS_I(inode);
113 struct afs_file *af = file->private_data;
114
115 _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
116
117 file->private_data = NULL;
118 key_put(af->key);
119 kfree(af);
120 _leave(" = 0");
121 return 0;
122 }
123
124 /*
125 * Dispose of a ref to a read record.
126 */
127 void afs_put_read(struct afs_read *req)
128 {
129 int i;
130
131 if (atomic_dec_and_test(&req->usage)) {
132 for (i = 0; i < req->nr_pages; i++)
133 if (req->pages[i])
134 put_page(req->pages[i]);
135 kfree(req);
136 }
137 }
138
#ifdef CONFIG_AFS_FSCACHE
/*
 * Completion callback for a page read from the local cache.  On success
 * the page is marked uptodate; on failure it is left !uptodate so the
 * VM will reissue the read through ->readpage.  The page is unlocked
 * either way.
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	if (error == 0)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif
156
/*
 * Fetch file data from the volume.
 *
 * Fetches the region described by @desc from a fileserver holding
 * @vnode's volume, authenticating with @key.  The fs-cursor loop retries
 * the FetchData RPC across the available fileservers until one accepts
 * or the cursor gives up.  Returns 0 or a negative error; -ERESTARTSYS
 * if the operation could not even be begun.
 */
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
{
	struct afs_fs_cursor fc;
	int ret;

	_enter("%s{%x:%u.%u},%x,,,",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, key)) {
		while (afs_select_fileserver(&fc)) {
			/* Snapshot the callback-break counters before each
			 * attempt; the snapshot is passed to the status
			 * commit below — NOTE(review): presumably so a
			 * callback break arriving mid-fetch can be detected;
			 * confirm against afs_vnode_commit_status(). */
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_fetch_data(&fc, desc);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	_leave(" = %d", ret);
	return ret;
}
187
/*
 * read page from file, directory or symlink, given a key to use
 *
 * @data is really a struct key *, passed as void * (NOTE(review): this
 * matches the read_cache_pages() filler calling convention — confirm
 * against callers).  The page must be locked on entry and is unlocked
 * on every return path: directly here, or by the fscache completion
 * handler when the read is satisfied from the local cache.
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	/* A vnode already known to be deleted on the server can't supply
	 * data. */
	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");
		/* fall through */
	default:
	go_on:
		/* Allocate a read record with room for exactly one page
		 * pointer in its trailing array (see req->pages[0] below). */
		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
			      GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page. If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
		 */
		atomic_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages[0] = page;
		get_page(page);	/* ref held by req; dropped in afs_put_read() */

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_fetch_data(vnode, key, req);
		afs_put_read(req);

		/* For directories, sanity-check what came back. */
		if (ret >= 0 && S_ISDIR(inode->i_mode) &&
		    !afs_dir_check_page(inode, page))
			ret = -EIO;

		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

			/* Transient failures: leave the page !uptodate and
			 * !PageError so the read can simply be retried. */
			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}
304
305 /*
306 * read page from file, directory or symlink, given a file to nominate the key
307 * to be used
308 */
309 static int afs_readpage(struct file *file, struct page *page)
310 {
311 struct key *key;
312 int ret;
313
314 if (file) {
315 key = afs_file_key(file);
316 ASSERT(key != NULL);
317 ret = afs_page_filler(key, page);
318 } else {
319 struct inode *inode = page->mapping->host;
320 key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
321 if (IS_ERR(key)) {
322 ret = PTR_ERR(key);
323 } else {
324 ret = afs_page_filler(key, page);
325 key_put(key);
326 }
327 }
328 return ret;
329 }
330
/*
 * Make pages available as they're filled.
 *
 * Invoked through req->page_done for each page of an afs_read as its
 * data is received: the page is marked uptodate, copied to the local
 * cache if FS-Cache has a slot reserved for it, unlocked and the ref
 * the request held on it dropped.  The req->pages[] slot is cleared so
 * afs_put_read() won't put the page a second time.
 */
static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	/* NOTE(review): reply[0] appears to carry the target vnode — used
	 * here only to reach its cache cookie; confirm against the RPC
	 * setup code. */
	struct afs_vnode *vnode = call->reply[0];
#endif
	struct page *page = req->pages[req->index];

	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}
355
/*
 * Read a contiguous set of pages.
 *
 * Peels the longest run of index-contiguous pages off the tail of
 * @pages (the list runs prev-wards), inserts them into the page cache
 * and fetches their contents from the server in one operation.  Each
 * page is completed individually by afs_readpages_page_done().  Returns
 * 0 on success (including the degenerate case where no page could be
 * added to the page cache) or a negative error.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = afs_file_key(file);
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list.  Note
	 * that the list goes prev-wards rather than next-wards.
	 */
	first = list_entry(pages->prev, struct page, lru);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

	/* Room for up to n page pointers in the trailing array. */
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
		      GFP_NOFS);
	if (!req)
		return -ENOMEM;

	atomic_set(&req->usage, 1);
	req->page_done = afs_readpages_page_done;
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;

	/* Transfer the pages to the request.  We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run.
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank.  We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		/* The list ref on the page now belongs to the request. */
		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	/* Unwind: any page still attached to the request is errored and
	 * unlocked; its ref is dropped when the request is put below.
	 * (Slots already completed by page_done are NULL.) */
	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}
455
/*
 * read a set of pages
 *
 * ->readpages() implementation: first offer the whole batch to FS-Cache
 * (which trims @pages/@nr_pages to whatever it couldn't serve), then
 * fetch the remainder from the server one contiguous run at a time.
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = afs_file_key(file);
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

	/* Each call consumes one contiguous run from the tail of the list. */
	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}
518
/*
 * write back a dirty page
 *
 * NOTE(review): currently a stub — it only logs and reports success
 * without writing anything.  Presumably actual write-back happens via
 * the writepage(s) paths; confirm before relying on launder semantics.
 */
static int afs_launder_page(struct page *page)
{
	_enter("{%lu}", page->index);

	return 0;
}
528
/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length)
{
	/* page_private(), when set, carries a ref'd write-back record
	 * (NOTE(review): inferred from the afs_put_writeback() call below;
	 * confirm against the write path that sets it). */
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);

	_enter("{%lu},%u,%u", page->index, offset, length);

	BUG_ON(!PageLocked(page));

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == PAGE_SIZE) {
#ifdef CONFIG_AFS_FSCACHE
		/* Wait out any in-flight cache write before evicting the
		 * page from the cache's knowledge. */
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
		}
#endif

		if (PagePrivate(page)) {
			/* Don't drop the write-back record while the page is
			 * still under write-back. */
			if (wb && !PageWriteback(page)) {
				set_page_private(page, 0);
				afs_put_writeback(wb);
			}

			if (!page_private(page))
				ClearPagePrivate(page);
		}
	}

	_leave("");
}
566
/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	/* page_private(), when set, carries a ref'd write-back record
	 * (NOTE(review): inferred from the afs_put_writeback() call below). */
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	/* Tear down any attached write-back record before letting the page
	 * go. */
	if (PagePrivate(page)) {
		if (wb) {
			set_page_private(page, 0);
			afs_put_writeback(wb);
		}
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}