/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
22 static int afs_readpage(struct file
*file
, struct page
*page
);
23 static void afs_invalidatepage(struct page
*page
, unsigned int offset
,
25 static int afs_releasepage(struct page
*page
, gfp_t gfp_flags
);
26 static int afs_launder_page(struct page
*page
);
28 static int afs_readpages(struct file
*filp
, struct address_space
*mapping
,
29 struct list_head
*pages
, unsigned nr_pages
);
31 const struct file_operations afs_file_operations
= {
34 .release
= afs_release
,
35 .llseek
= generic_file_llseek
,
36 .read_iter
= generic_file_read_iter
,
37 .write_iter
= afs_file_write
,
38 .mmap
= generic_file_readonly_mmap
,
39 .splice_read
= generic_file_splice_read
,
45 const struct inode_operations afs_file_inode_operations
= {
46 .getattr
= afs_getattr
,
47 .setattr
= afs_setattr
,
48 .permission
= afs_permission
,
49 .listxattr
= afs_listxattr
,
52 const struct address_space_operations afs_fs_aops
= {
53 .readpage
= afs_readpage
,
54 .readpages
= afs_readpages
,
55 .set_page_dirty
= afs_set_page_dirty
,
56 .launder_page
= afs_launder_page
,
57 .releasepage
= afs_releasepage
,
58 .invalidatepage
= afs_invalidatepage
,
59 .write_begin
= afs_write_begin
,
60 .write_end
= afs_write_end
,
61 .writepage
= afs_writepage
,
62 .writepages
= afs_writepages
,
66 * open an AFS file or directory and attach a key to it
68 int afs_open(struct inode
*inode
, struct file
*file
)
70 struct afs_vnode
*vnode
= AFS_FS_I(inode
);
75 _enter("{%x:%u},", vnode
->fid
.vid
, vnode
->fid
.vnode
);
77 key
= afs_request_key(vnode
->volume
->cell
);
83 af
= kzalloc(sizeof(*af
), GFP_KERNEL
);
89 ret
= afs_validate(vnode
, key
);
94 file
->private_data
= af
;
103 _leave(" = %d", ret
);
108 * release an AFS file or directory and discard its key
110 int afs_release(struct inode
*inode
, struct file
*file
)
112 struct afs_vnode
*vnode
= AFS_FS_I(inode
);
113 struct afs_file
*af
= file
->private_data
;
115 _enter("{%x:%u},", vnode
->fid
.vid
, vnode
->fid
.vnode
);
117 file
->private_data
= NULL
;
125 * Dispose of a ref to a read record.
127 void afs_put_read(struct afs_read
*req
)
131 if (atomic_dec_and_test(&req
->usage
)) {
132 for (i
= 0; i
< req
->nr_pages
; i
++)
134 put_page(req
->pages
[i
]);
#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif
158 * Fetch file data from the volume.
160 int afs_fetch_data(struct afs_vnode
*vnode
, struct key
*key
, struct afs_read
*desc
)
162 struct afs_fs_cursor fc
;
165 _enter("%s{%x:%u.%u},%x,,,",
173 if (afs_begin_vnode_operation(&fc
, vnode
, key
)) {
174 while (afs_select_fileserver(&fc
)) {
175 fc
.cb_break
= vnode
->cb_break
+ vnode
->cb_s_break
;
176 afs_fs_fetch_data(&fc
, desc
);
179 afs_check_for_remote_deletion(&fc
, fc
.vnode
);
180 afs_vnode_commit_status(&fc
, vnode
, fc
.cb_break
);
181 ret
= afs_end_vnode_operation(&fc
);
184 _leave(" = %d", ret
);
189 * read page from file, directory or symlink, given a key to use
191 int afs_page_filler(void *data
, struct page
*page
)
193 struct inode
*inode
= page
->mapping
->host
;
194 struct afs_vnode
*vnode
= AFS_FS_I(inode
);
195 struct afs_read
*req
;
196 struct key
*key
= data
;
199 _enter("{%x},{%lu},{%lu}", key_serial(key
), inode
->i_ino
, page
->index
);
201 BUG_ON(!PageLocked(page
));
204 if (test_bit(AFS_VNODE_DELETED
, &vnode
->flags
))
208 #ifdef CONFIG_AFS_FSCACHE
209 ret
= fscache_read_or_alloc_page(vnode
->cache
,
211 afs_file_readpage_read_complete
,
218 /* read BIO submitted (page in cache) */
222 /* page not yet cached */
224 _debug("cache said ENODATA");
227 /* page will not be cached */
229 _debug("cache said ENOBUFS");
232 req
= kzalloc(sizeof(struct afs_read
) + sizeof(struct page
*),
237 /* We request a full page. If the page is a partial one at the
238 * end of the file, the server will return a short read and the
239 * unmarshalling code will clear the unfilled space.
241 atomic_set(&req
->usage
, 1);
242 req
->pos
= (loff_t
)page
->index
<< PAGE_SHIFT
;
243 req
->len
= PAGE_SIZE
;
245 req
->pages
[0] = page
;
248 /* read the contents of the file from the server into the
250 ret
= afs_fetch_data(vnode
, key
, req
);
253 if (ret
>= 0 && S_ISDIR(inode
->i_mode
) &&
254 !afs_dir_check_page(inode
, page
))
258 if (ret
== -ENOENT
) {
259 _debug("got NOENT from server"
260 " - marking file deleted and stale");
261 set_bit(AFS_VNODE_DELETED
, &vnode
->flags
);
265 #ifdef CONFIG_AFS_FSCACHE
266 fscache_uncache_page(vnode
->cache
, page
);
268 BUG_ON(PageFsCache(page
));
272 ret
== -ERESTARTSYS
||
278 SetPageUptodate(page
);
280 /* send the page to the cache */
281 #ifdef CONFIG_AFS_FSCACHE
282 if (PageFsCache(page
) &&
283 fscache_write_page(vnode
->cache
, page
, GFP_KERNEL
) != 0) {
284 fscache_uncache_page(vnode
->cache
, page
);
285 BUG_ON(PageFsCache(page
));
301 _leave(" = %d", ret
);
306 * read page from file, directory or symlink, given a file to nominate the key
309 static int afs_readpage(struct file
*file
, struct page
*page
)
315 key
= afs_file_key(file
);
317 ret
= afs_page_filler(key
, page
);
319 struct inode
*inode
= page
->mapping
->host
;
320 key
= afs_request_key(AFS_FS_S(inode
->i_sb
)->cell
);
324 ret
= afs_page_filler(key
, page
);
332 * Make pages available as they're filled.
334 static void afs_readpages_page_done(struct afs_call
*call
, struct afs_read
*req
)
336 #ifdef CONFIG_AFS_FSCACHE
337 struct afs_vnode
*vnode
= call
->reply
[0];
339 struct page
*page
= req
->pages
[req
->index
];
341 req
->pages
[req
->index
] = NULL
;
342 SetPageUptodate(page
);
344 /* send the page to the cache */
345 #ifdef CONFIG_AFS_FSCACHE
346 if (PageFsCache(page
) &&
347 fscache_write_page(vnode
->cache
, page
, GFP_KERNEL
) != 0) {
348 fscache_uncache_page(vnode
->cache
, page
);
349 BUG_ON(PageFsCache(page
));
357 * Read a contiguous set of pages.
359 static int afs_readpages_one(struct file
*file
, struct address_space
*mapping
,
360 struct list_head
*pages
)
362 struct afs_vnode
*vnode
= AFS_FS_I(mapping
->host
);
363 struct afs_read
*req
;
365 struct page
*first
, *page
;
366 struct key
*key
= afs_file_key(file
);
370 /* Count the number of contiguous pages at the front of the list. Note
371 * that the list goes prev-wards rather than next-wards.
373 first
= list_entry(pages
->prev
, struct page
, lru
);
374 index
= first
->index
+ 1;
376 for (p
= first
->lru
.prev
; p
!= pages
; p
= p
->prev
) {
377 page
= list_entry(p
, struct page
, lru
);
378 if (page
->index
!= index
)
384 req
= kzalloc(sizeof(struct afs_read
) + sizeof(struct page
*) * n
,
389 atomic_set(&req
->usage
, 1);
390 req
->page_done
= afs_readpages_page_done
;
391 req
->pos
= first
->index
;
392 req
->pos
<<= PAGE_SHIFT
;
394 /* Transfer the pages to the request. We add them in until one fails
395 * to add to the LRU and then we stop (as that'll make a hole in the
398 * Note that it's possible for the file size to change whilst we're
399 * doing this, but we rely on the server returning less than we asked
400 * for if the file shrank. We also rely on this to deal with a partial
401 * page at the end of the file.
404 page
= list_entry(pages
->prev
, struct page
, lru
);
405 list_del(&page
->lru
);
407 if (add_to_page_cache_lru(page
, mapping
, index
,
408 readahead_gfp_mask(mapping
))) {
409 #ifdef CONFIG_AFS_FSCACHE
410 fscache_uncache_page(vnode
->cache
, page
);
416 req
->pages
[req
->nr_pages
++] = page
;
417 req
->len
+= PAGE_SIZE
;
418 } while (req
->nr_pages
< n
);
420 if (req
->nr_pages
== 0) {
425 ret
= afs_fetch_data(vnode
, key
, req
);
429 task_io_account_read(PAGE_SIZE
* req
->nr_pages
);
434 if (ret
== -ENOENT
) {
435 _debug("got NOENT from server"
436 " - marking file deleted and stale");
437 set_bit(AFS_VNODE_DELETED
, &vnode
->flags
);
441 for (i
= 0; i
< req
->nr_pages
; i
++) {
442 page
= req
->pages
[i
];
444 #ifdef CONFIG_AFS_FSCACHE
445 fscache_uncache_page(vnode
->cache
, page
);
457 * read a set of pages
459 static int afs_readpages(struct file
*file
, struct address_space
*mapping
,
460 struct list_head
*pages
, unsigned nr_pages
)
462 struct key
*key
= afs_file_key(file
);
463 struct afs_vnode
*vnode
;
466 _enter("{%d},{%lu},,%d",
467 key_serial(key
), mapping
->host
->i_ino
, nr_pages
);
471 vnode
= AFS_FS_I(mapping
->host
);
472 if (test_bit(AFS_VNODE_DELETED
, &vnode
->flags
)) {
473 _leave(" = -ESTALE");
477 /* attempt to read as many of the pages as possible */
478 #ifdef CONFIG_AFS_FSCACHE
479 ret
= fscache_read_or_alloc_pages(vnode
->cache
,
483 afs_file_readpage_read_complete
,
485 mapping_gfp_mask(mapping
));
491 /* all pages are being read from the cache */
493 BUG_ON(!list_empty(pages
));
494 BUG_ON(nr_pages
!= 0);
495 _leave(" = 0 [reading all]");
498 /* there were pages that couldn't be read from the cache */
505 _leave(" = %d", ret
);
509 while (!list_empty(pages
)) {
510 ret
= afs_readpages_one(file
, mapping
, pages
);
515 _leave(" = %d [netting]", ret
);
520 * write back a dirty page
522 static int afs_launder_page(struct page
*page
)
524 _enter("{%lu}", page
->index
);
530 * invalidate part or all of a page
531 * - release a page and clean up its private data if offset is 0 (indicating
534 static void afs_invalidatepage(struct page
*page
, unsigned int offset
,
537 struct afs_writeback
*wb
= (struct afs_writeback
*) page_private(page
);
539 _enter("{%lu},%u,%u", page
->index
, offset
, length
);
541 BUG_ON(!PageLocked(page
));
543 /* we clean up only if the entire page is being invalidated */
544 if (offset
== 0 && length
== PAGE_SIZE
) {
545 #ifdef CONFIG_AFS_FSCACHE
546 if (PageFsCache(page
)) {
547 struct afs_vnode
*vnode
= AFS_FS_I(page
->mapping
->host
);
548 fscache_wait_on_page_write(vnode
->cache
, page
);
549 fscache_uncache_page(vnode
->cache
, page
);
553 if (PagePrivate(page
)) {
554 if (wb
&& !PageWriteback(page
)) {
555 set_page_private(page
, 0);
556 afs_put_writeback(wb
);
559 if (!page_private(page
))
560 ClearPagePrivate(page
);
568 * release a page and clean up its private state if it's not busy
569 * - return true if the page can now be released, false if not
571 static int afs_releasepage(struct page
*page
, gfp_t gfp_flags
)
573 struct afs_writeback
*wb
= (struct afs_writeback
*) page_private(page
);
574 struct afs_vnode
*vnode
= AFS_FS_I(page
->mapping
->host
);
576 _enter("{{%x:%u}[%lu],%lx},%x",
577 vnode
->fid
.vid
, vnode
->fid
.vnode
, page
->index
, page
->flags
,
580 /* deny if page is being written to the cache and the caller hasn't
582 #ifdef CONFIG_AFS_FSCACHE
583 if (!fscache_maybe_release_page(vnode
->cache
, page
, gfp_flags
)) {
584 _leave(" = F [cache busy]");
589 if (PagePrivate(page
)) {
591 set_page_private(page
, 0);
592 afs_put_writeback(wb
);
594 ClearPagePrivate(page
);
597 /* indicate that the page can be released */