/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
#include "internal.h"

static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);

static int afs_readpages(struct file *filp, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages);

const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= afs_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
	.listxattr	= afs_listxattr,
};

const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};

static const struct vm_operations_struct afs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= afs_page_mkwrite,
};

/*
 * Discard a pin on a writeback key.
 */
void afs_put_wb_key(struct afs_wb_key *wbk)
{
	if (refcount_dec_and_test(&wbk->usage)) {
		key_put(wbk->key);
		kfree(wbk);
	}
}

/*
 * Cache key for writeback.
 */
int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af)
{
	struct afs_wb_key *wbk, *p;

	wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL);
	if (!wbk)
		return -ENOMEM;
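	/* Start the count at 2: if this key isn't already on the vnode's
	 * wb_keys list, one ref will belong to that list and one to af->wb;
	 * otherwise the new record is simply freed again below.
	 */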
	refcount_set(&wbk->usage, 2);
	wbk->key = af->key;

	spin_lock(&vnode->wb_lock);
	list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
		if (p->key == wbk->key)
			goto found;
	}

	key_get(wbk->key);
	list_add_tail(&wbk->vnode_link, &vnode->wb_keys);
	spin_unlock(&vnode->wb_lock);
	af->wb = wbk;
	return 0;

found:
	refcount_inc(&p->usage);
	spin_unlock(&vnode->wb_lock);
	af->wb = p;
	kfree(wbk);
	return 0;
}

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af;
	struct key *key;
	int ret;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error;
	}

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		ret = -ENOMEM;
		goto error_key;
	}
	af->key = key;

	ret = afs_validate(vnode, key);
	if (ret < 0)
		goto error_af;

	if (file->f_mode & FMODE_WRITE) {
		ret = afs_cache_wb_key(vnode, af);
		if (ret < 0)
			goto error_af;
	}

	if (file->f_flags & O_TRUNC)
		set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);

	file->private_data = af;
	_leave(" = 0");
	return 0;

error_af:
	kfree(af);
error_key:
	key_put(key);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	int ret = 0;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

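	/* Flush any outstanding writes back to the server before the
	 * writeback key pinned by this file is discarded below.
	 */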
	if ((file->f_mode & FMODE_WRITE))
		ret = vfs_fsync(file, 0);

	file->private_data = NULL;
	if (af->wb)
		afs_put_wb_key(af->wb);
	key_put(af->key);
	kfree(af);
	afs_prune_wb_keys(vnode);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Dispose of a ref to a read record.
 */
void afs_put_read(struct afs_read *req)
{
	int i;

	if (refcount_dec_and_test(&req->usage)) {
		for (i = 0; i < req->nr_pages; i++)
			if (req->pages[i])
				put_page(req->pages[i]);
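		/* Only free the page pointer array if it was allocated
		 * separately rather than being the inline req->array.
		 */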
		if (req->pages != req->array)
			kfree(req->pages);
		kfree(req);
	}
}

#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif

/*
 * Fetch file data from the volume.
 */
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
{
	struct afs_fs_cursor fc;
	struct afs_status_cb *scb;
	int ret;

	_enter("%s{%llx:%llu.%u},%x,,,",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
	if (!scb)
		return -ENOMEM;

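	/* Rotate through the fileservers serving this volume, reissuing the
	 * fetch until one call succeeds or no further server can be tried.
	 */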
	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
		afs_dataversion_t data_version = vnode->status.data_version;

		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_fetch_data(&fc, scb, desc);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break,
					&data_version, scb);
		ret = afs_end_vnode_operation(&fc);
	}

	if (ret == 0) {
		afs_stat_v(vnode, n_fetches);
		atomic_long_add(desc->actual_len,
				&afs_v2net(vnode)->n_fetch_bytes);
	}

	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}

/*
 * read page from file, directory or symlink, given a key to use
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");

		/* fall through */
	default:
	go_on:
		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
			      GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page. If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
		 */
		refcount_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages = req->array;
		req->pages[0] = page;
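		/* Take an extra ref on the page for the read request; it is
		 * dropped again by afs_put_read().
		 */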
		get_page(page);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_fetch_data(vnode, key, req);
		afs_put_read(req);

		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, vnode->status.size,
				       GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * read page from file, directory or symlink, given a file to nominate the key
 * to be used
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct key *key;
	int ret;

	if (file) {
		key = afs_file_key(file);
		ASSERT(key != NULL);
		ret = afs_page_filler(key, page);
	} else {
		struct inode *inode = page->mapping->host;
		key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
		if (IS_ERR(key)) {
			ret = PTR_ERR(key);
		} else {
			ret = afs_page_filler(key, page);
			key_put(key);
		}
	}
	return ret;
}

/*
 * Make pages available as they're filled.
 */
static void afs_readpages_page_done(struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = req->vnode;
#endif
	struct page *page = req->pages[req->index];

	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, vnode->status.size,
			       GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}

/*
 * Read a contiguous set of pages.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = afs_file_key(file);
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list. Note
	 * that the list goes prev-wards rather than next-wards.
	 */
	first = lru_to_page(pages);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

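	/* Allocate a request with space for n page pointers appended to it.
	 * GFP_NOFS is used so that the allocation cannot recurse back into
	 * the filesystem while we are in the read path.
	 */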
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
		      GFP_NOFS);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->vnode = vnode;
	req->page_done = afs_readpages_page_done;
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;
	req->pages = req->array;

	/* Transfer the pages to the request. We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run.
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank. We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = lru_to_page(pages);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}

/*
 * read a set of pages
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = afs_file_key(file);
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}

/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length)
{
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
	unsigned long priv;

	_enter("{%lu},%u,%u", page->index, offset, length);

	BUG_ON(!PageLocked(page));

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == PAGE_SIZE) {
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
		}
#endif

		if (PagePrivate(page)) {
			priv = page_private(page);
			trace_afs_page_dirty(vnode, tracepoint_string("inval"),
					     page->index, priv);
			set_page_private(page, 0);
			ClearPagePrivate(page);
		}
	}

	_leave("");
}

/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
	unsigned long priv;

	_enter("{{%llx:%llu}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	if (PagePrivate(page)) {
		priv = page_private(page);
		trace_afs_page_dirty(vnode, tracepoint_string("rel"),
				     page->index, priv);
		set_page_private(page, 0);
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}

/*
 * Handle setting up a memory mapping on an AFS file.
 */
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret;

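	/* Set up the mapping with the generic helper, then substitute our own
	 * vm_ops so that write faults are routed to afs_page_mkwrite().
	 */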
	ret = generic_file_mmap(file, vma);
	if (ret == 0)
		vma->vm_ops = &afs_vm_ops;
	return ret;
}