/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone_lru_lock(zone)	(follow_page->mark_page_accessed)
 *    ->zone_lru_lock(zone)	(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;

		mapping->nrexceptional--;
		if (!dax_mapping(mapping)) {
			if (shadowp)
				*shadowp = p;
		} else {
			/* DAX can replace empty locked entry with a hole */
			WARN_ON_ONCE(p !=
				dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
			/* Wakeup waiters for exceptional entry lock */
			dax_wake_mapping_entry_waiter(mapping, page->index, p,
						      true);
		}
	}
	__radix_tree_replace(&mapping->page_tree, node, slot, page,
			     workingset_update_node, mapping);
	mapping->nrpages++;
	return 0;
}

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	int i, nr;

	/* hugetlb pages are represented by one entry in the radix tree */
	nr = PageHuge(page) ? 1 : hpage_nr_pages(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(nr != 1 && shadow, page);

	for (i = 0; i < nr; i++) {
		struct radix_tree_node *node;
		void **slot;

		__radix_tree_lookup(&mapping->page_tree, page->index + i,
				    &node, &slot);

		VM_BUG_ON_PAGE(!node && nr != 1, page);

		radix_tree_clear_tags(&mapping->page_tree, node, slot);
		__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
				     workingset_update_node, mapping);
	}

	if (shadow) {
		mapping->nrexceptional += nr;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages -= nr;
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;
	int nr = hpage_nr_pages(page);

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			page_ref_sub(page, mapcount);
		}
	}

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_node_page_state(page, NR_SHMEM_THPS);
	} else {
		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
	}

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Dirty page here signals a bug and loss of unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for truncated page and
	 * anyway will be cleared before returning page into buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (freepage)
		freepage(page);

	if (PageTransHuge(page) && !PageHuge(page)) {
		page_ref_sub(page, HPAGE_PMD_NR);
		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
	} else {
		put_page(page);
	}
}
EXPORT_SYMBOL(delete_from_page_cache);

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

static int __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	return ret;
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	int ret, ret2;

	ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
void filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return;

	__filemap_fdatawait_range(mapping, 0, i_size - 1);
}

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Check error status of the address space
 * and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);

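/*
 * Illustrative usage sketch (not part of the original file): a simple
 * filesystem ->fsync() implementation typically reduces to this helper
 * plus whatever metadata sync the filesystem needs. The names
 * example_fsync() and example_sync_metadata() below are hypothetical:
 *
 *	int example_fsync(struct file *file, loff_t start, loff_t end,
 *			  int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int err;
 *
 *		err = filemap_write_and_wait_range(inode->i_mapping,
 *						   start, end);
 *		if (err)
 *			return err;
 *		return example_sync_metadata(inode, datasync);
 *	}
 */
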
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		get_page(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL);
		error = page_cache_tree_insert(mapping, new, NULL);
		BUG_ON(error);

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_node_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_node_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_migrate(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		put_page(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_node_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		if (!(gfp_mask & __GFP_WRITE) &&
		    shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

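/*
 * Illustrative usage sketch (not part of the original file): read paths
 * such as readahead pair this helper with page allocation and the
 * mapping's ->readpage() roughly as follows. example_read_one() is a
 * hypothetical name:
 *
 *	static int example_read_one(struct address_space *mapping,
 *				    pgoff_t index)
 *	{
 *		struct page *page = __page_cache_alloc(GFP_KERNEL);
 *		int err;
 *
 *		if (!page)
 *			return -ENOMEM;
 *		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *		if (!err)
 *			err = mapping->a_ops->readpage(NULL, page);
 *		else if (err == -EEXIST)
 *			err = 0;	// raced: page is already cached
 *		put_page(page);
 *		return err;
 *	}
 */
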
#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *page_waitqueue(struct page *page)
{
	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&page_wait_table[i]);

	page_writeback_init();
}

struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_t wait;
};

static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (wait_page->page != key->page)
		return 0;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return 0;
	if (test_bit(key->bit_nr, &key->page->flags))
		return 0;

	return autoremove_wake_function(wait, mode, sync, key);
}

static void wake_up_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	struct wait_page_key key;
	unsigned long flags;

	key.page = page;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key(q, TASK_NORMAL, &key);
	/*
	 * It is possible for other pages to have collided on the waitqueue
	 * hash, so in that case check for a page match. That prevents a long-
	 * term waiter
	 *
	 * It is still possible to miss a case here, when we woke page waiters
	 * and removed them from the waitqueue, but there are still other
	 * page waiters.
	 */
	if (!waitqueue_active(q) || !key.page_match) {
		ClearPageWaiters(page);
		/*
		 * It's possible to miss clearing Waiters here, when we woke
		 * our page waiters, but the hashed waitqueue has waiters for
		 * other pages on it.
		 *
		 * That's okay, it's a rare case. The next waker will clear it.
		 */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

static void wake_up_page(struct page *page, int bit)
{
	if (!PageWaiters(page))
		return;
	wake_up_page_bit(page, bit);
}

static inline int wait_on_page_bit_common(wait_queue_head_t *q,
		struct page *page, int bit_nr, int state, bool lock)
{
	struct wait_page_queue wait_page;
	wait_queue_t *wait = &wait_page.wait;
	int ret = 0;

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.page = page;
	wait_page.bit_nr = bit_nr;

	for (;;) {
		spin_lock_irq(&q->lock);

		if (likely(list_empty(&wait->task_list))) {
			if (lock)
				__add_wait_queue_tail_exclusive(q, wait);
			else
				__add_wait_queue(q, wait);
			SetPageWaiters(page);
		}

		set_current_state(state);

		spin_unlock_irq(&q->lock);

		if (likely(test_bit(bit_nr, &page->flags))) {
			io_schedule();
			if (unlikely(signal_pending_state(state, current))) {
				ret = -EINTR;
				break;
			}
		}

		if (lock) {
			if (!test_and_set_bit_lock(bit_nr, &page->flags))
				break;
		} else {
			if (!test_bit(bit_nr, &page->flags))
				break;
		}
	}

	finish_wait(q, wait);

	/*
	 * A signal could leave PageWaiters set. Clearing it here if
	 * !waitqueue_active would be possible (by open-coding finish_wait),
	 * but still fail to catch it in the case of wait hash collision. We
	 * already can fail to clear wait hash collision cases, so don't
	 * bother with signals either.
	 */

	return ret;
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	SetPageWaiters(page);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_lock.
 *
 * On x86 (and on many other architectures), we can clear PG_lock and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * Note that this depends on PG_waiters being the sign bit in the byte
 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 * clear the PG_locked bit and test PG_waiters at the same time fairly
 * portably (architectures that do LL/SC can test any bit, while x86 can
 * test the sign bit).
 */
void unlock_page(struct page *page)
{
	BUILD_BUG_ON(PG_waiters != 7);
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
		wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, bool is_write, int err)
{
	if (!is_write) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else {
		if (err) {
			struct address_space *mapping;

			SetPageError(page);
			mapping = page_mapping(page);
			if (mapping)
				mapping_set_error(mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

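/*
 * Illustrative usage sketch (not part of the original file): a
 * block-backed filesystem's bio completion handler typically funnels
 * into page_endio() once per page. example_read_endio() is a
 * hypothetical name, and the bio fields assume a kernel of roughly
 * this era:
 *
 *	static void example_read_endio(struct bio *bio)
 *	{
 *		struct bio_vec *bvec;
 *		int i;
 *
 *		bio_for_each_segment_all(bvec, bio, i)
 *			page_endio(bvec->bv_page, false,
 *				   blk_status_to_errno(bio->bi_status));
 *		bio_put(bio);
 *	}
 */
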
/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @__page: the page to lock
 */
void __lock_page(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

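/*
 * Illustrative usage sketch (not part of the original file):
 * readahead-style code can use this helper to measure how much of a
 * window is already cached. example_cached_run() is a hypothetical
 * name:
 *
 *	static unsigned long example_cached_run(struct address_space *mapping,
 *						pgoff_t index,
 *						unsigned long max)
 *	{
 *		pgoff_t hole;
 *
 *		rcu_read_lock();
 *		hole = page_cache_next_hole(mapping, index, max);
 *		rcu_read_unlock();
 *		return hole - index;	// pages present before the first hole
 *	}
 */
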
/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *head, *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			put_page(head);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page_mapping(page) != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * FGP flags modify how the page is returned.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: if the page is not present then a new page is allocated using
 *	@gfp_mask and added to the page cache and the VM's LRU
 *	list. The page is returned locked and with an increased
 *	refcount. Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				put_page(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
				gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			put_page(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);

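/*
 * Note (added here for illustration, not part of the original file):
 * the convenience lookups in include/linux/pagemap.h are thin wrappers
 * around pagecache_get_page(). For instance, find_or_create_page() is
 * essentially:
 *
 *	static inline struct page *find_or_create_page(
 *			struct address_space *mapping,
 *			pgoff_t offset, gfp_t gfp_mask)
 *	{
 *		return pagecache_get_page(mapping, offset,
 *				FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *				gfp_mask);
 *	}
 */
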
/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* A hole - there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
			put_page(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

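/*
 * Illustrative usage sketch (not part of the original file): a
 * writeback-style walk over all dirty pages of a mapping can be built
 * on this helper. example_visit_dirty() is a hypothetical name:
 *
 *	pgoff_t index = 0;
 *	struct page *pages[PAGEVEC_SIZE];
 *	unsigned i, nr;
 *
 *	while ((nr = find_get_pages_tag(mapping, &index,
 *					PAGECACHE_TAG_DIRTY,
 *					PAGEVEC_SIZE, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			example_visit_dirty(pages[i]);
 *			put_page(pages[i]);
 *		}
 *		cond_resched();
 *	}
 */
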
7e7f7749
RZ
1668/**
1669 * find_get_entries_tag - find and return entries that match @tag
1670 * @mapping: the address_space to search
1671 * @start: the starting page cache index
1672 * @tag: the tag index
1673 * @nr_entries: the maximum number of entries
1674 * @entries: where the resulting entries are placed
1675 * @indices: the cache indices corresponding to the entries in @entries
1676 *
1677 * Like find_get_entries, except we only return entries which are tagged with
1678 * @tag.
1679 */
1680unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
1681 int tag, unsigned int nr_entries,
1682 struct page **entries, pgoff_t *indices)
1683{
1684 void **slot;
1685 unsigned int ret = 0;
1686 struct radix_tree_iter iter;
1687
1688 if (!nr_entries)
1689 return 0;
1690
1691 rcu_read_lock();
7e7f7749
RZ
1692 radix_tree_for_each_tagged(slot, &mapping->page_tree,
1693 &iter, start, tag) {
83929372 1694 struct page *head, *page;
7e7f7749
RZ
1695repeat:
1696 page = radix_tree_deref_slot(slot);
1697 if (unlikely(!page))
1698 continue;
1699 if (radix_tree_exception(page)) {
1700 if (radix_tree_deref_retry(page)) {
2cf938aa
MW
1701 slot = radix_tree_iter_retry(&iter);
1702 continue;
7e7f7749
RZ
1703 }
1704
1705 /*
1706 * A shadow entry of a recently evicted page, a swap
1707 * entry from shmem/tmpfs or a DAX entry. Return it
1708 * without attempting to raise page count.
1709 */
1710 goto export;
1711 }
83929372
KS
1712
1713 head = compound_head(page);
1714 if (!page_cache_get_speculative(head))
7e7f7749
RZ
1715 goto repeat;
1716
83929372
KS
1717 /* The page was split under us? */
1718 if (compound_head(page) != head) {
1719 put_page(head);
1720 goto repeat;
1721 }
1722
7e7f7749
RZ
1723 /* Has the page moved? */
1724 if (unlikely(page != *slot)) {
83929372 1725 put_page(head);
7e7f7749
RZ
1726 goto repeat;
1727 }
1728export:
1729 indices[ret] = iter.index;
1730 entries[ret] = page;
1731 if (++ret == nr_entries)
1732 break;
1733 }
1734 rcu_read_unlock();
1735 return ret;
1736}
1737EXPORT_SYMBOL(find_get_entries_tag);
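/*
 * Illustrative sketch (not part of this file): a hypothetical consumer of
 * find_get_entries_tag().  Unlike find_get_pages_tag(), the result may
 * contain exceptional entries (shadow, shmem swap or DAX), which carry no
 * page reference and must not be passed to put_page(); DAX writeback is
 * the main user that cares about this distinction.
 */
static void example_consume_tagged_entries(struct address_space *mapping)
{
	struct page *entries[16];
	pgoff_t indices[16];
	unsigned int i, nr;

	nr = find_get_entries_tag(mapping, 0, PAGECACHE_TAG_TOWRITE,
				  16, entries, indices);
	for (i = 0; i < nr; i++) {
		if (radix_tree_exceptional_entry(entries[i]))
			continue;	/* no reference held on these */
		/* ... entries[i] is a real page at index indices[i] ... */
		put_page(entries[i]);
	}
}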
1738
76d42bd9
WF
1739/*
1740 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1741 * a _large_ part of the i/o request. Imagine the worst scenario:
1742 *
1743 * ---R__________________________________________B__________
1744 * ^ reading here ^ bad block(assume 4k)
1745 *
1746 * read(R) => miss => readahead(R...B) => media error => frustrating retries
1747 * => failing the whole request => read(R) => read(R+1) =>
1748 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1749 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1750 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1751 *
1752 * It is going insane. Fix it by quickly scaling down the readahead size.
1753 */
1754static void shrink_readahead_size_eio(struct file *filp,
1755 struct file_ra_state *ra)
1756{
76d42bd9 1757 ra->ra_pages /= 4;
76d42bd9
WF
1758}
1759
485bb99b 1760/**
36e78914 1761 * do_generic_file_read - generic file read routine
485bb99b
RD
1762 * @filp: the file to read
1763 * @ppos: current file position
6e58e79d
AV
1764 * @iter: data destination
1765 * @written: already copied
485bb99b 1766 *
1da177e4 1767 * This is a generic file read routine, and uses the
485bb99b 1768 * mapping->a_ops->readpage() function for the actual low-level stuff.
1da177e4
LT
1769 *
 1770 * This is really ugly, but the gotos actually try to clarify some
1771 * of the logic when it comes to error handling etc.
1da177e4 1772 */
6e58e79d
AV
1773static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1774 struct iov_iter *iter, ssize_t written)
1da177e4 1775{
36e78914 1776 struct address_space *mapping = filp->f_mapping;
1da177e4 1777 struct inode *inode = mapping->host;
36e78914 1778 struct file_ra_state *ra = &filp->f_ra;
57f6b96c
FW
1779 pgoff_t index;
1780 pgoff_t last_index;
1781 pgoff_t prev_index;
1782 unsigned long offset; /* offset into pagecache page */
ec0f1637 1783 unsigned int prev_offset;
6e58e79d 1784 int error = 0;
1da177e4 1785
c2a9737f 1786 if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
d05c5f7b 1787 return 0;
c2a9737f
WF
1788 iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
1789
09cbfeaf
KS
1790 index = *ppos >> PAGE_SHIFT;
1791 prev_index = ra->prev_pos >> PAGE_SHIFT;
1792 prev_offset = ra->prev_pos & (PAGE_SIZE-1);
1793 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
1794 offset = *ppos & ~PAGE_MASK;
1da177e4 1795
1da177e4
LT
1796 for (;;) {
1797 struct page *page;
57f6b96c 1798 pgoff_t end_index;
a32ea1e1 1799 loff_t isize;
1da177e4
LT
1800 unsigned long nr, ret;
1801
1da177e4 1802 cond_resched();
1da177e4 1803find_page:
5abf186a
MH
1804 if (fatal_signal_pending(current)) {
1805 error = -EINTR;
1806 goto out;
1807 }
1808
1da177e4 1809 page = find_get_page(mapping, index);
3ea89ee8 1810 if (!page) {
cf914a7d 1811 page_cache_sync_readahead(mapping,
7ff81078 1812 ra, filp,
3ea89ee8
FW
1813 index, last_index - index);
1814 page = find_get_page(mapping, index);
1815 if (unlikely(page == NULL))
1816 goto no_cached_page;
1817 }
1818 if (PageReadahead(page)) {
cf914a7d 1819 page_cache_async_readahead(mapping,
7ff81078 1820 ra, filp, page,
3ea89ee8 1821 index, last_index - index);
1da177e4 1822 }
8ab22b9a 1823 if (!PageUptodate(page)) {
ebded027
MG
1824 /*
1825 * See comment in do_read_cache_page on why
 1826 * wait_on_page_locked is used to avoid unnecessary
1827 * serialisations and why it's safe.
1828 */
c4b209a4
BVA
1829 error = wait_on_page_locked_killable(page);
1830 if (unlikely(error))
1831 goto readpage_error;
ebded027
MG
1832 if (PageUptodate(page))
1833 goto page_ok;
1834
09cbfeaf 1835 if (inode->i_blkbits == PAGE_SHIFT ||
8ab22b9a
HH
1836 !mapping->a_ops->is_partially_uptodate)
1837 goto page_not_up_to_date;
6d6d36bc
EG
1838 /* pipes can't handle partially uptodate pages */
1839 if (unlikely(iter->type & ITER_PIPE))
1840 goto page_not_up_to_date;
529ae9aa 1841 if (!trylock_page(page))
8ab22b9a 1842 goto page_not_up_to_date;
8d056cb9
DH
1843 /* Did it get truncated before we got the lock? */
1844 if (!page->mapping)
1845 goto page_not_up_to_date_locked;
8ab22b9a 1846 if (!mapping->a_ops->is_partially_uptodate(page,
6e58e79d 1847 offset, iter->count))
8ab22b9a
HH
1848 goto page_not_up_to_date_locked;
1849 unlock_page(page);
1850 }
1da177e4 1851page_ok:
a32ea1e1
N
1852 /*
1853 * i_size must be checked after we know the page is Uptodate.
1854 *
 1855 * Checking i_size after the Uptodate check allows us to calculate
1856 * the correct value for "nr", which means the zero-filled
1857 * part of the page is not copied back to userspace (unless
1858 * another truncate extends the file - this is desired though).
1859 */
1860
1861 isize = i_size_read(inode);
09cbfeaf 1862 end_index = (isize - 1) >> PAGE_SHIFT;
a32ea1e1 1863 if (unlikely(!isize || index > end_index)) {
09cbfeaf 1864 put_page(page);
a32ea1e1
N
1865 goto out;
1866 }
1867
1868 /* nr is the maximum number of bytes to copy from this page */
09cbfeaf 1869 nr = PAGE_SIZE;
a32ea1e1 1870 if (index == end_index) {
09cbfeaf 1871 nr = ((isize - 1) & ~PAGE_MASK) + 1;
a32ea1e1 1872 if (nr <= offset) {
09cbfeaf 1873 put_page(page);
a32ea1e1
N
1874 goto out;
1875 }
1876 }
1877 nr = nr - offset;
1da177e4
LT
1878
1879 /* If users can be writing to this page using arbitrary
1880 * virtual addresses, take care about potential aliasing
1881 * before reading the page on the kernel side.
1882 */
1883 if (mapping_writably_mapped(mapping))
1884 flush_dcache_page(page);
1885
1886 /*
ec0f1637
JK
1887 * When a sequential read accesses a page several times,
1888 * only mark it as accessed the first time.
1da177e4 1889 */
ec0f1637 1890 if (prev_index != index || offset != prev_offset)
1da177e4
LT
1891 mark_page_accessed(page);
1892 prev_index = index;
1893
1894 /*
1895 * Ok, we have the page, and it's up-to-date, so
1896 * now we can copy it to user space...
1da177e4 1897 */
6e58e79d
AV
1898
1899 ret = copy_page_to_iter(page, offset, nr, iter);
1da177e4 1900 offset += ret;
09cbfeaf
KS
1901 index += offset >> PAGE_SHIFT;
1902 offset &= ~PAGE_MASK;
6ce745ed 1903 prev_offset = offset;
1da177e4 1904
09cbfeaf 1905 put_page(page);
6e58e79d
AV
1906 written += ret;
1907 if (!iov_iter_count(iter))
1908 goto out;
1909 if (ret < nr) {
1910 error = -EFAULT;
1911 goto out;
1912 }
1913 continue;
1da177e4
LT
1914
1915page_not_up_to_date:
1916 /* Get exclusive access to the page ... */
85462323
ON
1917 error = lock_page_killable(page);
1918 if (unlikely(error))
1919 goto readpage_error;
1da177e4 1920
8ab22b9a 1921page_not_up_to_date_locked:
da6052f7 1922 /* Did it get truncated before we got the lock? */
1da177e4
LT
1923 if (!page->mapping) {
1924 unlock_page(page);
09cbfeaf 1925 put_page(page);
1da177e4
LT
1926 continue;
1927 }
1928
1929 /* Did somebody else fill it already? */
1930 if (PageUptodate(page)) {
1931 unlock_page(page);
1932 goto page_ok;
1933 }
1934
1935readpage:
91803b49
JM
1936 /*
1937 * A previous I/O error may have been due to temporary
 1938 * failures, e.g. multipath errors.
1939 * PG_error will be set again if readpage fails.
1940 */
1941 ClearPageError(page);
1da177e4
LT
1942 /* Start the actual read. The read will unlock the page. */
1943 error = mapping->a_ops->readpage(filp, page);
1944
994fc28c
ZB
1945 if (unlikely(error)) {
1946 if (error == AOP_TRUNCATED_PAGE) {
09cbfeaf 1947 put_page(page);
6e58e79d 1948 error = 0;
994fc28c
ZB
1949 goto find_page;
1950 }
1da177e4 1951 goto readpage_error;
994fc28c 1952 }
1da177e4
LT
1953
1954 if (!PageUptodate(page)) {
85462323
ON
1955 error = lock_page_killable(page);
1956 if (unlikely(error))
1957 goto readpage_error;
1da177e4
LT
1958 if (!PageUptodate(page)) {
1959 if (page->mapping == NULL) {
1960 /*
2ecdc82e 1961 * invalidate_mapping_pages got it
1da177e4
LT
1962 */
1963 unlock_page(page);
09cbfeaf 1964 put_page(page);
1da177e4
LT
1965 goto find_page;
1966 }
1967 unlock_page(page);
7ff81078 1968 shrink_readahead_size_eio(filp, ra);
85462323
ON
1969 error = -EIO;
1970 goto readpage_error;
1da177e4
LT
1971 }
1972 unlock_page(page);
1973 }
1974
1da177e4
LT
1975 goto page_ok;
1976
1977readpage_error:
1978 /* UHHUH! A synchronous read error occurred. Report it */
09cbfeaf 1979 put_page(page);
1da177e4
LT
1980 goto out;
1981
1982no_cached_page:
1983 /*
1984 * Ok, it wasn't cached, so we need to create a new
 1985 * page.
1986 */
eb2be189
NP
1987 page = page_cache_alloc_cold(mapping);
1988 if (!page) {
6e58e79d 1989 error = -ENOMEM;
eb2be189 1990 goto out;
1da177e4 1991 }
6afdb859 1992 error = add_to_page_cache_lru(page, mapping, index,
c62d2555 1993 mapping_gfp_constraint(mapping, GFP_KERNEL));
1da177e4 1994 if (error) {
09cbfeaf 1995 put_page(page);
6e58e79d
AV
1996 if (error == -EEXIST) {
1997 error = 0;
1da177e4 1998 goto find_page;
6e58e79d 1999 }
1da177e4
LT
2000 goto out;
2001 }
1da177e4
LT
2002 goto readpage;
2003 }
2004
2005out:
7ff81078 2006 ra->prev_pos = prev_index;
09cbfeaf 2007 ra->prev_pos <<= PAGE_SHIFT;
7ff81078 2008 ra->prev_pos |= prev_offset;
1da177e4 2009
09cbfeaf 2010 *ppos = ((loff_t)index << PAGE_SHIFT) + offset;
0c6aa263 2011 file_accessed(filp);
6e58e79d 2012 return written ? written : error;
1da177e4
LT
2013}
2014
485bb99b 2015/**
6abd2322 2016 * generic_file_read_iter - generic filesystem read routine
485bb99b 2017 * @iocb: kernel I/O control block
6abd2322 2018 * @iter: destination for the data read
485bb99b 2019 *
6abd2322 2020 * This is the "read_iter()" routine for all filesystems
1da177e4
LT
2021 * that can use the page cache directly.
2022 */
2023ssize_t
ed978a81 2024generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1da177e4 2025{
ed978a81 2026 struct file *file = iocb->ki_filp;
cb66a7a1 2027 ssize_t retval = 0;
e7080a43
NS
2028 size_t count = iov_iter_count(iter);
2029
2030 if (!count)
2031 goto out; /* skip atime */
1da177e4 2032
2ba48ce5 2033 if (iocb->ki_flags & IOCB_DIRECT) {
ed978a81
AV
2034 struct address_space *mapping = file->f_mapping;
2035 struct inode *inode = mapping->host;
0d5b0cf2 2036 struct iov_iter data = *iter;
543ade1f 2037 loff_t size;
1da177e4 2038
1da177e4 2039 size = i_size_read(inode);
c64fb5c7
CH
2040 retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
2041 iocb->ki_pos + count - 1);
0d5b0cf2
CH
2042 if (retval < 0)
2043 goto out;
d8d3d94b 2044
0d5b0cf2
CH
2045 file_accessed(file);
2046
2047 retval = mapping->a_ops->direct_IO(iocb, &data);
c3a69024 2048 if (retval >= 0) {
c64fb5c7 2049 iocb->ki_pos += retval;
ed978a81 2050 iov_iter_advance(iter, retval);
9fe55eea 2051 }
66f998f6 2052
9fe55eea
SW
2053 /*
2054 * Btrfs can have a short DIO read if we encounter
2055 * compressed extents, so if there was an error, or if
2056 * we've already read everything we wanted to, or if
2057 * there was a short read because we hit EOF, go ahead
 2058 * and return. Otherwise fall through to buffered I/O for
fbbbad4b
MW
2059 * the rest of the read. Buffered reads will not work for
2060 * DAX files, so don't bother trying.
9fe55eea 2061 */
c64fb5c7 2062 if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size ||
0d5b0cf2 2063 IS_DAX(inode))
9fe55eea 2064 goto out;
1da177e4
LT
2065 }
2066
c64fb5c7 2067 retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval);
1da177e4
LT
2068out:
2069 return retval;
2070}
ed978a81 2071EXPORT_SYMBOL(generic_file_read_iter);
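/*
 * Illustrative sketch (not part of this file): a filesystem that keeps its
 * file data in the page cache can usually plug this straight into its
 * file_operations; ext2 and the kernel's generic_ro_fops do essentially
 * this (the example_* name here is hypothetical):
 */
static const struct file_operations example_cached_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
};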
1da177e4 2072
1da177e4 2073#ifdef CONFIG_MMU
485bb99b
RD
2074/**
2075 * page_cache_read - adds requested page to the page cache if not already there
2076 * @file: file to read
2077 * @offset: page index
62eb320a 2078 * @gfp_mask: memory allocation flags
485bb99b 2079 *
1da177e4
LT
2080 * This adds the requested page to the page cache if it isn't already there,
2081 * and schedules an I/O to read in its contents from disk.
2082 */
c20cd45e 2083static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
1da177e4
LT
2084{
2085 struct address_space *mapping = file->f_mapping;
99dadfdd 2086 struct page *page;
994fc28c 2087 int ret;
1da177e4 2088
994fc28c 2089 do {
c20cd45e 2090 page = __page_cache_alloc(gfp_mask|__GFP_COLD);
994fc28c
ZB
2091 if (!page)
2092 return -ENOMEM;
2093
c20cd45e 2094 ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
994fc28c
ZB
2095 if (ret == 0)
2096 ret = mapping->a_ops->readpage(file, page);
2097 else if (ret == -EEXIST)
2098 ret = 0; /* losing race to add is OK */
1da177e4 2099
09cbfeaf 2100 put_page(page);
1da177e4 2101
994fc28c 2102 } while (ret == AOP_TRUNCATED_PAGE);
99dadfdd 2103
994fc28c 2104 return ret;
1da177e4
LT
2105}
2106
2107#define MMAP_LOTSAMISS (100)
2108
ef00e08e
LT
2109/*
2110 * Synchronous readahead happens when we don't even find
2111 * a page in the page cache at all.
2112 */
2113static void do_sync_mmap_readahead(struct vm_area_struct *vma,
2114 struct file_ra_state *ra,
2115 struct file *file,
2116 pgoff_t offset)
2117{
ef00e08e
LT
2118 struct address_space *mapping = file->f_mapping;
2119
2120 /* If we don't want any read-ahead, don't bother */
64363aad 2121 if (vma->vm_flags & VM_RAND_READ)
ef00e08e 2122 return;
275b12bf
WF
2123 if (!ra->ra_pages)
2124 return;
ef00e08e 2125
64363aad 2126 if (vma->vm_flags & VM_SEQ_READ) {
7ffc59b4
WF
2127 page_cache_sync_readahead(mapping, ra, file, offset,
2128 ra->ra_pages);
ef00e08e
LT
2129 return;
2130 }
2131
207d04ba
AK
2132 /* Avoid banging the cache line if not needed */
2133 if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
ef00e08e
LT
2134 ra->mmap_miss++;
2135
2136 /*
2137 * Do we miss much more than hit in this file? If so,
2138 * stop bothering with read-ahead. It will only hurt.
2139 */
2140 if (ra->mmap_miss > MMAP_LOTSAMISS)
2141 return;
2142
d30a1100
WF
2143 /*
2144 * mmap read-around
2145 */
600e19af
RG
2146 ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
2147 ra->size = ra->ra_pages;
2148 ra->async_size = ra->ra_pages / 4;
275b12bf 2149 ra_submit(ra, mapping, file);
ef00e08e
LT
2150}
2151
2152/*
2153 * Asynchronous readahead happens when we find the page and PG_readahead,
 2154 * so we want to possibly extend the readahead further.
2155 */
2156static void do_async_mmap_readahead(struct vm_area_struct *vma,
2157 struct file_ra_state *ra,
2158 struct file *file,
2159 struct page *page,
2160 pgoff_t offset)
2161{
2162 struct address_space *mapping = file->f_mapping;
2163
2164 /* If we don't want any read-ahead, don't bother */
64363aad 2165 if (vma->vm_flags & VM_RAND_READ)
ef00e08e
LT
2166 return;
2167 if (ra->mmap_miss > 0)
2168 ra->mmap_miss--;
2169 if (PageReadahead(page))
2fad6f5d
WF
2170 page_cache_async_readahead(mapping, ra, file,
2171 page, offset, ra->ra_pages);
ef00e08e
LT
2172}
2173
485bb99b 2174/**
54cb8821 2175 * filemap_fault - read in file data for page fault handling
d0217ac0 2176 * @vmf: struct vm_fault containing details of the fault
485bb99b 2177 *
54cb8821 2178 * filemap_fault() is invoked via the vma operations vector for a
1da177e4
LT
2179 * mapped memory region to read in file data during a page fault.
2180 *
 2181 * The gotos are kind of ugly, but this streamlines the normal case of having
2182 * it in the page cache, and handles the special cases reasonably without
2183 * having a lot of duplicated code.
9a95f3cf
PC
2184 *
2185 * vma->vm_mm->mmap_sem must be held on entry.
2186 *
2187 * If our return value has VM_FAULT_RETRY set, it's because
2188 * lock_page_or_retry() returned 0.
2189 * The mmap_sem has usually been released in this case.
2190 * See __lock_page_or_retry() for the exception.
2191 *
2192 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
2193 * has not been released.
2194 *
2195 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
1da177e4 2196 */
11bac800 2197int filemap_fault(struct vm_fault *vmf)
1da177e4
LT
2198{
2199 int error;
11bac800 2200 struct file *file = vmf->vma->vm_file;
1da177e4
LT
2201 struct address_space *mapping = file->f_mapping;
2202 struct file_ra_state *ra = &file->f_ra;
2203 struct inode *inode = mapping->host;
ef00e08e 2204 pgoff_t offset = vmf->pgoff;
1da177e4 2205 struct page *page;
99e3e53f 2206 loff_t size;
83c54070 2207 int ret = 0;
1da177e4 2208
09cbfeaf
KS
2209 size = round_up(i_size_read(inode), PAGE_SIZE);
2210 if (offset >= size >> PAGE_SHIFT)
5307cc1a 2211 return VM_FAULT_SIGBUS;
1da177e4 2212
1da177e4 2213 /*
49426420 2214 * Do we have something in the page cache already?
1da177e4 2215 */
ef00e08e 2216 page = find_get_page(mapping, offset);
45cac65b 2217 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
1da177e4 2218 /*
ef00e08e
LT
2219 * We found the page, so try async readahead before
2220 * waiting for the lock.
1da177e4 2221 */
11bac800 2222 do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
45cac65b 2223 } else if (!page) {
ef00e08e 2224 /* No page in the page cache at all */
11bac800 2225 do_sync_mmap_readahead(vmf->vma, ra, file, offset);
ef00e08e 2226 count_vm_event(PGMAJFAULT);
11bac800 2227 mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
ef00e08e
LT
2228 ret = VM_FAULT_MAJOR;
2229retry_find:
b522c94d 2230 page = find_get_page(mapping, offset);
1da177e4
LT
2231 if (!page)
2232 goto no_cached_page;
2233 }
2234
11bac800 2235 if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
09cbfeaf 2236 put_page(page);
d065bd81 2237 return ret | VM_FAULT_RETRY;
d88c0922 2238 }
b522c94d
ML
2239
2240 /* Did it get truncated? */
2241 if (unlikely(page->mapping != mapping)) {
2242 unlock_page(page);
2243 put_page(page);
2244 goto retry_find;
2245 }
309381fe 2246 VM_BUG_ON_PAGE(page->index != offset, page);
b522c94d 2247
1da177e4 2248 /*
d00806b1
NP
2249 * We have a locked page in the page cache, now we need to check
2250 * that it's up-to-date. If not, it is going to be due to an error.
1da177e4 2251 */
d00806b1 2252 if (unlikely(!PageUptodate(page)))
1da177e4
LT
2253 goto page_not_uptodate;
2254
ef00e08e
LT
2255 /*
2256 * Found the page and have a reference on it.
2257 * We must recheck i_size under page lock.
2258 */
09cbfeaf
KS
2259 size = round_up(i_size_read(inode), PAGE_SIZE);
2260 if (unlikely(offset >= size >> PAGE_SHIFT)) {
d00806b1 2261 unlock_page(page);
09cbfeaf 2262 put_page(page);
5307cc1a 2263 return VM_FAULT_SIGBUS;
d00806b1
NP
2264 }
2265
d0217ac0 2266 vmf->page = page;
83c54070 2267 return ret | VM_FAULT_LOCKED;
1da177e4 2268
1da177e4
LT
2269no_cached_page:
2270 /*
2271 * We're only likely to ever get here if MADV_RANDOM is in
2272 * effect.
2273 */
c20cd45e 2274 error = page_cache_read(file, offset, vmf->gfp_mask);
1da177e4
LT
2275
2276 /*
2277 * The page we want has now been added to the page cache.
2278 * In the unlikely event that someone removed it in the
2279 * meantime, we'll just come back here and read it again.
2280 */
2281 if (error >= 0)
2282 goto retry_find;
2283
2284 /*
2285 * An error return from page_cache_read can result if the
2286 * system is low on memory, or a problem occurs while trying
2287 * to schedule I/O.
2288 */
2289 if (error == -ENOMEM)
d0217ac0
NP
2290 return VM_FAULT_OOM;
2291 return VM_FAULT_SIGBUS;
1da177e4
LT
2292
2293page_not_uptodate:
1da177e4
LT
2294 /*
2295 * Umm, take care of errors if the page isn't up-to-date.
2296 * Try to re-read it _once_. We do this synchronously,
2297 * because there really aren't any performance issues here
2298 * and we need to check for errors.
2299 */
1da177e4 2300 ClearPageError(page);
994fc28c 2301 error = mapping->a_ops->readpage(file, page);
3ef0f720
MS
2302 if (!error) {
2303 wait_on_page_locked(page);
2304 if (!PageUptodate(page))
2305 error = -EIO;
2306 }
09cbfeaf 2307 put_page(page);
d00806b1
NP
2308
2309 if (!error || error == AOP_TRUNCATED_PAGE)
994fc28c 2310 goto retry_find;
1da177e4 2311
d00806b1 2312 /* Things didn't work out. Return zero to tell the mm layer so. */
76d42bd9 2313 shrink_readahead_size_eio(file, ra);
d0217ac0 2314 return VM_FAULT_SIGBUS;
54cb8821
NP
2315}
2316EXPORT_SYMBOL(filemap_fault);
2317
82b0f8c3 2318void filemap_map_pages(struct vm_fault *vmf,
bae473a4 2319 pgoff_t start_pgoff, pgoff_t end_pgoff)
f1820361
KS
2320{
2321 struct radix_tree_iter iter;
2322 void **slot;
82b0f8c3 2323 struct file *file = vmf->vma->vm_file;
f1820361 2324 struct address_space *mapping = file->f_mapping;
bae473a4 2325 pgoff_t last_pgoff = start_pgoff;
f1820361 2326 loff_t size;
83929372 2327 struct page *head, *page;
f1820361
KS
2328
2329 rcu_read_lock();
bae473a4
KS
2330 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
2331 start_pgoff) {
2332 if (iter.index > end_pgoff)
f1820361
KS
2333 break;
2334repeat:
2335 page = radix_tree_deref_slot(slot);
2336 if (unlikely(!page))
2337 goto next;
2338 if (radix_tree_exception(page)) {
2cf938aa
MW
2339 if (radix_tree_deref_retry(page)) {
2340 slot = radix_tree_iter_retry(&iter);
2341 continue;
2342 }
2343 goto next;
f1820361
KS
2344 }
2345
83929372
KS
2346 head = compound_head(page);
2347 if (!page_cache_get_speculative(head))
f1820361
KS
2348 goto repeat;
2349
83929372
KS
2350 /* The page was split under us? */
2351 if (compound_head(page) != head) {
2352 put_page(head);
2353 goto repeat;
2354 }
2355
f1820361
KS
2356 /* Has the page moved? */
2357 if (unlikely(page != *slot)) {
83929372 2358 put_page(head);
f1820361
KS
2359 goto repeat;
2360 }
2361
2362 if (!PageUptodate(page) ||
2363 PageReadahead(page) ||
2364 PageHWPoison(page))
2365 goto skip;
2366 if (!trylock_page(page))
2367 goto skip;
2368
2369 if (page->mapping != mapping || !PageUptodate(page))
2370 goto unlock;
2371
09cbfeaf
KS
2372 size = round_up(i_size_read(mapping->host), PAGE_SIZE);
2373 if (page->index >= size >> PAGE_SHIFT)
f1820361
KS
2374 goto unlock;
2375
f1820361
KS
2376 if (file->f_ra.mmap_miss > 0)
2377 file->f_ra.mmap_miss--;
7267ec00 2378
82b0f8c3
JK
2379 vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
2380 if (vmf->pte)
2381 vmf->pte += iter.index - last_pgoff;
7267ec00 2382 last_pgoff = iter.index;
82b0f8c3 2383 if (alloc_set_pte(vmf, NULL, page))
7267ec00 2384 goto unlock;
f1820361
KS
2385 unlock_page(page);
2386 goto next;
2387unlock:
2388 unlock_page(page);
2389skip:
09cbfeaf 2390 put_page(page);
f1820361 2391next:
7267ec00 2392 /* Huge page is mapped? No need to proceed. */
82b0f8c3 2393 if (pmd_trans_huge(*vmf->pmd))
7267ec00 2394 break;
bae473a4 2395 if (iter.index == end_pgoff)
f1820361
KS
2396 break;
2397 }
2398 rcu_read_unlock();
2399}
2400EXPORT_SYMBOL(filemap_map_pages);
2401
11bac800 2402int filemap_page_mkwrite(struct vm_fault *vmf)
4fcf1c62
JK
2403{
2404 struct page *page = vmf->page;
11bac800 2405 struct inode *inode = file_inode(vmf->vma->vm_file);
4fcf1c62
JK
2406 int ret = VM_FAULT_LOCKED;
2407
14da9200 2408 sb_start_pagefault(inode->i_sb);
11bac800 2409 file_update_time(vmf->vma->vm_file);
4fcf1c62
JK
2410 lock_page(page);
2411 if (page->mapping != inode->i_mapping) {
2412 unlock_page(page);
2413 ret = VM_FAULT_NOPAGE;
2414 goto out;
2415 }
14da9200
JK
2416 /*
2417 * We mark the page dirty already here so that when freeze is in
2418 * progress, we are guaranteed that writeback during freezing will
2419 * see the dirty page and writeprotect it again.
2420 */
2421 set_page_dirty(page);
1d1d1a76 2422 wait_for_stable_page(page);
4fcf1c62 2423out:
14da9200 2424 sb_end_pagefault(inode->i_sb);
4fcf1c62
JK
2425 return ret;
2426}
2427EXPORT_SYMBOL(filemap_page_mkwrite);
2428
f0f37e2f 2429const struct vm_operations_struct generic_file_vm_ops = {
54cb8821 2430 .fault = filemap_fault,
f1820361 2431 .map_pages = filemap_map_pages,
4fcf1c62 2432 .page_mkwrite = filemap_page_mkwrite,
1da177e4
LT
2433};
2434
2435/* This is used for a general mmap of a disk file */
2436
2437int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2438{
2439 struct address_space *mapping = file->f_mapping;
2440
2441 if (!mapping->a_ops->readpage)
2442 return -ENOEXEC;
2443 file_accessed(file);
2444 vma->vm_ops = &generic_file_vm_ops;
2445 return 0;
2446}
1da177e4
LT
2447
2448/*
2449 * This is for filesystems which do not implement ->writepage.
2450 */
2451int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
2452{
2453 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2454 return -EINVAL;
2455 return generic_file_mmap(file, vma);
2456}
2457#else
2458int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2459{
2460 return -ENOSYS;
2461}
2462int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
2463{
2464 return -ENOSYS;
2465}
2466#endif /* CONFIG_MMU */
2467
2468EXPORT_SYMBOL(generic_file_mmap);
2469EXPORT_SYMBOL(generic_file_readonly_mmap);
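/*
 * Illustrative sketch (not part of this file): read-only file_operations,
 * cf. generic_ro_fops in fs/read_write.c, use the readonly variant so a
 * shared writable mmap() fails early with -EINVAL:
 */
static const struct file_operations example_ro_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
};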
2470
67f9fd91
SL
2471static struct page *wait_on_page_read(struct page *page)
2472{
2473 if (!IS_ERR(page)) {
2474 wait_on_page_locked(page);
2475 if (!PageUptodate(page)) {
09cbfeaf 2476 put_page(page);
67f9fd91
SL
2477 page = ERR_PTR(-EIO);
2478 }
2479 }
2480 return page;
2481}
2482
32b63529 2483static struct page *do_read_cache_page(struct address_space *mapping,
57f6b96c 2484 pgoff_t index,
5e5358e7 2485 int (*filler)(void *, struct page *),
0531b2aa
LT
2486 void *data,
2487 gfp_t gfp)
1da177e4 2488{
eb2be189 2489 struct page *page;
1da177e4
LT
2490 int err;
2491repeat:
2492 page = find_get_page(mapping, index);
2493 if (!page) {
0531b2aa 2494 page = __page_cache_alloc(gfp | __GFP_COLD);
eb2be189
NP
2495 if (!page)
2496 return ERR_PTR(-ENOMEM);
e6f67b8c 2497 err = add_to_page_cache_lru(page, mapping, index, gfp);
eb2be189 2498 if (unlikely(err)) {
09cbfeaf 2499 put_page(page);
eb2be189
NP
2500 if (err == -EEXIST)
2501 goto repeat;
1da177e4 2502 /* Presumably ENOMEM for radix tree node */
1da177e4
LT
2503 return ERR_PTR(err);
2504 }
32b63529
MG
2505
2506filler:
1da177e4
LT
2507 err = filler(data, page);
2508 if (err < 0) {
09cbfeaf 2509 put_page(page);
32b63529 2510 return ERR_PTR(err);
1da177e4 2511 }
1da177e4 2512
32b63529
MG
2513 page = wait_on_page_read(page);
2514 if (IS_ERR(page))
2515 return page;
2516 goto out;
2517 }
1da177e4
LT
2518 if (PageUptodate(page))
2519 goto out;
2520
ebded027
MG
2521 /*
 2522 * Page is not up to date and may be locked due to one of the following
2523 * case a: Page is being filled and the page lock is held
2524 * case b: Read/write error clearing the page uptodate status
2525 * case c: Truncation in progress (page locked)
2526 * case d: Reclaim in progress
2527 *
2528 * Case a, the page will be up to date when the page is unlocked.
2529 * There is no need to serialise on the page lock here as the page
 2530 * is pinned so the lock gives no additional protection. Even if
 2531 * the page is truncated, the data is still valid if PageUptodate as
 2532 * it's a read vs truncate race.
2533 * Case b, the page will not be up to date
2534 * Case c, the page may be truncated but in itself, the data may still
2535 * be valid after IO completes as it's a read vs truncate race. The
2536 * operation must restart if the page is not uptodate on unlock but
2537 * otherwise serialising on page lock to stabilise the mapping gives
2538 * no additional guarantees to the caller as the page lock is
2539 * released before return.
2540 * Case d, similar to truncation. If reclaim holds the page lock, it
2541 * will be a race with remove_mapping that determines if the mapping
2542 * is valid on unlock but otherwise the data is valid and there is
2543 * no need to serialise with page lock.
2544 *
2545 * As the page lock gives no additional guarantee, we optimistically
2546 * wait on the page to be unlocked and check if it's up to date and
2547 * use the page if it is. Otherwise, the page lock is required to
2548 * distinguish between the different cases. The motivation is that we
2549 * avoid spurious serialisations and wakeups when multiple processes
2550 * wait on the same page for IO to complete.
2551 */
2552 wait_on_page_locked(page);
2553 if (PageUptodate(page))
2554 goto out;
2555
2556 /* Distinguish between all the cases under the safety of the lock */
1da177e4 2557 lock_page(page);
ebded027
MG
2558
2559 /* Case c or d, restart the operation */
1da177e4
LT
2560 if (!page->mapping) {
2561 unlock_page(page);
09cbfeaf 2562 put_page(page);
32b63529 2563 goto repeat;
1da177e4 2564 }
ebded027
MG
2565
2566 /* Someone else locked and filled the page in a very small window */
1da177e4
LT
2567 if (PageUptodate(page)) {
2568 unlock_page(page);
2569 goto out;
2570 }
32b63529
MG
2571 goto filler;
2572
c855ff37 2573out:
6fe6900e
NP
2574 mark_page_accessed(page);
2575 return page;
2576}
0531b2aa
LT
2577
2578/**
67f9fd91 2579 * read_cache_page - read into page cache, fill it if needed
0531b2aa
LT
2580 * @mapping: the page's address_space
2581 * @index: the page index
2582 * @filler: function to perform the read
5e5358e7 2583 * @data: first arg to filler(data, page) function, often left as NULL
0531b2aa 2584 *
0531b2aa 2585 * Read into the page cache. If a page already exists, and PageUptodate() is
67f9fd91 2586 * not set, try to fill the page and wait for it to become unlocked.
0531b2aa
LT
2587 *
2588 * If the page does not get brought uptodate, return -EIO.
2589 */
67f9fd91 2590struct page *read_cache_page(struct address_space *mapping,
0531b2aa 2591 pgoff_t index,
5e5358e7 2592 int (*filler)(void *, struct page *),
0531b2aa
LT
2593 void *data)
2594{
2595 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
2596}
67f9fd91 2597EXPORT_SYMBOL(read_cache_page);
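/*
 * Illustrative sketch (not part of this file): most callers go through the
 * read_mapping_page() wrapper, which supplies ->readpage as the filler.
 * The hypothetical helper below reads an inode's first page, maps it, and
 * drops the reference when done.
 */
static int example_peek_first_page(struct inode *inode)
{
	struct page *page = read_mapping_page(inode->i_mapping, 0, NULL);
	void *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);
	kaddr = kmap(page);
	/* ... examine up to PAGE_SIZE bytes at kaddr ... */
	kunmap(page);
	put_page(page);
	return 0;
}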
0531b2aa
LT
2598
2599/**
2600 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2601 * @mapping: the page's address_space
2602 * @index: the page index
2603 * @gfp: the page allocator flags to use if allocating
2604 *
2605 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
e6f67b8c 2606 * any new page allocations done using the specified allocation flags.
0531b2aa
LT
2607 *
2608 * If the page does not get brought uptodate, return -EIO.
2609 */
2610struct page *read_cache_page_gfp(struct address_space *mapping,
2611 pgoff_t index,
2612 gfp_t gfp)
2613{
2614 filler_t *filler = (filler_t *)mapping->a_ops->readpage;
2615
67f9fd91 2616 return do_read_cache_page(mapping, index, filler, NULL, gfp);
0531b2aa
LT
2617}
2618EXPORT_SYMBOL(read_cache_page_gfp);
2619
1da177e4
LT
2620/*
2621 * Performs necessary checks before doing a write
2622 *
485bb99b 2623 * Can adjust writing position or the number of bytes to write.
1da177e4
LT
 2624 * Returns a negative error code that the caller should return, or
 2625 * the number of bytes that may be written (zero means no write needed).
2626 */
3309dd04 2627inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
1da177e4 2628{
3309dd04 2629 struct file *file = iocb->ki_filp;
1da177e4 2630 struct inode *inode = file->f_mapping->host;
59e99e5b 2631 unsigned long limit = rlimit(RLIMIT_FSIZE);
3309dd04 2632 loff_t pos;
1da177e4 2633
3309dd04
AV
2634 if (!iov_iter_count(from))
2635 return 0;
1da177e4 2636
0fa6b005 2637 /* FIXME: this is for backwards compatibility with 2.4 */
2ba48ce5 2638 if (iocb->ki_flags & IOCB_APPEND)
3309dd04 2639 iocb->ki_pos = i_size_read(inode);
1da177e4 2640
3309dd04 2641 pos = iocb->ki_pos;
1da177e4 2642
0fa6b005 2643 if (limit != RLIM_INFINITY) {
3309dd04 2644 if (iocb->ki_pos >= limit) {
0fa6b005
AV
2645 send_sig(SIGXFSZ, current, 0);
2646 return -EFBIG;
1da177e4 2647 }
3309dd04 2648 iov_iter_truncate(from, limit - (unsigned long)pos);
1da177e4
LT
2649 }
2650
2651 /*
2652 * LFS rule
2653 */
3309dd04 2654 if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
1da177e4 2655 !(file->f_flags & O_LARGEFILE))) {
3309dd04 2656 if (pos >= MAX_NON_LFS)
1da177e4 2657 return -EFBIG;
3309dd04 2658 iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
1da177e4
LT
2659 }
2660
2661 /*
2662 * Are we about to exceed the fs block limit ?
2663 *
2664 * If we have written data it becomes a short write. If we have
2665 * exceeded without writing data we send a signal and return EFBIG.
 2666 * Linus' frestrict idea will clean these up nicely.
2667 */
3309dd04
AV
2668 if (unlikely(pos >= inode->i_sb->s_maxbytes))
2669 return -EFBIG;
1da177e4 2670
3309dd04
AV
2671 iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
2672 return iov_iter_count(from);
1da177e4
LT
2673}
2674EXPORT_SYMBOL(generic_write_checks);
2675
afddba49
NP
2676int pagecache_write_begin(struct file *file, struct address_space *mapping,
2677 loff_t pos, unsigned len, unsigned flags,
2678 struct page **pagep, void **fsdata)
2679{
2680 const struct address_space_operations *aops = mapping->a_ops;
2681
4e02ed4b 2682 return aops->write_begin(file, mapping, pos, len, flags,
afddba49 2683 pagep, fsdata);
afddba49
NP
2684}
2685EXPORT_SYMBOL(pagecache_write_begin);
2686
2687int pagecache_write_end(struct file *file, struct address_space *mapping,
2688 loff_t pos, unsigned len, unsigned copied,
2689 struct page *page, void *fsdata)
2690{
2691 const struct address_space_operations *aops = mapping->a_ops;
afddba49 2692
4e02ed4b 2693 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
afddba49
NP
2694}
2695EXPORT_SYMBOL(pagecache_write_end);
2696
1da177e4 2697ssize_t
1af5bb49 2698generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
2699{
2700 struct file *file = iocb->ki_filp;
2701 struct address_space *mapping = file->f_mapping;
2702 struct inode *inode = mapping->host;
1af5bb49 2703 loff_t pos = iocb->ki_pos;
1da177e4 2704 ssize_t written;
a969e903
CH
2705 size_t write_len;
2706 pgoff_t end;
26978b8b 2707 struct iov_iter data;
1da177e4 2708
0c949334 2709 write_len = iov_iter_count(from);
09cbfeaf 2710 end = (pos + write_len - 1) >> PAGE_SHIFT;
a969e903 2711
48b47c56 2712 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
a969e903
CH
2713 if (written)
2714 goto out;
2715
2716 /*
2717 * After a write we want buffered reads to be sure to go to disk to get
2718 * the new data. We invalidate clean cached page from the region we're
2719 * about to write. We do this *before* the write so that we can return
6ccfa806 2720 * without clobbering -EIOCBQUEUED from ->direct_IO().
a969e903
CH
2721 */
2722 if (mapping->nrpages) {
2723 written = invalidate_inode_pages2_range(mapping,
09cbfeaf 2724 pos >> PAGE_SHIFT, end);
6ccfa806
HH
2725 /*
2726 * If a page can not be invalidated, return 0 to fall back
2727 * to buffered write.
2728 */
2729 if (written) {
2730 if (written == -EBUSY)
2731 return 0;
a969e903 2732 goto out;
6ccfa806 2733 }
a969e903
CH
2734 }
2735
26978b8b 2736 data = *from;
c8b8e32d 2737 written = mapping->a_ops->direct_IO(iocb, &data);
a969e903
CH
2738
2739 /*
2740 * Finally, try again to invalidate clean pages which might have been
2741 * cached by non-direct readahead, or faulted in by get_user_pages()
2742 * if the source of the write was an mmap'ed region of the file
2743 * we're writing. Either one is a pretty crazy thing to do,
2744 * so we don't support it 100%. If this invalidation
2745 * fails, tough, the write still worked...
2746 */
2747 if (mapping->nrpages) {
2748 invalidate_inode_pages2_range(mapping,
09cbfeaf 2749 pos >> PAGE_SHIFT, end);
a969e903
CH
2750 }
2751
1da177e4 2752 if (written > 0) {
0116651c 2753 pos += written;
f8579f86 2754 iov_iter_advance(from, written);
0116651c
NK
2755 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2756 i_size_write(inode, pos);
1da177e4
LT
2757 mark_inode_dirty(inode);
2758 }
5cb6c6c7 2759 iocb->ki_pos = pos;
1da177e4 2760 }
a969e903 2761out:
1da177e4
LT
2762 return written;
2763}
2764EXPORT_SYMBOL(generic_file_direct_write);
2765
eb2be189
NP
2766/*
2767 * Find or create a page at the given pagecache position. Return the locked
2768 * page. This function is specifically for buffered writes.
2769 */
54566b2c
NP
2770struct page *grab_cache_page_write_begin(struct address_space *mapping,
2771 pgoff_t index, unsigned flags)
eb2be189 2772{
eb2be189 2773 struct page *page;
bbddabe2 2774 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;
0faa70cb 2775
54566b2c 2776 if (flags & AOP_FLAG_NOFS)
2457aec6
MG
2777 fgp_flags |= FGP_NOFS;
2778
2779 page = pagecache_get_page(mapping, index, fgp_flags,
45f87de5 2780 mapping_gfp_mask(mapping));
c585a267 2781 if (page)
2457aec6 2782 wait_for_stable_page(page);
eb2be189 2783
eb2be189
NP
2784 return page;
2785}
54566b2c 2786EXPORT_SYMBOL(grab_cache_page_write_begin);
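/*
 * Illustrative sketch (not part of this file), modelled on
 * simple_write_begin() in fs/libfs.c: a minimal ->write_begin()
 * implementation only needs to grab the (locked) page covering @pos and
 * hand it back through @pagep.
 */
static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	return 0;
}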
eb2be189 2787
3b93f911 2788ssize_t generic_perform_write(struct file *file,
afddba49
NP
2789 struct iov_iter *i, loff_t pos)
2790{
2791 struct address_space *mapping = file->f_mapping;
2792 const struct address_space_operations *a_ops = mapping->a_ops;
2793 long status = 0;
2794 ssize_t written = 0;
674b892e
NP
2795 unsigned int flags = 0;
2796
2797 /*
2798 * Copies from kernel address space cannot fail (NFSD is a big user).
2799 */
777eda2c 2800 if (!iter_is_iovec(i))
674b892e 2801 flags |= AOP_FLAG_UNINTERRUPTIBLE;
afddba49
NP
2802
2803 do {
2804 struct page *page;
afddba49
NP
2805 unsigned long offset; /* Offset into pagecache page */
2806 unsigned long bytes; /* Bytes to write to page */
2807 size_t copied; /* Bytes copied from user */
2808 void *fsdata;
2809
09cbfeaf
KS
2810 offset = (pos & (PAGE_SIZE - 1));
2811 bytes = min_t(unsigned long, PAGE_SIZE - offset,
afddba49
NP
2812 iov_iter_count(i));
2813
2814again:
00a3d660
LT
2815 /*
2816 * Bring in the user page that we will copy from _first_.
2817 * Otherwise there's a nasty deadlock on copying from the
2818 * same page as we're writing to, without it being marked
2819 * up-to-date.
2820 *
2821 * Not only is this an optimisation, but it is also required
2822 * to check that the address is actually valid, when atomic
2823 * usercopies are used, below.
2824 */
2825 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2826 status = -EFAULT;
2827 break;
2828 }
2829
296291cd
JK
2830 if (fatal_signal_pending(current)) {
2831 status = -EINTR;
2832 break;
2833 }
2834
674b892e 2835 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
afddba49 2836 &page, &fsdata);
2457aec6 2837 if (unlikely(status < 0))
afddba49
NP
2838 break;
2839
931e80e4 2840 if (mapping_writably_mapped(mapping))
2841 flush_dcache_page(page);
00a3d660 2842
afddba49 2843 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
afddba49
NP
2844 flush_dcache_page(page);
2845
2846 status = a_ops->write_end(file, mapping, pos, bytes, copied,
2847 page, fsdata);
2848 if (unlikely(status < 0))
2849 break;
2850 copied = status;
2851
2852 cond_resched();
2853
124d3b70 2854 iov_iter_advance(i, copied);
afddba49
NP
2855 if (unlikely(copied == 0)) {
2856 /*
2857 * If we were unable to copy any data at all, we must
2858 * fall back to a single segment length write.
2859 *
 2860 * If we didn't fall back here, we could livelock
2861 * because not all segments in the iov can be copied at
2862 * once without a pagefault.
2863 */
09cbfeaf 2864 bytes = min_t(unsigned long, PAGE_SIZE - offset,
afddba49
NP
2865 iov_iter_single_seg_count(i));
2866 goto again;
2867 }
afddba49
NP
2868 pos += copied;
2869 written += copied;
2870
2871 balance_dirty_pages_ratelimited(mapping);
afddba49
NP
2872 } while (iov_iter_count(i));
2873
2874 return written ? written : status;
2875}
3b93f911 2876EXPORT_SYMBOL(generic_perform_write);
1da177e4 2877
e4dd9de3 2878/**
8174202b 2879 * __generic_file_write_iter - write data to a file
e4dd9de3 2880 * @iocb: IO state structure (file, offset, etc.)
8174202b 2881 * @from: iov_iter with data to write
e4dd9de3
JK
2882 *
2883 * This function does all the work needed for actually writing data to a
2884 * file. It does all basic checks, removes SUID from the file, updates
2885 * modification times and calls proper subroutines depending on whether we
2886 * do direct IO or a standard buffered write.
2887 *
2888 * It expects i_mutex to be grabbed unless we work on a block device or similar
2889 * object which does not need locking at all.
2890 *
2891 * This function does *not* take care of syncing data in case of O_SYNC write.
2892 * A caller has to handle it. This is mainly due to the fact that we want to
2893 * avoid syncing under i_mutex.
2894 */
8174202b 2895ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
2896{
2897 struct file *file = iocb->ki_filp;
fb5527e6 2898 struct address_space * mapping = file->f_mapping;
1da177e4 2899 struct inode *inode = mapping->host;
3b93f911 2900 ssize_t written = 0;
1da177e4 2901 ssize_t err;
3b93f911 2902 ssize_t status;
1da177e4 2903
1da177e4 2904 /* We can write back this queue in page reclaim */
de1414a6 2905 current->backing_dev_info = inode_to_bdi(inode);
5fa8e0a1 2906 err = file_remove_privs(file);
1da177e4
LT
2907 if (err)
2908 goto out;
2909
c3b2da31
JB
2910 err = file_update_time(file);
2911 if (err)
2912 goto out;
1da177e4 2913
2ba48ce5 2914 if (iocb->ki_flags & IOCB_DIRECT) {
0b8def9d 2915 loff_t pos, endbyte;
fb5527e6 2916
1af5bb49 2917 written = generic_file_direct_write(iocb, from);
1da177e4 2918 /*
fbbbad4b
MW
2919 * If the write stopped short of completing, fall back to
2920 * buffered writes. Some filesystems do this for writes to
2921 * holes, for example. For DAX files, a buffered write will
2922 * not succeed (even if it did, DAX does not handle dirty
2923 * page-cache pages correctly).
1da177e4 2924 */
0b8def9d 2925 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
fbbbad4b
MW
2926 goto out;
2927
0b8def9d 2928 status = generic_perform_write(file, from, pos = iocb->ki_pos);
fb5527e6 2929 /*
3b93f911 2930 * If generic_perform_write() returned a synchronous error
fb5527e6
JM
2931 * then we want to return the number of bytes which were
2932 * direct-written, or the error code if that was zero. Note
2933 * that this differs from normal direct-io semantics, which
2934 * will return -EFOO even if some bytes were written.
2935 */
60bb4529 2936 if (unlikely(status < 0)) {
3b93f911 2937 err = status;
fb5527e6
JM
2938 goto out;
2939 }
fb5527e6
JM
2940 /*
2941 * We need to ensure that the page cache pages are written to
2942 * disk and invalidated to preserve the expected O_DIRECT
2943 * semantics.
2944 */
3b93f911 2945 endbyte = pos + status - 1;
0b8def9d 2946 err = filemap_write_and_wait_range(mapping, pos, endbyte);
fb5527e6 2947 if (err == 0) {
0b8def9d 2948 iocb->ki_pos = endbyte + 1;
3b93f911 2949 written += status;
fb5527e6 2950 invalidate_mapping_pages(mapping,
09cbfeaf
KS
2951 pos >> PAGE_SHIFT,
2952 endbyte >> PAGE_SHIFT);
fb5527e6
JM
2953 } else {
2954 /*
2955 * We don't know how much we wrote, so just return
2956 * the number of bytes which were direct-written
2957 */
2958 }
2959 } else {
0b8def9d
AV
2960 written = generic_perform_write(file, from, iocb->ki_pos);
2961 if (likely(written > 0))
2962 iocb->ki_pos += written;
fb5527e6 2963 }
1da177e4
LT
2964out:
2965 current->backing_dev_info = NULL;
2966 return written ? written : err;
2967}
8174202b 2968EXPORT_SYMBOL(__generic_file_write_iter);
e4dd9de3 2969
e4dd9de3 2970/**
8174202b 2971 * generic_file_write_iter - write data to a file
e4dd9de3 2972 * @iocb: IO state structure
8174202b 2973 * @from: iov_iter with data to write
e4dd9de3 2974 *
8174202b 2975 * This is a wrapper around __generic_file_write_iter() to be used by most
e4dd9de3
JK
2976 * filesystems. It takes care of syncing the file in case of O_SYNC file
2977 * and acquires i_mutex as needed.
2978 */
8174202b 2979ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
2980{
2981 struct file *file = iocb->ki_filp;
148f948b 2982 struct inode *inode = file->f_mapping->host;
1da177e4 2983 ssize_t ret;
1da177e4 2984
5955102c 2985 inode_lock(inode);
3309dd04
AV
2986 ret = generic_write_checks(iocb, from);
2987 if (ret > 0)
5f380c7f 2988 ret = __generic_file_write_iter(iocb, from);
5955102c 2989 inode_unlock(inode);
1da177e4 2990
e2592217
CH
2991 if (ret > 0)
2992 ret = generic_write_sync(iocb, ret);
1da177e4
LT
2993 return ret;
2994}
8174202b 2995EXPORT_SYMBOL(generic_file_write_iter);
1da177e4 2996
cf9a2ae8
DH
2997/**
2998 * try_to_release_page() - release old fs-specific metadata on a page
2999 *
3000 * @page: the page which the kernel is trying to free
3001 * @gfp_mask: memory allocation flags (and I/O mode)
3002 *
 3003 * The address_space is asked to release any data it holds against the page
3004 * (presumably at page->private). If the release was successful, return `1'.
3005 * Otherwise return zero.
3006 *
266cf658
DH
3007 * This may also be called if PG_fscache is set on a page, indicating that the
3008 * page is known to the local caching routines.
3009 *
cf9a2ae8 3010 * The @gfp_mask argument specifies whether I/O may be performed to release
71baba4b 3011 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
cf9a2ae8 3012 *
cf9a2ae8
DH
3013 */
3014int try_to_release_page(struct page *page, gfp_t gfp_mask)
3015{
3016 struct address_space * const mapping = page->mapping;
3017
3018 BUG_ON(!PageLocked(page));
3019 if (PageWriteback(page))
3020 return 0;
3021
3022 if (mapping && mapping->a_ops->releasepage)
3023 return mapping->a_ops->releasepage(page, gfp_mask);
3024 return try_to_free_buffers(page);
3025}
3026
3027EXPORT_SYMBOL(try_to_release_page);
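/*
 * Illustrative sketch (not part of this file): reclaim and truncation use
 * this to ask the filesystem to drop page-private data (buffer heads,
 * fscache state) before a locked page can be freed; compare the calls in
 * shrink_page_list().  The helper name below is hypothetical.
 */
static bool example_strip_page_private(struct page *page)
{
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return false;	/* the filesystem refused to let go */
	return true;
}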