mm/filemap.c: break generic_file_buffered_read up into multiple functions
// SPDX-License-Identifier: GPL-2.0-only
/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem              (truncate_pagecache)
 *    ->private_lock            (__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock             (exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem            (truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
 *        ->i_pages lock        (arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->lock_page               (access_process_vm)
 *
 *  ->i_mutex                   (generic_perform_write)
 *    ->mmap_lock               (fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock                   (fs/fs-writeback.c)
 *    ->i_pages lock            (__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock           (vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock     (anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock               (try_to_unmap_one)
 *    ->private_lock            (try_to_unmap_one)
 *    ->i_pages lock            (try_to_unmap_one)
 *    ->pgdat->lru_lock         (follow_page->mark_page_accessed)
 *    ->pgdat->lru_lock         (check_pte_range->isolate_lru_page)
 *    ->private_lock            (page_remove_rmap->set_page_dirty)
 *    ->i_pages lock            (page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock         (page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock        (page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
 *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
 *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 */

static void page_cache_delete(struct address_space *mapping,
                              struct page *page, void *shadow)
{
        XA_STATE(xas, &mapping->i_pages, page->index);
        unsigned int nr = 1;

        mapping_set_update(&xas, mapping);

        /* hugetlb pages are represented by a single entry in the xarray */
        if (!PageHuge(page)) {
                xas_set_order(&xas, page->index, compound_order(page));
                nr = compound_nr(page);
        }

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(nr != 1 && shadow, page);

        xas_store(&xas, shadow);
        xas_init_marks(&xas);

        page->mapping = NULL;
        /* Leave page->index set: truncation lookup relies upon it */

        if (shadow) {
                mapping->nrexceptional += nr;
                /*
                 * Make sure the nrexceptional update is committed before
                 * the nrpages update so that final truncate racing
                 * with reclaim does not see both counters 0 at the
                 * same time and miss a shadow entry.
                 */
                smp_wmb();
        }
        mapping->nrpages -= nr;
}

static void unaccount_page_cache_page(struct address_space *mapping,
                                      struct page *page)
{
        int nr;

        /*
         * if we're uptodate, flush out into the cleancache, otherwise
         * invalidate any existing cleancache entries.  We can't leave
         * stale data around in the cleancache once our page is gone
         */
        if (PageUptodate(page) && PageMappedToDisk(page))
                cleancache_put_page(page);
        else
                cleancache_invalidate_page(mapping, page);

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(page_mapped(page), page);
        if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
                int mapcount;

                pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
                         current->comm, page_to_pfn(page));
                dump_page(page, "still mapped when deleted");
                dump_stack();
                add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

                mapcount = page_mapcount(page);
                if (mapping_exiting(mapping) &&
                    page_count(page) >= mapcount + 2) {
                        /*
                         * All vmas have already been torn down, so it's
                         * a good bet that actually the page is unmapped,
                         * and we'd prefer not to leak it: if we're wrong,
                         * some other bad page check should catch it later.
                         */
                        page_mapcount_reset(page);
                        page_ref_sub(page, mapcount);
                }
        }

        /* hugetlb pages do not participate in page cache accounting. */
        if (PageHuge(page))
                return;

        nr = thp_nr_pages(page);

        __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
        if (PageSwapBacked(page)) {
                __mod_lruvec_page_state(page, NR_SHMEM, -nr);
                if (PageTransHuge(page))
                        __dec_node_page_state(page, NR_SHMEM_THPS);
        } else if (PageTransHuge(page)) {
                __dec_node_page_state(page, NR_FILE_THPS);
                filemap_nr_thps_dec(mapping);
        }

        /*
         * At this point page must be either written or cleaned by
         * truncate.  Dirty page here signals a bug and loss of
         * unwritten data.
         *
         * This fixes dirty accounting after removing the page entirely
         * but leaves PageDirty set: it has no effect for truncated
         * page and anyway will be cleared before returning page into
         * buddy allocator.
         */
        if (WARN_ON_ONCE(PageDirty(page)))
                account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
        struct address_space *mapping = page->mapping;

        trace_mm_filemap_delete_from_page_cache(page);

        unaccount_page_cache_page(mapping, page);
        page_cache_delete(mapping, page, shadow);
}

static void page_cache_free_page(struct address_space *mapping,
                                 struct page *page)
{
        void (*freepage)(struct page *);

        freepage = mapping->a_ops->freepage;
        if (freepage)
                freepage(page);

        if (PageTransHuge(page) && !PageHuge(page)) {
                page_ref_sub(page, thp_nr_pages(page));
                VM_BUG_ON_PAGE(page_count(page) <= 0, page);
        } else {
                put_page(page);
        }
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        BUG_ON(!PageLocked(page));
        xa_lock_irqsave(&mapping->i_pages, flags);
        __delete_from_page_cache(page, NULL);
        xa_unlock_irqrestore(&mapping->i_pages, flags);

        page_cache_free_page(mapping, page);
}
EXPORT_SYMBOL(delete_from_page_cache);
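
/*
 * Illustrative usage sketch (not part of this file; the caller shown is
 * hypothetical): callers lock the page and hold their own reference, and
 * typically re-check page->mapping under the lock before deleting, much
 * like the truncate path does.
 *
 *      lock_page(page);
 *      if (page->mapping == mapping)
 *              delete_from_page_cache(page);
 *      unlock_page(page);
 *      put_page(page);
 */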

/*
 * page_cache_delete_batch - delete several pages from page cache
 * @mapping: the mapping to which pages belong
 * @pvec: pagevec with pages to delete
 *
 * The function walks over mapping->i_pages and removes pages passed in @pvec
 * from the mapping. The function expects @pvec to be sorted by page index
 * and is optimised for it to be dense.
 * It tolerates holes in @pvec (mapping entries at those indices are not
 * modified). The function expects only THP head pages to be present in the
 * @pvec.
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
                                    struct pagevec *pvec)
{
        XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
        int total_pages = 0;
        int i = 0;
        struct page *page;

        mapping_set_update(&xas, mapping);
        xas_for_each(&xas, page, ULONG_MAX) {
                if (i >= pagevec_count(pvec))
                        break;

                /* A swap/dax/shadow entry got inserted? Skip it. */
                if (xa_is_value(page))
                        continue;
                /*
                 * A page got inserted in our range? Skip it. We have our
                 * pages locked so they are protected from being removed.
                 * If we see a page whose index is higher than ours, it
                 * means our page has been removed, which shouldn't be
                 * possible because we're holding the PageLock.
                 */
                if (page != pvec->pages[i]) {
                        VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
                                       page);
                        continue;
                }

                WARN_ON_ONCE(!PageLocked(page));

                if (page->index == xas.xa_index)
                        page->mapping = NULL;
                /* Leave page->index set: truncation lookup relies on it */

                /*
                 * Move to the next page in the vector if this is a regular
                 * page or the index is of the last sub-page of this compound
                 * page.
                 */
                if (page->index + compound_nr(page) - 1 == xas.xa_index)
                        i++;
                xas_store(&xas, NULL);
                total_pages++;
        }
        mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec)
{
        int i;
        unsigned long flags;

        if (!pagevec_count(pvec))
                return;

        xa_lock_irqsave(&mapping->i_pages, flags);
        for (i = 0; i < pagevec_count(pvec); i++) {
                trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);

                unaccount_page_cache_page(mapping, pvec->pages[i]);
        }
        page_cache_delete_batch(mapping, pvec);
        xa_unlock_irqrestore(&mapping->i_pages, flags);

        for (i = 0; i < pagevec_count(pvec); i++)
                page_cache_free_page(mapping, pvec->pages[i]);
}

int filemap_check_errors(struct address_space *mapping)
{
        int ret = 0;
        /* Check for outstanding write errors */
        if (test_bit(AS_ENOSPC, &mapping->flags) &&
            test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_bit(AS_EIO, &mapping->flags) &&
            test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
        /* Check for outstanding write errors */
        if (test_bit(AS_EIO, &mapping->flags))
                return -EIO;
        if (test_bit(AS_ENOSPC, &mapping->flags))
                return -ENOSPC;
        return 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                               loff_t end, int sync_mode)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };

        if (!mapping_can_writeback(mapping) ||
            !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        wbc_attach_fdatawrite_inode(&wbc, mapping->host);
        ret = do_writepages(mapping, &wbc);
        wbc_detach_inode(&wbc);
        return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
                                       int sync_mode)
{
        return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                             loff_t end)
{
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
                            loff_t start_byte, loff_t end_byte)
{
        struct page *page;
        XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
        pgoff_t max = end_byte >> PAGE_SHIFT;

        if (end_byte < start_byte)
                return false;

        rcu_read_lock();
        for (;;) {
                page = xas_find(&xas, max);
                if (xas_retry(&xas, page))
                        continue;
                /* Shadow entries don't count */
                if (xa_is_value(page))
                        continue;
                /*
                 * We don't need to try to pin this page; we're about to
                 * release the RCU lock anyway.  It is enough to know that
                 * there was a page here recently.
                 */
                break;
        }
        rcu_read_unlock();

        return page != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
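
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * direct I/O write path can use this as a cheap check for whether cached
 * pages in the range might first need writeback and invalidation.
 *
 *      static bool example_range_is_cached(struct file *file, loff_t pos,
 *                                          size_t count)
 *      {
 *              return filemap_range_has_page(file->f_mapping, pos,
 *                                            pos + count - 1);
 *      }
 */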

static void __filemap_fdatawait_range(struct address_space *mapping,
                                      loff_t start_byte, loff_t end_byte)
{
        pgoff_t index = start_byte >> PAGE_SHIFT;
        pgoff_t end = end_byte >> PAGE_SHIFT;
        struct pagevec pvec;
        int nr_pages;

        if (end_byte < start_byte)
                return;

        pagevec_init(&pvec);
        while (index <= end) {
                unsigned i;

                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
                                end, PAGECACHE_TAG_WRITEBACK);
                if (!nr_pages)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        wait_on_page_writeback(page);
                        ClearPageError(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
                            loff_t end_byte)
{
        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);
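
/*
 * Illustrative sketch (not part of this file): writeback and waiting are
 * often split so that I/O for several ranges can be in flight before any
 * of it is waited on.
 *
 *      err = filemap_fdatawrite_range(mapping, start, end);
 *      (...kick off or perform other work while the I/O runs...)
 *      if (!err)
 *              err = filemap_fdatawait_range(mapping, start, end);
 */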

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
                                        loff_t start_byte, loff_t end_byte)
{
        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file: file pointing to address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
        struct address_space *mapping = file->f_mapping;

        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
        __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
        return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
        if (dax_mapping(mapping))
                return mapping->nrexceptional;

        return mapping->nrpages;
}

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend)
{
        int err = 0;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /*
                 * Even if the above returned error, the pages may be
                 * written partially (e.g. -ENOSPC), so we wait for it.
                 * But the -EIO is special case, it may indicate the worst
                 * thing (e.g. bug) happened, so we avoid waiting for it.
                 */
                if (err != -EIO) {
                        int err2 = filemap_fdatawait_range(mapping,
                                                           lstart, lend);
                        if (!err)
                                err = err2;
                } else {
                        /* Clear any previously stored errors */
                        filemap_check_errors(mapping);
                }
        } else {
                err = filemap_check_errors(mapping);
        }
        return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
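
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file;
 * examplefs_sync_metadata() is made up): a simple ->fsync() implementation
 * usually writes out and waits on the data range first, then syncs its own
 * metadata.
 *
 *      static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
 *                                 int datasync)
 *      {
 *              int err = filemap_write_and_wait_range(file->f_mapping,
 *                                                     start, end);
 *              if (err)
 *                      return err;
 *              return examplefs_sync_metadata(file, datasync);
 *      }
 */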

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
        errseq_t eseq = errseq_set(&mapping->wb_err, err);

        trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 *                                 and advance wb_err to current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
        int err = 0;
        errseq_t old = READ_ONCE(file->f_wb_err);
        struct address_space *mapping = file->f_mapping;

        /* Locklessly handle the common case where nothing has changed */
        if (errseq_check(&mapping->wb_err, old)) {
                /* Something changed, must use slow path */
                spin_lock(&file->f_lock);
                old = file->f_wb_err;
                err = errseq_check_and_advance(&mapping->wb_err,
                                               &file->f_wb_err);
                trace_file_check_and_advance_wb_err(file, old);
                spin_unlock(&file->f_lock);
        }

        /*
         * We're mostly using this function as a drop in replacement for
         * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
         * that the legacy code would have had on these flags.
         */
        clear_bit(AS_EIO, &mapping->flags);
        clear_bit(AS_ENOSPC, &mapping->flags);
        return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
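
/*
 * Illustrative sketch (hypothetical, not part of this file): a ->flush()
 * method with nothing of its own to write back can still report writeback
 * errors seen since this struct file sampled the error cursor.
 *
 *      static int example_flush(struct file *file, fl_owner_t id)
 *      {
 *              return file_check_and_advance_wb_err(file);
 *      }
 */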

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file: file pointing to address_space with pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
        int err = 0, err2;
        struct address_space *mapping = file->f_mapping;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /* See comment of filemap_write_and_wait() */
                if (err != -EIO)
                        __filemap_fdatawait_range(mapping, lstart, lend);
        }
        err2 = file_check_and_advance_wb_err(file);
        if (!err)
                err = err2;
        return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old: page to be replaced
 * @new: page to replace with
 * @gfp_mask: allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 *
 * Return: %0
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
        struct address_space *mapping = old->mapping;
        void (*freepage)(struct page *) = mapping->a_ops->freepage;
        pgoff_t offset = old->index;
        XA_STATE(xas, &mapping->i_pages, offset);
        unsigned long flags;

        VM_BUG_ON_PAGE(!PageLocked(old), old);
        VM_BUG_ON_PAGE(!PageLocked(new), new);
        VM_BUG_ON_PAGE(new->mapping, new);

        get_page(new);
        new->mapping = mapping;
        new->index = offset;

        mem_cgroup_migrate(old, new);

        xas_lock_irqsave(&xas, flags);
        xas_store(&xas, new);

        old->mapping = NULL;
        /* hugetlb pages do not participate in page cache accounting. */
        if (!PageHuge(old))
                __dec_lruvec_page_state(old, NR_FILE_PAGES);
        if (!PageHuge(new))
                __inc_lruvec_page_state(new, NR_FILE_PAGES);
        if (PageSwapBacked(old))
                __dec_lruvec_page_state(old, NR_SHMEM);
        if (PageSwapBacked(new))
                __inc_lruvec_page_state(new, NR_SHMEM);
        xas_unlock_irqrestore(&xas, flags);
        if (freepage)
                freepage(old);
        put_page(old);

        return 0;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
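
/*
 * Illustrative sketch (not part of this file; ordering shown is one
 * reasonable arrangement, not the only one): both pages are locked by the
 * caller, and the new page must still be added to the LRU afterwards.
 *
 *      lock_page(old);
 *      lock_page(new);
 *      replace_page_cache_page(old, new, GFP_KERNEL);
 *      lru_cache_add(new);
 *      unlock_page(new);
 *      unlock_page(old);
 */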

noinline int __add_to_page_cache_locked(struct page *page,
                                        struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp,
                                        void **shadowp)
{
        XA_STATE(xas, &mapping->i_pages, offset);
        int huge = PageHuge(page);
        int error;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapBacked(page), page);
        mapping_set_update(&xas, mapping);

        get_page(page);
        page->mapping = mapping;
        page->index = offset;

        if (!huge) {
                error = mem_cgroup_charge(page, current->mm, gfp);
                if (error)
                        goto error;
        }

        gfp &= GFP_RECLAIM_MASK;

        do {
                unsigned int order = xa_get_order(xas.xa, xas.xa_index);
                void *entry, *old = NULL;

                if (order > thp_order(page))
                        xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
                                        order, gfp);
                xas_lock_irq(&xas);
                xas_for_each_conflict(&xas, entry) {
                        old = entry;
                        if (!xa_is_value(entry)) {
                                xas_set_err(&xas, -EEXIST);
                                goto unlock;
                        }
                }

                if (old) {
                        if (shadowp)
                                *shadowp = old;
                        /* entry may have been split before we acquired lock */
                        order = xa_get_order(xas.xa, xas.xa_index);
                        if (order > thp_order(page)) {
                                xas_split(&xas, old, order);
                                xas_reset(&xas);
                        }
                }

                xas_store(&xas, page);
                if (xas_error(&xas))
                        goto unlock;

                if (old)
                        mapping->nrexceptional--;
                mapping->nrpages++;

                /* hugetlb pages do not participate in page cache accounting */
                if (!huge)
                        __inc_lruvec_page_state(page, NR_FILE_PAGES);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (xas_error(&xas)) {
                error = xas_error(&xas);
                goto error;
        }

        trace_mm_filemap_add_to_page_cache(page);
        return 0;
error:
        page->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
        put_page(page);
        return error;
}
ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page: page to add
 * @mapping: the page's address_space
 * @offset: page index
 * @gfp_mask: page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                             pgoff_t offset, gfp_t gfp_mask)
{
        return __add_to_page_cache_locked(page, mapping, offset,
                                          gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                          pgoff_t offset, gfp_t gfp_mask)
{
        void *shadow = NULL;
        int ret;

        __SetPageLocked(page);
        ret = __add_to_page_cache_locked(page, mapping, offset,
                                         gfp_mask, &shadow);
        if (unlikely(ret))
                __ClearPageLocked(page);
        else {
                /*
                 * The page might have been evicted from cache only
                 * recently, in which case it should be activated like
                 * any other repeatedly accessed page.
                 * The exception is pages getting rewritten; evicting other
                 * data from the working set, only to cache data that will
                 * get overwritten with something else, is a waste of memory.
                 */
                WARN_ON_ONCE(PageActive(page));
                if (!(gfp_mask & __GFP_WRITE) && shadow)
                        workingset_refault(page, shadow);
                lru_cache_add(page);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
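
/*
 * Illustrative sketch (not part of this file; example_readpage() is made
 * up): this is the usual way a read path inserts a newly allocated page
 * before issuing I/O for it. On success the page is locked and on the LRU.
 *
 *      struct page *page = page_cache_alloc(mapping);
 *
 *      if (!page)
 *              return -ENOMEM;
 *      err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *      if (err) {
 *              put_page(page);
 *              return err;
 *      }
 *      err = example_readpage(file, page);     (unlocks the page when done)
 *      put_page(page);
 */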

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
        int n;
        struct page *page;

        if (cpuset_do_page_mem_spread()) {
                unsigned int cpuset_mems_cookie;
                do {
                        cpuset_mems_cookie = read_mems_allowed_begin();
                        n = cpuset_mem_spread_node();
                        page = __alloc_pages_node(n, gfp, 0);
                } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

                return page;
        }
        return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *page_waitqueue(struct page *page)
{
        return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
        int i;

        for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
                init_waitqueue_head(&page_wait_table[i]);

        page_writeback_init();
}

/*
 * The page wait code treats the "wait->flags" somewhat unusually, because
 * we have multiple different kinds of waits, not just the usual "exclusive"
 * one.
 *
 * We have:
 *
 *  (a) no special bits set:
 *
 *      We're just waiting for the bit to be released, and when a waker
 *      calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
 *      and remove it from the wait queue.
 *
 *      Simple and straightforward.
 *
 *  (b) WQ_FLAG_EXCLUSIVE:
 *
 *      The waiter is waiting to get the lock, and only one waiter should
 *      be woken up to avoid any thundering herd behavior. We'll set the
 *      WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
 *
 *      This is the traditional exclusive wait.
 *
 *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
 *
 *      The waiter is waiting to get the bit, and additionally wants the
 *      lock to be transferred to it for fair lock behavior. If the lock
 *      cannot be taken, we stop walking the wait queue without waking
 *      the waiter.
 *
 *      This is the "fair lock handoff" case, and in addition to setting
 *      WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
 *      that it now has the lock.
 */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
        unsigned int flags;
        struct wait_page_key *key = arg;
        struct wait_page_queue *wait_page
                = container_of(wait, struct wait_page_queue, wait);

        if (!wake_page_match(wait_page, key))
                return 0;

        /*
         * If it's a lock handoff wait, we get the bit for it, and
         * stop walking (and do not wake it up) if we can't.
         */
        flags = wait->flags;
        if (flags & WQ_FLAG_EXCLUSIVE) {
                if (test_bit(key->bit_nr, &key->page->flags))
                        return -1;
                if (flags & WQ_FLAG_CUSTOM) {
                        if (test_and_set_bit(key->bit_nr, &key->page->flags))
                                return -1;
                        flags |= WQ_FLAG_DONE;
                }
        }

        /*
         * We are holding the wait-queue lock, but the waiter that
         * is waiting for this will be checking the flags without
         * any locking.
         *
         * So update the flags atomically, and wake up the waiter
         * afterwards to avoid any races. This store-release pairs
         * with the load-acquire in wait_on_page_bit_common().
         */
        smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
        wake_up_state(wait->private, mode);

        /*
         * Ok, we have successfully done what we're waiting for,
         * and we can unconditionally remove the wait entry.
         *
         * Note that this pairs with the "finish_wait()" in the
         * waiter, and has to be the absolute last thing we do.
         * After this list_del_init(&wait->entry) the wait entry
         * might be de-allocated and the process might even have
         * exited.
         */
        list_del_init_careful(&wait->entry);
        return (flags & WQ_FLAG_EXCLUSIVE) != 0;
}

static void wake_up_page_bit(struct page *page, int bit_nr)
{
        wait_queue_head_t *q = page_waitqueue(page);
        struct wait_page_key key;
        unsigned long flags;
        wait_queue_entry_t bookmark;

        key.page = page;
        key.bit_nr = bit_nr;
        key.page_match = 0;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);

        while (bookmark.flags & WQ_FLAG_BOOKMARK) {
                /*
                 * Take a breather from holding the lock,
                 * allow pages that finish wake up asynchronously
                 * to acquire the lock and remove themselves
                 * from wait queue
                 */
                spin_unlock_irqrestore(&q->lock, flags);
                cpu_relax();
                spin_lock_irqsave(&q->lock, flags);
                __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
        }

        /*
         * It is possible for other pages to have collided on the waitqueue
         * hash, so in that case check for a page match. That prevents a long-
         * term waiter
         *
         * It is still possible to miss a case here, when we woke page waiters
         * and removed them from the waitqueue, but there are still other
         * page waiters.
         */
        if (!waitqueue_active(q) || !key.page_match) {
                ClearPageWaiters(page);
                /*
                 * It's possible to miss clearing Waiters here, when we woke
                 * our page waiters, but the hashed waitqueue has waiters for
                 * other pages on it.
                 *
                 * That's okay, it's a rare case. The next waker will clear it.
                 */
        }
        spin_unlock_irqrestore(&q->lock, flags);
}

static void wake_up_page(struct page *page, int bit)
{
        if (!PageWaiters(page))
                return;
        wake_up_page_bit(page, bit);
}

/*
 * A choice of three behaviors for wait_on_page_bit_common():
 */
enum behavior {
        EXCLUSIVE,      /* Hold ref to page and take the bit when woken, like
                         * __lock_page() waiting on then setting PG_locked.
                         */
        SHARED,         /* Hold ref to page and check the bit when woken, like
                         * wait_on_page_writeback() waiting on PG_writeback.
                         */
        DROP,           /* Drop ref to page before wait, no check when woken,
                         * like put_and_wait_on_page_locked() on PG_locked.
                         */
};

/*
 * Attempt to check (or get) the page bit, and mark us done
 * if successful.
 */
static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
                                           struct wait_queue_entry *wait)
{
        if (wait->flags & WQ_FLAG_EXCLUSIVE) {
                if (test_and_set_bit(bit_nr, &page->flags))
                        return false;
        } else if (test_bit(bit_nr, &page->flags))
                return false;

        wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
        return true;
}

/* How many times do we accept lock stealing from under a waiter? */
int sysctl_page_lock_unfairness = 5;

static inline int wait_on_page_bit_common(wait_queue_head_t *q,
        struct page *page, int bit_nr, int state, enum behavior behavior)
{
        int unfairness = sysctl_page_lock_unfairness;
        struct wait_page_queue wait_page;
        wait_queue_entry_t *wait = &wait_page.wait;
        bool thrashing = false;
        bool delayacct = false;
        unsigned long pflags;

        if (bit_nr == PG_locked &&
            !PageUptodate(page) && PageWorkingset(page)) {
                if (!PageSwapBacked(page)) {
                        delayacct_thrashing_start();
                        delayacct = true;
                }
                psi_memstall_enter(&pflags);
                thrashing = true;
        }

        init_wait(wait);
        wait->func = wake_page_function;
        wait_page.page = page;
        wait_page.bit_nr = bit_nr;

repeat:
        wait->flags = 0;
        if (behavior == EXCLUSIVE) {
                wait->flags = WQ_FLAG_EXCLUSIVE;
                if (--unfairness < 0)
                        wait->flags |= WQ_FLAG_CUSTOM;
        }

        /*
         * Do one last check whether we can get the
         * page bit synchronously.
         *
         * Do the SetPageWaiters() marking before that
         * to let any waker we _just_ missed know they
         * need to wake us up (otherwise they'll never
         * even go to the slow case that looks at the
         * page queue), and add ourselves to the wait
         * queue if we need to sleep.
         *
         * This part needs to be done under the queue
         * lock to avoid races.
         */
        spin_lock_irq(&q->lock);
        SetPageWaiters(page);
        if (!trylock_page_bit_common(page, bit_nr, wait))
                __add_wait_queue_entry_tail(q, wait);
        spin_unlock_irq(&q->lock);

        /*
         * From now on, all the logic will be based on
         * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
         * see whether the page bit testing has already
         * been done by the wake function.
         *
         * We can drop our reference to the page.
         */
        if (behavior == DROP)
                put_page(page);

        /*
         * Note that until the "finish_wait()", or until
         * we see the WQ_FLAG_WOKEN flag, we need to
         * be very careful with the 'wait->flags', because
         * we may race with a waker that sets them.
         */
        for (;;) {
                unsigned int flags;

                set_current_state(state);

                /* Loop until we've been woken or interrupted */
                flags = smp_load_acquire(&wait->flags);
                if (!(flags & WQ_FLAG_WOKEN)) {
                        if (signal_pending_state(state, current))
                                break;

                        io_schedule();
                        continue;
                }

                /* If we were non-exclusive, we're done */
                if (behavior != EXCLUSIVE)
                        break;

                /* If the waker got the lock for us, we're done */
                if (flags & WQ_FLAG_DONE)
                        break;

                /*
                 * Otherwise, if we're getting the lock, we need to
                 * try to get it ourselves.
                 *
                 * And if that fails, we'll have to retry this all.
                 */
                if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
                        goto repeat;

                wait->flags |= WQ_FLAG_DONE;
                break;
        }

        /*
         * If a signal happened, this 'finish_wait()' may remove the last
         * waiter from the wait-queues, but the PageWaiters bit will remain
         * set. That's ok. The next wakeup will take care of it, and trying
         * to do it here would be difficult and prone to races.
         */
        finish_wait(q, wait);

        if (thrashing) {
                if (delayacct)
                        delayacct_thrashing_end();
                psi_memstall_leave(&pflags);
        }

        /*
         * NOTE! The wait->flags weren't stable until we've done the
         * 'finish_wait()', and we could have exited the loop above due
         * to a signal, and had a wakeup event happen after the signal
         * test but before the 'finish_wait()'.
         *
         * So only after the finish_wait() can we reliably determine
         * if we got woken up or not, so we can now figure out the final
         * return value based on that state without races.
         *
         * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
         * waiter, but an exclusive one requires WQ_FLAG_DONE.
         */
        if (behavior == EXCLUSIVE)
                return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;

        return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
        wait_queue_head_t *q = page_waitqueue(page);
        wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
        wait_queue_head_t *q = page_waitqueue(page);
        return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
}
EXPORT_SYMBOL(wait_on_page_bit_killable);

static int __wait_on_page_locked_async(struct page *page,
                                       struct wait_page_queue *wait, bool set)
{
        struct wait_queue_head *q = page_waitqueue(page);
        int ret = 0;

        wait->page = page;
        wait->bit_nr = PG_locked;

        spin_lock_irq(&q->lock);
        __add_wait_queue_entry_tail(q, &wait->wait);
        SetPageWaiters(page);
        if (set)
                ret = !trylock_page(page);
        else
                ret = PageLocked(page);
        /*
         * If we were successful now, we know we're still on the
         * waitqueue as we're still under the lock. This means it's
         * safe to remove and return success, we know the callback
         * isn't going to trigger.
         */
        if (!ret)
                __remove_wait_queue(q, &wait->wait);
        else
                ret = -EIOCBQUEUED;
        spin_unlock_irq(&q->lock);
        return ret;
}

static int wait_on_page_locked_async(struct page *page,
                                     struct wait_page_queue *wait)
{
        if (!PageLocked(page))
                return 0;
        return __wait_on_page_locked_async(compound_head(page), wait, false);
}

/**
 * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
 * @page: The page to wait for.
 *
 * The caller should hold a reference on @page.  They expect the page to
 * become unlocked relatively soon, but do not wish to hold up migration
 * (for example) by holding the reference while waiting for the page to
 * come unlocked.  After this function returns, the caller should not
 * dereference @page.
 */
void put_and_wait_on_page_locked(struct page *page)
{
        wait_queue_head_t *q;

        page = compound_head(page);
        q = page_waitqueue(page);
        wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
{
        wait_queue_head_t *q = page_waitqueue(page);
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_entry_tail(q, waiter);
        SetPageWaiters(page);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_lock.
 *
 * On x86 (and on many other architectures), we can clear PG_lock and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
        clear_bit_unlock(nr, mem);
        /* smp_mb__after_atomic(); */
        return test_bit(PG_waiters, mem);
}

#endif

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * Note that this depends on PG_waiters being the sign bit in the byte
 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 * clear the PG_locked bit and test PG_waiters at the same time fairly
 * portably (architectures that do LL/SC can test any bit, while x86 can
 * test the sign bit).
 */
void unlock_page(struct page *page)
{
        BUILD_BUG_ON(PG_waiters != 7);
        page = compound_head(page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
                wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
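
/*
 * Illustrative sketch (not part of this file): the canonical pairing for a
 * short critical section under the page lock.
 *
 *      lock_page(page);
 *      (...inspect or update state that PG_locked protects...)
 *      unlock_page(page);
 */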
1468
485bb99b
RD
1469/**
1470 * end_page_writeback - end writeback against a page
1471 * @page: the page
1da177e4
LT
1472 */
1473void end_page_writeback(struct page *page)
1474{
888cf2db
MG
1475 /*
1476 * TestClearPageReclaim could be used here but it is an atomic
1477 * operation and overkill in this particular case. Failing to
1478 * shuffle a page marked for immediate reclaim is too mild to
1479 * justify taking an atomic operation penalty at the end of
1480 * ever page writeback.
1481 */
1482 if (PageReclaim(page)) {
1483 ClearPageReclaim(page);
ac6aadb2 1484 rotate_reclaimable_page(page);
888cf2db 1485 }
ac6aadb2 1486
073861ed
HD
1487 /*
1488 * Writeback does not hold a page reference of its own, relying
1489 * on truncation to wait for the clearing of PG_writeback.
1490 * But here we must make sure that the page is not freed and
1491 * reused before the wake_up_page().
1492 */
1493 get_page(page);
ac6aadb2
MS
1494 if (!test_clear_page_writeback(page))
1495 BUG();
1496
4e857c58 1497 smp_mb__after_atomic();
1da177e4 1498 wake_up_page(page, PG_writeback);
073861ed 1499 put_page(page);
1da177e4
LT
1500}
1501EXPORT_SYMBOL(end_page_writeback);
1502
57d99845
MW
1503/*
1504 * After completing I/O on a page, call this routine to update the page
1505 * flags appropriately
1506 */
c11f0c0b 1507void page_endio(struct page *page, bool is_write, int err)
57d99845 1508{
c11f0c0b 1509 if (!is_write) {
57d99845
MW
1510 if (!err) {
1511 SetPageUptodate(page);
1512 } else {
1513 ClearPageUptodate(page);
1514 SetPageError(page);
1515 }
1516 unlock_page(page);
abf54548 1517 } else {
57d99845 1518 if (err) {
dd8416c4
MK
1519 struct address_space *mapping;
1520
57d99845 1521 SetPageError(page);
dd8416c4
MK
1522 mapping = page_mapping(page);
1523 if (mapping)
1524 mapping_set_error(mapping, err);
57d99845
MW
1525 }
1526 end_page_writeback(page);
1527 }
1528}
1529EXPORT_SYMBOL_GPL(page_endio);
1530
485bb99b
RD
1531/**
1532 * __lock_page - get a lock on the page, assuming we need to sleep to get it
87066755 1533 * @__page: the page to lock
1da177e4 1534 */
62906027 1535void __lock_page(struct page *__page)
1da177e4 1536{
62906027
NP
1537 struct page *page = compound_head(__page);
1538 wait_queue_head_t *q = page_waitqueue(page);
9a1ea439
HD
1539 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
1540 EXCLUSIVE);
1da177e4
LT
1541}
1542EXPORT_SYMBOL(__lock_page);
1543
62906027 1544int __lock_page_killable(struct page *__page)
2687a356 1545{
62906027
NP
1546 struct page *page = compound_head(__page);
1547 wait_queue_head_t *q = page_waitqueue(page);
9a1ea439
HD
1548 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
1549 EXCLUSIVE);
2687a356 1550}
18bc0bbd 1551EXPORT_SYMBOL_GPL(__lock_page_killable);
2687a356 1552
dd3e6d50
JA
1553int __lock_page_async(struct page *page, struct wait_page_queue *wait)
1554{
1555 return __wait_on_page_locked_async(page, wait, true);
1556}
1557
9a95f3cf
PC
1558/*
1559 * Return values:
c1e8d7c6 1560 * 1 - page is locked; mmap_lock is still held.
9a95f3cf 1561 * 0 - page is not locked.
3e4e28c5 1562 * mmap_lock has been released (mmap_read_unlock(), unless flags had both
9a95f3cf 1563 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
c1e8d7c6 1564 * which case mmap_lock is still held.
9a95f3cf
PC
1565 *
1566 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
c1e8d7c6 1567 * with the page locked and the mmap_lock unperturbed.
9a95f3cf 1568 */
d065bd81
ML
1569int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
1570 unsigned int flags)
1571{
4064b982 1572 if (fault_flag_allow_retry_first(flags)) {
37b23e05 1573 /*
c1e8d7c6 1574 * CAUTION! In this case, mmap_lock is not released
37b23e05
KM
1575 * even though return 0.
1576 */
1577 if (flags & FAULT_FLAG_RETRY_NOWAIT)
1578 return 0;
1579
d8ed45c5 1580 mmap_read_unlock(mm);
37b23e05
KM
1581 if (flags & FAULT_FLAG_KILLABLE)
1582 wait_on_page_locked_killable(page);
1583 else
318b275f 1584 wait_on_page_locked(page);
d065bd81 1585 return 0;
37b23e05
KM
1586 } else {
1587 if (flags & FAULT_FLAG_KILLABLE) {
1588 int ret;
1589
1590 ret = __lock_page_killable(page);
1591 if (ret) {
d8ed45c5 1592 mmap_read_unlock(mm);
37b23e05
KM
1593 return 0;
1594 }
1595 } else
1596 __lock_page(page);
1597 return 1;
d065bd81
ML
1598 }
1599}
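/*
 * Sketch of the expected caller pattern (condensed; real callers go
 * through the lock_page_or_retry() wrapper in pagemap.h):
 *
 *	if (!lock_page_or_retry(page, mm, vmf->flags)) {
 *		// mmap_lock was dropped (unless RETRY_NOWAIT was set),
 *		// so ask the arch fault handler to retry the whole fault
 *		put_page(page);
 *		return VM_FAULT_RETRY;
 *	}
 */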
1600
e7b563bb 1601/**
0d3f9296
MW
1602 * page_cache_next_miss() - Find the next gap in the page cache.
1603 * @mapping: Mapping.
1604 * @index: Index.
1605 * @max_scan: Maximum range to search.
e7b563bb 1606 *
0d3f9296
MW
1607 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1608 * gap with the lowest index.
e7b563bb 1609 *
0d3f9296
MW
1610 * This function may be called under the rcu_read_lock. However, this will
1611 * not atomically search a snapshot of the cache at a single point in time.
1612 * For example, if a gap is created at index 5, then subsequently a gap is
1613 * created at index 10, page_cache_next_miss covering both indices may
1614 * return 10 if called under the rcu_read_lock.
e7b563bb 1615 *
0d3f9296
MW
1616 * Return: The index of the gap if found, otherwise an index outside the
1617 * range specified (in which case 'return - index >= max_scan' will be true).
1618 * In the rare case of index wrap-around, 0 will be returned.
e7b563bb 1619 */
0d3f9296 1620pgoff_t page_cache_next_miss(struct address_space *mapping,
e7b563bb
JW
1621 pgoff_t index, unsigned long max_scan)
1622{
0d3f9296 1623 XA_STATE(xas, &mapping->i_pages, index);
e7b563bb 1624
0d3f9296
MW
1625 while (max_scan--) {
1626 void *entry = xas_next(&xas);
1627 if (!entry || xa_is_value(entry))
e7b563bb 1628 break;
0d3f9296 1629 if (xas.xa_index == 0)
e7b563bb
JW
1630 break;
1631 }
1632
0d3f9296 1633 return xas.xa_index;
e7b563bb 1634}
0d3f9296 1635EXPORT_SYMBOL(page_cache_next_miss);
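/*
 * Illustrative use (readahead-style; compare ondemand_readahead() in
 * mm/readahead.c): find how far the run of cached pages extends:
 *
 *	rcu_read_lock();
 *	start = page_cache_next_miss(mapping, index + 1, max_pages);
 *	rcu_read_unlock();
 *	if (!start || start - index > max_pages)
 *		return;		// no usable gap within the window
 */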
e7b563bb
JW
1636
1637/**
2346a560 1638 * page_cache_prev_miss() - Find the previous gap in the page cache.
0d3f9296
MW
1639 * @mapping: Mapping.
1640 * @index: Index.
1641 * @max_scan: Maximum range to search.
e7b563bb 1642 *
0d3f9296
MW
1643 * Search the range [max(index - max_scan + 1, 0), index] for the
1644 * gap with the highest index.
e7b563bb 1645 *
0d3f9296
MW
1646 * This function may be called under the rcu_read_lock. However, this will
1647 * not atomically search a snapshot of the cache at a single point in time.
1648 * For example, if a gap is created at index 10, then subsequently a gap is
1649 * created at index 5, page_cache_prev_miss() covering both indices may
1650 * return 5 if called under the rcu_read_lock.
e7b563bb 1651 *
0d3f9296
MW
1652 * Return: The index of the gap if found, otherwise an index outside the
1653 * range specified (in which case 'index - return >= max_scan' will be true).
1654 * In the rare case of wrap-around, ULONG_MAX will be returned.
e7b563bb 1655 */
0d3f9296 1656pgoff_t page_cache_prev_miss(struct address_space *mapping,
e7b563bb
JW
1657 pgoff_t index, unsigned long max_scan)
1658{
0d3f9296 1659 XA_STATE(xas, &mapping->i_pages, index);
e7b563bb 1660
0d3f9296
MW
1661 while (max_scan--) {
1662 void *entry = xas_prev(&xas);
1663 if (!entry || xa_is_value(entry))
e7b563bb 1664 break;
0d3f9296 1665 if (xas.xa_index == ULONG_MAX)
e7b563bb
JW
1666 break;
1667 }
1668
0d3f9296 1669 return xas.xa_index;
e7b563bb 1670}
0d3f9296 1671EXPORT_SYMBOL(page_cache_prev_miss);
e7b563bb 1672
485bb99b 1673/**
0cd6144a 1674 * find_get_entry - find and get a page cache entry
485bb99b 1675 * @mapping: the address_space to search
a6de4b48 1676 * @index: The page cache index.
0cd6144a
JW
1677 *
1678 * Looks up the page cache slot at @mapping & @index. If there is a
a6de4b48 1679 * page cache page, the head page is returned with an increased refcount.
485bb99b 1680 *
139b6a6f
JW
1681 * If the slot holds a shadow entry of a previously evicted page, or a
1682 * swap entry from shmem/tmpfs, it is returned.
0cd6144a 1683 *
a6de4b48 1684 * Return: The head page or shadow entry, %NULL if nothing is found.
1da177e4 1685 */
a6de4b48 1686struct page *find_get_entry(struct address_space *mapping, pgoff_t index)
1da177e4 1687{
a6de4b48 1688 XA_STATE(xas, &mapping->i_pages, index);
4101196b 1689 struct page *page;
1da177e4 1690
a60637c8
NP
1691 rcu_read_lock();
1692repeat:
4c7472c0
MW
1693 xas_reset(&xas);
1694 page = xas_load(&xas);
1695 if (xas_retry(&xas, page))
1696 goto repeat;
1697 /*
1698 * A shadow entry of a recently evicted page, or a swap entry from
1699 * shmem/tmpfs. Return it without attempting to raise page count.
1700 */
1701 if (!page || xa_is_value(page))
1702 goto out;
83929372 1703
4101196b 1704 if (!page_cache_get_speculative(page))
4c7472c0 1705 goto repeat;
83929372 1706
4c7472c0 1707 /*
4101196b 1708 * Has the page moved or been split?
4c7472c0
MW
1709 * This is part of the lockless pagecache protocol. See
1710 * include/linux/pagemap.h for details.
1711 */
1712 if (unlikely(page != xas_reload(&xas))) {
4101196b 1713 put_page(page);
4c7472c0 1714 goto repeat;
a60637c8 1715 }
27d20fdd 1716out:
a60637c8
NP
1717 rcu_read_unlock();
1718
1da177e4
LT
1719 return page;
1720}
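/*
 * Callers must be prepared for value entries; a minimal sketch, where
 * handle_value_entry() and do_something() are hypothetical:
 *
 *	page = find_get_entry(mapping, index);
 *	if (xa_is_value(page)) {
 *		handle_value_entry(page);	// no reference was taken,
 *						// so no put_page() here
 *	} else if (page) {
 *		do_something(page);
 *		put_page(page);
 *	}
 */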
1da177e4 1721
0cd6144a 1722/**
63ec1973
MWO
1723 * find_lock_entry - Locate and lock a page cache entry.
1724 * @mapping: The address_space to search.
1725 * @index: The page cache index.
0cd6144a 1726 *
63ec1973
MWO
1727 * Looks up the page at @mapping & @index. If there is a page in the
1728 * cache, the head page is returned locked and with an increased refcount.
0cd6144a 1729 *
139b6a6f
JW
1730 * If the slot holds a shadow entry of a previously evicted page, or a
1731 * swap entry from shmem/tmpfs, it is returned.
0cd6144a 1732 *
63ec1973
MWO
1733 * Context: May sleep.
1734 * Return: The head page or shadow entry, %NULL if nothing is found.
0cd6144a 1735 */
63ec1973 1736struct page *find_lock_entry(struct address_space *mapping, pgoff_t index)
1da177e4
LT
1737{
1738 struct page *page;
1739
1da177e4 1740repeat:
63ec1973 1741 page = find_get_entry(mapping, index);
4c7472c0 1742 if (page && !xa_is_value(page)) {
a60637c8
NP
1743 lock_page(page);
1744 /* Has the page been truncated? */
63ec1973 1745 if (unlikely(page->mapping != mapping)) {
a60637c8 1746 unlock_page(page);
09cbfeaf 1747 put_page(page);
a60637c8 1748 goto repeat;
1da177e4 1749 }
63ec1973 1750 VM_BUG_ON_PAGE(!thp_contains(page, index), page);
1da177e4 1751 }
1da177e4
LT
1752 return page;
1753}
0cd6144a
JW
1754
1755/**
2294b32e
MWO
1756 * pagecache_get_page - Find and get a reference to a page.
1757 * @mapping: The address_space to search.
1758 * @index: The page index.
1759 * @fgp_flags: %FGP flags modify how the page is returned.
1760 * @gfp_mask: Memory allocation flags to use if %FGP_CREAT is specified.
1da177e4 1761 *
2294b32e 1762 * Looks up the page cache entry at @mapping & @index.
0cd6144a 1763 *
2294b32e 1764 * @fgp_flags can be zero or more of these flags:
0e056eb5 1765 *
2294b32e
MWO
1766 * * %FGP_ACCESSED - The page will be marked accessed.
1767 * * %FGP_LOCK - The page is returned locked.
a8cf7f27
MWO
1768 * * %FGP_HEAD - If the page is present and a THP, return the head page
1769 * rather than the exact page specified by the index.
2294b32e
MWO
1770 * * %FGP_CREAT - If no page is present then a new page is allocated using
1771 * @gfp_mask and added to the page cache and the VM's LRU list.
1772 * The page is returned locked and with an increased refcount.
1773 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
1774 * page is already in cache. If the page was allocated, unlock it before
1775 * returning so the caller can do the same dance.
605cad83
YS
1776 * * %FGP_WRITE - The page will be written
1777 * * %FGP_NOFS - __GFP_FS will get cleared in gfp mask
1778 * * %FGP_NOWAIT - Don't get blocked by page lock
1da177e4 1779 *
2294b32e
MWO
1780 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1781 * if the %GFP flags specified for %FGP_CREAT are atomic.
1da177e4 1782 *
2457aec6 1783 * If there is a page cache page, it is returned with an increased refcount.
a862f68a 1784 *
2294b32e 1785 * Return: The found page or %NULL otherwise.
1da177e4 1786 */
2294b32e
MWO
1787struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
1788 int fgp_flags, gfp_t gfp_mask)
1da177e4 1789{
eb2be189 1790 struct page *page;
2457aec6 1791
1da177e4 1792repeat:
2294b32e 1793 page = find_get_entry(mapping, index);
3159f943 1794 if (xa_is_value(page))
2457aec6
MG
1795 page = NULL;
1796 if (!page)
1797 goto no_page;
1798
1799 if (fgp_flags & FGP_LOCK) {
1800 if (fgp_flags & FGP_NOWAIT) {
1801 if (!trylock_page(page)) {
09cbfeaf 1802 put_page(page);
2457aec6
MG
1803 return NULL;
1804 }
1805 } else {
1806 lock_page(page);
1807 }
1808
1809 /* Has the page been truncated? */
a8cf7f27 1810 if (unlikely(page->mapping != mapping)) {
2457aec6 1811 unlock_page(page);
09cbfeaf 1812 put_page(page);
2457aec6
MG
1813 goto repeat;
1814 }
a8cf7f27 1815 VM_BUG_ON_PAGE(!thp_contains(page, index), page);
2457aec6
MG
1816 }
1817
c16eb000 1818 if (fgp_flags & FGP_ACCESSED)
2457aec6 1819 mark_page_accessed(page);
b9306a79
YS
1820 else if (fgp_flags & FGP_WRITE) {
1821 /* Clear idle flag for buffer write */
1822 if (page_is_idle(page))
1823 clear_page_idle(page);
1824 }
a8cf7f27
MWO
1825 if (!(fgp_flags & FGP_HEAD))
1826 page = find_subpage(page, index);
2457aec6
MG
1827
1828no_page:
1829 if (!page && (fgp_flags & FGP_CREAT)) {
1830 int err;
f56753ac 1831 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
45f87de5
MH
1832 gfp_mask |= __GFP_WRITE;
1833 if (fgp_flags & FGP_NOFS)
1834 gfp_mask &= ~__GFP_FS;
2457aec6 1835
45f87de5 1836 page = __page_cache_alloc(gfp_mask);
eb2be189
NP
1837 if (!page)
1838 return NULL;
2457aec6 1839
a75d4c33 1840 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
2457aec6
MG
1841 fgp_flags |= FGP_LOCK;
1842
eb39d618 1843 /* Init accessed so avoid atomic mark_page_accessed later */
2457aec6 1844 if (fgp_flags & FGP_ACCESSED)
eb39d618 1845 __SetPageReferenced(page);
2457aec6 1846
2294b32e 1847 err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
eb2be189 1848 if (unlikely(err)) {
09cbfeaf 1849 put_page(page);
eb2be189
NP
1850 page = NULL;
1851 if (err == -EEXIST)
1852 goto repeat;
1da177e4 1853 }
a75d4c33
JB
1854
1855 /*
1856 * add_to_page_cache_lru locks the page, and for mmap we expect
1857 * an unlocked page.
1858 */
1859 if (page && (fgp_flags & FGP_FOR_MMAP))
1860 unlock_page(page);
1da177e4 1861 }
2457aec6 1862
1da177e4
LT
1863 return page;
1864}
2457aec6 1865EXPORT_SYMBOL(pagecache_get_page);
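/*
 * A minimal sketch of the common find-or-create pattern (compare the
 * grab_cache_page_write_begin() wrapper):
 *
 *	page = pagecache_get_page(mapping, index,
 *			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	// page is locked and referenced here
 *	...
 *	unlock_page(page);
 *	put_page(page);
 */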
1da177e4 1866
0cd6144a
JW
1867/**
1868 * find_get_entries - gang pagecache lookup
1869 * @mapping: The address_space to search
1870 * @start: The starting page cache index
1871 * @nr_entries: The maximum number of entries
1872 * @entries: Where the resulting entries are placed
1873 * @indices: The cache indices corresponding to the entries in @entries
1874 *
1875 * find_get_entries() will search for and return a group of up to
1876 * @nr_entries entries in the mapping. The entries are placed at
1877 * @entries. find_get_entries() takes a reference against any actual
1878 * pages it returns.
1879 *
1880 * The search returns a group of mapping-contiguous page cache entries
1881 * with ascending indexes. There may be holes in the indices due to
1882 * not-present pages.
1883 *
139b6a6f
JW
1884 * Any shadow entries of evicted pages, or swap entries from
1885 * shmem/tmpfs, are included in the returned array.
0cd6144a 1886 *
71725ed1
HD
1887 * If it finds a Transparent Huge Page, head or tail, find_get_entries()
1888 * stops at that page: the caller is likely to have a better way to handle
1889 * the compound page as a whole, and then skip its extent, than repeatedly
1890 * calling find_get_entries() to return all its tails.
1891 *
a862f68a 1892 * Return: the number of pages and shadow entries which were found.
0cd6144a
JW
1893 */
1894unsigned find_get_entries(struct address_space *mapping,
1895 pgoff_t start, unsigned int nr_entries,
1896 struct page **entries, pgoff_t *indices)
1897{
f280bf09
MW
1898 XA_STATE(xas, &mapping->i_pages, start);
1899 struct page *page;
0cd6144a 1900 unsigned int ret = 0;
0cd6144a
JW
1901
1902 if (!nr_entries)
1903 return 0;
1904
1905 rcu_read_lock();
f280bf09 1906 xas_for_each(&xas, page, ULONG_MAX) {
f280bf09 1907 if (xas_retry(&xas, page))
0cd6144a 1908 continue;
f280bf09
MW
1909 /*
1910 * A shadow entry of a recently evicted page, a swap
1911 * entry from shmem/tmpfs or a DAX entry. Return it
1912 * without attempting to raise page count.
1913 */
1914 if (xa_is_value(page))
0cd6144a 1915 goto export;
83929372 1916
4101196b 1917 if (!page_cache_get_speculative(page))
f280bf09 1918 goto retry;
83929372 1919
4101196b 1920 /* Has the page moved or been split? */
f280bf09
MW
1921 if (unlikely(page != xas_reload(&xas)))
1922 goto put_page;
1923
71725ed1
HD
1924 /*
1925 * Terminate early on finding a THP, to allow the caller to
1926 * handle it all at once; but continue if this is hugetlbfs.
1927 */
1928 if (PageTransHuge(page) && !PageHuge(page)) {
1929 page = find_subpage(page, xas.xa_index);
1930 nr_entries = ret + 1;
1931 }
0cd6144a 1932export:
f280bf09 1933 indices[ret] = xas.xa_index;
0cd6144a
JW
1934 entries[ret] = page;
1935 if (++ret == nr_entries)
1936 break;
f280bf09
MW
1937 continue;
1938put_page:
4101196b 1939 put_page(page);
f280bf09
MW
1940retry:
1941 xas_reset(&xas);
0cd6144a
JW
1942 }
1943 rcu_read_unlock();
1944 return ret;
1945}
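/*
 * Sketch of a caller walking entries (condensed; compare
 * truncate_inode_pages_range(), which uses the pagevec_lookup_entries()
 * wrapper):
 *
 *	pgoff_t indices[PAGEVEC_SIZE];
 *	struct page *entries[PAGEVEC_SIZE];
 *	unsigned i, nr;
 *
 *	nr = find_get_entries(mapping, start, PAGEVEC_SIZE, entries, indices);
 *	for (i = 0; i < nr; i++) {
 *		if (xa_is_value(entries[i]))
 *			continue;	// shadow/swap entry: no reference held
 *		// ... act on the page at index indices[i] ...
 *		put_page(entries[i]);
 *	}
 */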
1946
1da177e4 1947/**
b947cee4 1948 * find_get_pages_range - gang pagecache lookup
1da177e4
LT
1949 * @mapping: The address_space to search
1950 * @start: The starting page index
b947cee4 1951 * @end: The final page index (inclusive)
1da177e4
LT
1952 * @nr_pages: The maximum number of pages
1953 * @pages: Where the resulting pages are placed
1954 *
b947cee4
JK
1955 * find_get_pages_range() will search for and return a group of up to @nr_pages
1956 * pages in the mapping starting at index @start and up to index @end
1957 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes
1958 * a reference against the returned pages.
1da177e4
LT
1959 *
1960 * The search returns a group of mapping-contiguous pages with ascending
1961 * indexes. There may be holes in the indices due to not-present pages.
d72dc8a2 1962 * We also update @start to index the next page for the traversal.
1da177e4 1963 *
a862f68a
MR
1964 * Return: the number of pages which were found. If this number is
1965 * smaller than @nr_pages, the end of specified range has been
b947cee4 1966 * reached.
1da177e4 1967 */
b947cee4
JK
1968unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
1969 pgoff_t end, unsigned int nr_pages,
1970 struct page **pages)
1da177e4 1971{
fd1b3cee
MW
1972 XA_STATE(xas, &mapping->i_pages, *start);
1973 struct page *page;
0fc9d104
KK
1974 unsigned ret = 0;
1975
1976 if (unlikely(!nr_pages))
1977 return 0;
a60637c8
NP
1978
1979 rcu_read_lock();
fd1b3cee 1980 xas_for_each(&xas, page, end) {
fd1b3cee 1981 if (xas_retry(&xas, page))
a60637c8 1982 continue;
fd1b3cee
MW
1983 /* Skip over shadow, swap and DAX entries */
1984 if (xa_is_value(page))
8079b1c8 1985 continue;
a60637c8 1986
4101196b 1987 if (!page_cache_get_speculative(page))
fd1b3cee 1988 goto retry;
83929372 1989
4101196b 1990 /* Has the page moved or been split? */
fd1b3cee
MW
1991 if (unlikely(page != xas_reload(&xas)))
1992 goto put_page;
1da177e4 1993
4101196b 1994 pages[ret] = find_subpage(page, xas.xa_index);
b947cee4 1995 if (++ret == nr_pages) {
5d3ee42f 1996 *start = xas.xa_index + 1;
b947cee4
JK
1997 goto out;
1998 }
fd1b3cee
MW
1999 continue;
2000put_page:
4101196b 2001 put_page(page);
fd1b3cee
MW
2002retry:
2003 xas_reset(&xas);
a60637c8 2004 }
5b280c0c 2005
b947cee4
JK
2006 /*
2007 * We come here when there is no page beyond @end. We take care to not
2008 * overflow the index @start as it confuses some of the callers. This
fd1b3cee 2009 * breaks the iteration when there is a page at index -1 but that is
b947cee4
JK
2010 * already broken anyway.
2011 */
2012 if (end == (pgoff_t)-1)
2013 *start = (pgoff_t)-1;
2014 else
2015 *start = end + 1;
2016out:
a60637c8 2017 rcu_read_unlock();
d72dc8a2 2018
1da177e4
LT
2019 return ret;
2020}
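/*
 * Typical batched-lookup loop, as a sketch (the per-page work is
 * hypothetical; compare the pagevec helpers in mm/swap.c that wrap
 * this function):
 *
 *	struct page *pages[PAGEVEC_SIZE];
 *	pgoff_t index = 0;
 *	unsigned i, nr;
 *
 *	while ((nr = find_get_pages_range(mapping, &index, end,
 *					  PAGEVEC_SIZE, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			do_something(pages[i]);	// hypothetical
 *			put_page(pages[i]);
 *		}
 *	}
 */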
2021
ebf43500
JA
2022/**
2023 * find_get_pages_contig - gang contiguous pagecache lookup
2024 * @mapping: The address_space to search
2025 * @index: The starting page index
2026 * @nr_pages: The maximum number of pages
2027 * @pages: Where the resulting pages are placed
2028 *
2029 * find_get_pages_contig() works exactly like find_get_pages(), except
2030 * that the returned number of pages are guaranteed to be contiguous.
2031 *
a862f68a 2032 * Return: the number of pages which were found.
ebf43500
JA
2033 */
2034unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
2035 unsigned int nr_pages, struct page **pages)
2036{
3ece58a2
MW
2037 XA_STATE(xas, &mapping->i_pages, index);
2038 struct page *page;
0fc9d104
KK
2039 unsigned int ret = 0;
2040
2041 if (unlikely(!nr_pages))
2042 return 0;
a60637c8
NP
2043
2044 rcu_read_lock();
3ece58a2 2045 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
3ece58a2
MW
2046 if (xas_retry(&xas, page))
2047 continue;
2048 /*
2049 * If the entry has been swapped out, we can stop looking.
2050 * No current caller is looking for DAX entries.
2051 */
2052 if (xa_is_value(page))
8079b1c8 2053 break;
ebf43500 2054
4101196b 2055 if (!page_cache_get_speculative(page))
3ece58a2 2056 goto retry;
83929372 2057
4101196b 2058 /* Has the page moved or been split? */
3ece58a2
MW
2059 if (unlikely(page != xas_reload(&xas)))
2060 goto put_page;
a60637c8 2061
4101196b 2062 pages[ret] = find_subpage(page, xas.xa_index);
0fc9d104
KK
2063 if (++ret == nr_pages)
2064 break;
3ece58a2
MW
2065 continue;
2066put_page:
4101196b 2067 put_page(page);
3ece58a2
MW
2068retry:
2069 xas_reset(&xas);
ebf43500 2070 }
a60637c8
NP
2071 rcu_read_unlock();
2072 return ret;
ebf43500 2073}
ef71c15c 2074EXPORT_SYMBOL(find_get_pages_contig);
ebf43500 2075
485bb99b 2076/**
72b045ae 2077 * find_get_pages_range_tag - find and return pages in given range matching @tag
485bb99b
RD
2078 * @mapping: the address_space to search
2079 * @index: the starting page index
72b045ae 2080 * @end: The final page index (inclusive)
485bb99b
RD
2081 * @tag: the tag index
2082 * @nr_pages: the maximum number of pages
2083 * @pages: where the resulting pages are placed
2084 *
1da177e4 2085 * Like find_get_pages, except we only return pages which are tagged with
485bb99b 2086 * @tag. We update @index to index the next page for the traversal.
a862f68a
MR
2087 *
2088 * Return: the number of pages which were found.
1da177e4 2089 */
72b045ae 2090unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
a6906972 2091 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
72b045ae 2092 struct page **pages)
1da177e4 2093{
a6906972
MW
2094 XA_STATE(xas, &mapping->i_pages, *index);
2095 struct page *page;
0fc9d104
KK
2096 unsigned ret = 0;
2097
2098 if (unlikely(!nr_pages))
2099 return 0;
a60637c8
NP
2100
2101 rcu_read_lock();
a6906972 2102 xas_for_each_marked(&xas, page, end, tag) {
a6906972 2103 if (xas_retry(&xas, page))
a60637c8 2104 continue;
a6906972
MW
2105 /*
2106 * Shadow entries should never be tagged, but this iteration
2107 * is lockless so there is a window for page reclaim to evict
2108 * a page we saw tagged. Skip over it.
2109 */
2110 if (xa_is_value(page))
139b6a6f 2111 continue;
a60637c8 2112
4101196b 2113 if (!page_cache_get_speculative(page))
a6906972 2114 goto retry;
a60637c8 2115
4101196b 2116 /* Has the page moved or been split? */
a6906972
MW
2117 if (unlikely(page != xas_reload(&xas)))
2118 goto put_page;
a60637c8 2119
4101196b 2120 pages[ret] = find_subpage(page, xas.xa_index);
72b045ae 2121 if (++ret == nr_pages) {
5d3ee42f 2122 *index = xas.xa_index + 1;
72b045ae
JK
2123 goto out;
2124 }
a6906972
MW
2125 continue;
2126put_page:
4101196b 2127 put_page(page);
a6906972
MW
2128retry:
2129 xas_reset(&xas);
a60637c8 2130 }
5b280c0c 2131
72b045ae 2132 /*
a6906972 2133 * We come here when we got to @end. We take care to not overflow the
72b045ae 2134 * index @index as it confuses some of the callers. This breaks the
a6906972
MW
2135 * iteration when there is a page at index -1 but that is already
2136 * broken anyway.
72b045ae
JK
2137 */
2138 if (end == (pgoff_t)-1)
2139 *index = (pgoff_t)-1;
2140 else
2141 *index = end + 1;
2142out:
a60637c8 2143 rcu_read_unlock();
1da177e4 2144
1da177e4
LT
2145 return ret;
2146}
72b045ae 2147EXPORT_SYMBOL(find_get_pages_range_tag);
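/*
 * Writeback-style usage, condensed to a sketch (see write_cache_pages()
 * for the real loop, which goes through pagevec_lookup_range_tag()):
 *
 *	nr = find_get_pages_range_tag(mapping, &index, end,
 *				      PAGECACHE_TAG_DIRTY,
 *				      PAGEVEC_SIZE, pages);
 *	for (i = 0; i < nr; i++) {
 *		lock_page(pages[i]);
 *		// recheck ->mapping and dirty state under the lock,
 *		// then write the page back
 *		unlock_page(pages[i]);
 *		put_page(pages[i]);
 *	}
 */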
1da177e4 2148
76d42bd9
WF
2149/*
2150 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2151 * a _large_ part of the i/o request. Imagine the worst scenario:
2152 *
2153 * ---R__________________________________________B__________
2154 * ^ reading here ^ bad block (assume 4k)
2155 *
2156 * read(R) => miss => readahead(R...B) => media error => frustrating retries
2157 * => failing the whole request => read(R) => read(R+1) =>
2158 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2159 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2160 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2161 *
2162 * It is going insane. Fix it by quickly scaling down the readahead size.
2163 */
0f8e2db4 2164static void shrink_readahead_size_eio(struct file_ra_state *ra)
76d42bd9 2165{
76d42bd9 2166 ra->ra_pages /= 4;
76d42bd9
WF
2167}
2168
723ef24b
KO
2169static int lock_page_for_iocb(struct kiocb *iocb, struct page *page)
2170{
2171 if (iocb->ki_flags & IOCB_WAITQ)
2172 return lock_page_async(page, iocb->ki_waitq);
2173 else if (iocb->ki_flags & IOCB_NOWAIT)
2174 return trylock_page(page) ? 0 : -EAGAIN;
2175 else
2176 return lock_page_killable(page);
2177}
2178
2179static int generic_file_buffered_read_page_ok(struct kiocb *iocb,
2180 struct iov_iter *iter,
2181 struct page *page)
2182{
2183 struct address_space *mapping = iocb->ki_filp->f_mapping;
2184 struct inode *inode = mapping->host;
2185 struct file_ra_state *ra = &iocb->ki_filp->f_ra;
2186 unsigned int offset = iocb->ki_pos & ~PAGE_MASK;
2187 unsigned int bytes, copied;
2188 loff_t isize, end_offset;
2189
2190 BUG_ON(iocb->ki_pos >> PAGE_SHIFT != page->index);
2191
2192 /*
2193 * i_size must be checked after we know the page is Uptodate.
2194 *
2195 * Checking i_size after the uptodate check allows us to calculate
2196 * the correct value for "bytes", which means the zero-filled
2197 * part of the page is not copied back to userspace (unless
2198 * another truncate extends the file - this is desired though).
2199 */
2200
2201 isize = i_size_read(inode);
2202 if (unlikely(iocb->ki_pos >= isize))
2203 return 1;
2204
2205 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2206
2207 bytes = min_t(loff_t, end_offset - iocb->ki_pos, PAGE_SIZE - offset);
2208
2209 /* If users can be writing to this page using arbitrary
2210 * virtual addresses, take care about potential aliasing
2211 * before reading the page on the kernel side.
2212 */
2213 if (mapping_writably_mapped(mapping))
2214 flush_dcache_page(page);
2215
2216 /*
2217 * Ok, we have the page, and it's up-to-date, so
2218 * now we can copy it to user space...
2219 */
2220
2221 copied = copy_page_to_iter(page, offset, bytes, iter);
2222
2223 iocb->ki_pos += copied;
2224
2225 /*
2226 * When a sequential read accesses a page several times,
2227 * only mark it as accessed the first time.
2228 */
2229 if (iocb->ki_pos >> PAGE_SHIFT != ra->prev_pos >> PAGE_SHIFT)
2230 mark_page_accessed(page);
2231
2232 ra->prev_pos = iocb->ki_pos;
2233
2234 if (copied < bytes)
2235 return -EFAULT;
2236
2237 return !iov_iter_count(iter) || iocb->ki_pos == isize;
2238}
2239
2240static struct page *
2241generic_file_buffered_read_readpage(struct kiocb *iocb,
2242 struct file *filp,
2243 struct address_space *mapping,
2244 struct page *page)
2245{
2246 struct file_ra_state *ra = &filp->f_ra;
2247 int error;
2248
2249 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT)) {
2250 unlock_page(page);
2251 put_page(page);
2252 return ERR_PTR(-EAGAIN);
2253 }
2254
2255 /*
2256 * A previous I/O error may have been due to temporary
2257 * failures, e.g. multipath errors.
2258 * PG_error will be set again if readpage fails.
2259 */
2260 ClearPageError(page);
2261 /* Start the actual read. The read will unlock the page. */
2262 error = mapping->a_ops->readpage(filp, page);
2263
2264 if (unlikely(error)) {
2265 put_page(page);
2266 return error != AOP_TRUNCATED_PAGE ? ERR_PTR(error) : NULL;
2267 }
2268
2269 if (!PageUptodate(page)) {
2270 error = lock_page_for_iocb(iocb, page);
2271 if (unlikely(error)) {
2272 put_page(page);
2273 return ERR_PTR(error);
2274 }
2275 if (!PageUptodate(page)) {
2276 if (page->mapping == NULL) {
2277 /*
2278 * invalidate_mapping_pages got it
2279 */
2280 unlock_page(page);
2281 put_page(page);
2282 return NULL;
2283 }
2284 unlock_page(page);
2285 shrink_readahead_size_eio(ra);
2286 put_page(page);
2287 return ERR_PTR(-EIO);
2288 }
2289 unlock_page(page);
2290 }
2291
2292 return page;
2293}
2294
2295static struct page *
2296generic_file_buffered_read_pagenotuptodate(struct kiocb *iocb,
2297 struct file *filp,
2298 struct iov_iter *iter,
2299 struct page *page,
2300 loff_t pos, loff_t count)
2301{
2302 struct address_space *mapping = filp->f_mapping;
2303 struct inode *inode = mapping->host;
2304 int error;
2305
2306 /*
2307 * See comment in do_read_cache_page on why
2308 * wait_on_page_locked is used to avoid unnecessarily
2309 * serialisations and why it's safe.
2310 */
2311 if (iocb->ki_flags & IOCB_WAITQ) {
2312 error = wait_on_page_locked_async(page,
2313 iocb->ki_waitq);
2314 } else {
2315 error = wait_on_page_locked_killable(page);
2316 }
2317 if (unlikely(error)) {
2318 put_page(page);
2319 return ERR_PTR(error);
2320 }
2321 if (PageUptodate(page))
2322 return page;
2323
2324 if (inode->i_blkbits == PAGE_SHIFT ||
2325 !mapping->a_ops->is_partially_uptodate)
2326 goto page_not_up_to_date;
2327 /* pipes can't handle partially uptodate pages */
2328 if (unlikely(iov_iter_is_pipe(iter)))
2329 goto page_not_up_to_date;
2330 if (!trylock_page(page))
2331 goto page_not_up_to_date;
2332 /* Did it get truncated before we got the lock? */
2333 if (!page->mapping)
2334 goto page_not_up_to_date_locked;
2335 if (!mapping->a_ops->is_partially_uptodate(page,
2336 pos & ~PAGE_MASK, count))
2337 goto page_not_up_to_date_locked;
2338 unlock_page(page);
2339 return page;
2340
2341page_not_up_to_date:
2342 /* Get exclusive access to the page ... */
2343 error = lock_page_for_iocb(iocb, page);
2344 if (unlikely(error)) {
2345 put_page(page);
2346 return ERR_PTR(error);
2347 }
2348
2349page_not_up_to_date_locked:
2350 /* Did it get truncated before we got the lock? */
2351 if (!page->mapping) {
2352 unlock_page(page);
2353 put_page(page);
2354 return NULL;
2355 }
2356
2357 /* Did somebody else fill it already? */
2358 if (PageUptodate(page)) {
2359 unlock_page(page);
2360 return page;
2361 }
2362
2363 return generic_file_buffered_read_readpage(iocb, filp, mapping, page);
2364}
2365
2366static struct page *
2367generic_file_buffered_read_no_cached_page(struct kiocb *iocb,
2368 struct iov_iter *iter)
2369{
2370 struct file *filp = iocb->ki_filp;
2371 struct address_space *mapping = filp->f_mapping;
2372 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2373 struct page *page;
2374 int error;
2375
2376 if (iocb->ki_flags & IOCB_NOIO)
2377 return ERR_PTR(-EAGAIN);
2378
2379 /*
2380 * Ok, it wasn't cached, so we need to create a new
2381 * page.
2382 */
2383 page = page_cache_alloc(mapping);
2384 if (!page)
2385 return ERR_PTR(-ENOMEM);
2386
2387 error = add_to_page_cache_lru(page, mapping, index,
2388 mapping_gfp_constraint(mapping, GFP_KERNEL));
2389 if (error) {
2390 put_page(page);
2391 return error != -EEXIST ? ERR_PTR(error) : NULL;
2392 }
2393
2394 return generic_file_buffered_read_readpage(iocb, filp, mapping, page);
2395}
2396
485bb99b 2397/**
47c27bc4
CH
2398 * generic_file_buffered_read - generic file read routine
2399 * @iocb: the iocb to read
6e58e79d
AV
2400 * @iter: data destination
2401 * @written: already copied
485bb99b 2402 *
1da177e4 2403 * This is a generic file read routine, and uses the
485bb99b 2404 * mapping->a_ops->readpage() function for the actual low-level stuff.
1da177e4
LT
2405 *
2406 * This is really ugly. But the goto's actually try to clarify some
2407 * of the logic when it comes to error handling etc.
a862f68a
MR
2408 *
2409 * Return:
2410 * * total number of bytes copied, including those that were already @written
2411 * * negative error code if nothing was copied
1da177e4 2412 */
d85dc2e1 2413ssize_t generic_file_buffered_read(struct kiocb *iocb,
6e58e79d 2414 struct iov_iter *iter, ssize_t written)
1da177e4 2415{
47c27bc4 2416 struct file *filp = iocb->ki_filp;
36e78914 2417 struct address_space *mapping = filp->f_mapping;
1da177e4 2418 struct inode *inode = mapping->host;
36e78914 2419 struct file_ra_state *ra = &filp->f_ra;
723ef24b 2420 size_t orig_count = iov_iter_count(iter);
57f6b96c 2421 pgoff_t last_index;
6e58e79d 2422 int error = 0;
1da177e4 2423
723ef24b 2424 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
d05c5f7b 2425 return 0;
c2a9737f
WF
2426 iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2427
723ef24b 2428 last_index = (iocb->ki_pos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
1da177e4 2429
13bd6914
JA
2430 /*
2431 * If we've already successfully copied some data, then we
2432 * can no longer safely return -EIOCBQUEUED. Hence mark
2433 * an async read NOWAIT at that point.
2434 */
2435 if (written && (iocb->ki_flags & IOCB_WAITQ))
2436 iocb->ki_flags |= IOCB_NOWAIT;
2437
1da177e4 2438 for (;;) {
723ef24b 2439 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
1da177e4 2440 struct page *page;
1da177e4 2441
1da177e4 2442 cond_resched();
1da177e4 2443find_page:
5abf186a
MH
2444 if (fatal_signal_pending(current)) {
2445 error = -EINTR;
2446 goto out;
2447 }
2448
723ef24b
KO
2449 /*
2450 * We can't return -EIOCBQUEUED once we've done some work, so
2451 * ensure we don't block:
2452 */
2453 if ((iocb->ki_flags & IOCB_WAITQ) &&
2454 (written + orig_count - iov_iter_count(iter)))
2455 iocb->ki_flags |= IOCB_NOWAIT;
2456
1da177e4 2457 page = find_get_page(mapping, index);
3ea89ee8 2458 if (!page) {
cdc8fcb4 2459 if (iocb->ki_flags & IOCB_NOIO)
3239d834 2460 goto would_block;
cf914a7d 2461 page_cache_sync_readahead(mapping,
7ff81078 2462 ra, filp,
3ea89ee8
FW
2463 index, last_index - index);
2464 page = find_get_page(mapping, index);
723ef24b
KO
2465 if (unlikely(page == NULL)) {
2466 page = generic_file_buffered_read_no_cached_page(iocb, iter);
2467 if (!page)
2468 goto find_page;
2469 if (IS_ERR(page)) {
2470 error = PTR_ERR(page);
2471 goto out;
2472 }
2473 }
3ea89ee8
FW
2474 }
2475 if (PageReadahead(page)) {
41da51bc
AG
2476 if (iocb->ki_flags & IOCB_NOIO) {
2477 put_page(page);
2478 goto out;
2479 }
cf914a7d 2480 page_cache_async_readahead(mapping,
7ff81078 2481 ra, filp, page,
3ea89ee8 2482 index, last_index - index);
1da177e4 2483 }
8ab22b9a 2484 if (!PageUptodate(page)) {
723ef24b 2485 if (iocb->ki_flags & IOCB_NOWAIT) {
09cbfeaf 2486 put_page(page);
723ef24b 2487 error = -EAGAIN;
a32ea1e1
N
2488 goto out;
2489 }
723ef24b
KO
2490 page = generic_file_buffered_read_pagenotuptodate(iocb,
2491 filp, iter, page, iocb->ki_pos, iter->count);
2492 if (!page)
994fc28c 2493 goto find_page;
723ef24b
KO
2494 if (IS_ERR(page)) {
2495 error = PTR_ERR(page);
2496 goto out;
994fc28c 2497 }
1da177e4
LT
2498 }
2499
723ef24b 2500 error = generic_file_buffered_read_page_ok(iocb, iter, page);
09cbfeaf 2501 put_page(page);
1da177e4 2502
1da177e4 2503 if (error) {
723ef24b 2504 if (error > 0)
6e58e79d 2505 error = 0;
1da177e4
LT
2506 goto out;
2507 }
1da177e4
LT
2508 }
2509
3239d834
MT
2510would_block:
2511 error = -EAGAIN;
1da177e4 2512out:
0c6aa263 2513 file_accessed(filp);
723ef24b
KO
2514 written += orig_count - iov_iter_count(iter);
2515
6e58e79d 2516 return written ? written : error;
1da177e4 2517}
d85dc2e1 2518EXPORT_SYMBOL_GPL(generic_file_buffered_read);
1da177e4 2519
485bb99b 2520/**
6abd2322 2521 * generic_file_read_iter - generic filesystem read routine
485bb99b 2522 * @iocb: kernel I/O control block
6abd2322 2523 * @iter: destination for the data read
485bb99b 2524 *
6abd2322 2525 * This is the "read_iter()" routine for all filesystems
1da177e4 2526 * that can use the page cache directly.
41da51bc
AG
2527 *
2528 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2529 * be returned when no data can be read without waiting for I/O requests
2530 * to complete; it doesn't prevent readahead.
2531 *
2532 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2533 * requests shall be made for the read or for readahead. When no data
2534 * can be read, -EAGAIN shall be returned. When readahead would be
2535 * triggered, a partial, possibly empty read shall be returned.
2536 *
a862f68a
MR
2537 * Return:
2538 * * number of bytes copied, even for partial reads
41da51bc 2539 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
1da177e4
LT
2540 */
2541ssize_t
ed978a81 2542generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1da177e4 2543{
e7080a43 2544 size_t count = iov_iter_count(iter);
47c27bc4 2545 ssize_t retval = 0;
e7080a43
NS
2546
2547 if (!count)
2548 goto out; /* skip atime */
1da177e4 2549
2ba48ce5 2550 if (iocb->ki_flags & IOCB_DIRECT) {
47c27bc4 2551 struct file *file = iocb->ki_filp;
ed978a81
AV
2552 struct address_space *mapping = file->f_mapping;
2553 struct inode *inode = mapping->host;
543ade1f 2554 loff_t size;
1da177e4 2555
1da177e4 2556 size = i_size_read(inode);
6be96d3a
GR
2557 if (iocb->ki_flags & IOCB_NOWAIT) {
2558 if (filemap_range_has_page(mapping, iocb->ki_pos,
2559 iocb->ki_pos + count - 1))
2560 return -EAGAIN;
2561 } else {
2562 retval = filemap_write_and_wait_range(mapping,
2563 iocb->ki_pos,
2564 iocb->ki_pos + count - 1);
2565 if (retval < 0)
2566 goto out;
2567 }
d8d3d94b 2568
0d5b0cf2
CH
2569 file_accessed(file);
2570
5ecda137 2571 retval = mapping->a_ops->direct_IO(iocb, iter);
c3a69024 2572 if (retval >= 0) {
c64fb5c7 2573 iocb->ki_pos += retval;
5ecda137 2574 count -= retval;
9fe55eea 2575 }
5b47d59a 2576 iov_iter_revert(iter, count - iov_iter_count(iter));
66f998f6 2577
9fe55eea
SW
2578 /*
2579 * Btrfs can have a short DIO read if we encounter
2580 * compressed extents, so if there was an error, or if
2581 * we've already read everything we wanted to, or if
2582 * there was a short read because we hit EOF, go ahead
2583 * and return. Otherwise fallthrough to buffered io for
fbbbad4b
MW
2584 * the rest of the read. Buffered reads will not work for
2585 * DAX files, so don't bother trying.
9fe55eea 2586 */
5ecda137 2587 if (retval < 0 || !count || iocb->ki_pos >= size ||
0d5b0cf2 2588 IS_DAX(inode))
9fe55eea 2589 goto out;
1da177e4
LT
2590 }
2591
47c27bc4 2592 retval = generic_file_buffered_read(iocb, iter, retval);
1da177e4
LT
2593out:
2594 return retval;
2595}
ed978a81 2596EXPORT_SYMBOL(generic_file_read_iter);
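/*
 * Filesystems that use the page cache directly can wire this up
 * unmodified; a sketch of the usual file_operations wiring (the
 * myfs name is hypothetical; compare ext2_file_operations):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */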
1da177e4 2597
1da177e4 2598#ifdef CONFIG_MMU
1da177e4 2599#define MMAP_LOTSAMISS (100)
6b4c9f44 2600/*
c1e8d7c6 2601 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
6b4c9f44
JB
2602 * @vmf: the vm_fault for this fault.
2603 * @page: the page to lock.
2604 * @fpin: the pointer to the file we may pin (or is already pinned).
2605 *
c1e8d7c6 2606 * This works similarly to lock_page_or_retry() in that it can drop the mmap_lock.
6b4c9f44 2607 * It differs in that it actually returns the page locked if it returns 1 and 0
c1e8d7c6 2608 * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin
6b4c9f44
JB
2609 * will point to the pinned file and needs to be fput()'ed at a later point.
2610 */
2611static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
2612 struct file **fpin)
2613{
2614 if (trylock_page(page))
2615 return 1;
2616
8b0f9fa2
LT
2617 /*
2618 * NOTE! This will make us return with VM_FAULT_RETRY, but with
c1e8d7c6 2619 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
8b0f9fa2
LT
2620 * is supposed to work. We have way too many special cases..
2621 */
6b4c9f44
JB
2622 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
2623 return 0;
2624
2625 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
2626 if (vmf->flags & FAULT_FLAG_KILLABLE) {
2627 if (__lock_page_killable(page)) {
2628 /*
c1e8d7c6 2629 * We didn't have the right flags to drop the mmap_lock,
6b4c9f44
JB
2630 * but all fault_handlers only check for fatal signals
2631 * if we return VM_FAULT_RETRY, so we need to drop the
c1e8d7c6 2632 * mmap_lock here and return 0 if we don't have a fpin.
6b4c9f44
JB
2633 */
2634 if (*fpin == NULL)
d8ed45c5 2635 mmap_read_unlock(vmf->vma->vm_mm);
6b4c9f44
JB
2636 return 0;
2637 }
2638 } else
2639 __lock_page(page);
2640 return 1;
2641}
2642
1da177e4 2643
ef00e08e 2644/*
6b4c9f44
JB
2645 * Synchronous readahead happens when we don't even find a page in the page
2646 * cache at all. We don't want to perform IO under the mmap_lock, so if we
2647 * have to drop it we return the file that was pinned in order to do that.
2648 * If we didn't pin a file then we return NULL. The file that is returned
2649 * needs to be fput()'ed when we're done with it.
ef00e08e 2650 */
6b4c9f44 2651static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
ef00e08e 2652{
2a1180f1
JB
2653 struct file *file = vmf->vma->vm_file;
2654 struct file_ra_state *ra = &file->f_ra;
ef00e08e 2655 struct address_space *mapping = file->f_mapping;
db660d46 2656 DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff);
6b4c9f44 2657 struct file *fpin = NULL;
e630bfac 2658 unsigned int mmap_miss;
ef00e08e
LT
2659
2660 /* If we don't want any read-ahead, don't bother */
2a1180f1 2661 if (vmf->vma->vm_flags & VM_RAND_READ)
6b4c9f44 2662 return fpin;
275b12bf 2663 if (!ra->ra_pages)
6b4c9f44 2664 return fpin;
ef00e08e 2665
2a1180f1 2666 if (vmf->vma->vm_flags & VM_SEQ_READ) {
6b4c9f44 2667 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
db660d46 2668 page_cache_sync_ra(&ractl, ra, ra->ra_pages);
6b4c9f44 2669 return fpin;
ef00e08e
LT
2670 }
2671
207d04ba 2672 /* Avoid banging the cache line if not needed */
e630bfac
KS
2673 mmap_miss = READ_ONCE(ra->mmap_miss);
2674 if (mmap_miss < MMAP_LOTSAMISS * 10)
2675 WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
ef00e08e
LT
2676
2677 /*
2678 * Do we miss much more than hit in this file? If so,
2679 * stop bothering with read-ahead. It will only hurt.
2680 */
e630bfac 2681 if (mmap_miss > MMAP_LOTSAMISS)
6b4c9f44 2682 return fpin;
ef00e08e 2683
d30a1100
WF
2684 /*
2685 * mmap read-around
2686 */
6b4c9f44 2687 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
db660d46 2688 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
600e19af
RG
2689 ra->size = ra->ra_pages;
2690 ra->async_size = ra->ra_pages / 4;
db660d46
DH
2691 ractl._index = ra->start;
2692 do_page_cache_ra(&ractl, ra->size, ra->async_size);
6b4c9f44 2693 return fpin;
ef00e08e
LT
2694}
2695
2696/*
2697 * Asynchronous readahead happens when we find the page and PG_readahead,
6b4c9f44 2698 * so we want to possibly extend the readahead further. We return the file that
c1e8d7c6 2699 * was pinned if we have to drop the mmap_lock in order to do IO.
ef00e08e 2700 */
6b4c9f44
JB
2701static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
2702 struct page *page)
ef00e08e 2703{
2a1180f1
JB
2704 struct file *file = vmf->vma->vm_file;
2705 struct file_ra_state *ra = &file->f_ra;
ef00e08e 2706 struct address_space *mapping = file->f_mapping;
6b4c9f44 2707 struct file *fpin = NULL;
e630bfac 2708 unsigned int mmap_miss;
2a1180f1 2709 pgoff_t offset = vmf->pgoff;
ef00e08e
LT
2710
2711 /* If we don't want any read-ahead, don't bother */
5c72feee 2712 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
6b4c9f44 2713 return fpin;
e630bfac
KS
2714 mmap_miss = READ_ONCE(ra->mmap_miss);
2715 if (mmap_miss)
2716 WRITE_ONCE(ra->mmap_miss, --mmap_miss);
6b4c9f44
JB
2717 if (PageReadahead(page)) {
2718 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2fad6f5d
WF
2719 page_cache_async_readahead(mapping, ra, file,
2720 page, offset, ra->ra_pages);
6b4c9f44
JB
2721 }
2722 return fpin;
ef00e08e
LT
2723}
2724
485bb99b 2725/**
54cb8821 2726 * filemap_fault - read in file data for page fault handling
d0217ac0 2727 * @vmf: struct vm_fault containing details of the fault
485bb99b 2728 *
54cb8821 2729 * filemap_fault() is invoked via the vma operations vector for a
1da177e4
LT
2730 * mapped memory region to read in file data during a page fault.
2731 *
2732 * The goto's are kind of ugly, but this streamlines the normal case of having
2733 * it in the page cache, and handles the special cases reasonably without
2734 * having a lot of duplicated code.
9a95f3cf 2735 *
c1e8d7c6 2736 * vma->vm_mm->mmap_lock must be held on entry.
9a95f3cf 2737 *
c1e8d7c6 2738 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
a4985833 2739 * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
9a95f3cf 2740 *
c1e8d7c6 2741 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
9a95f3cf
PC
2742 * has not been released.
2743 *
2744 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
a862f68a
MR
2745 *
2746 * Return: bitwise-OR of %VM_FAULT_ codes.
1da177e4 2747 */
2bcd6454 2748vm_fault_t filemap_fault(struct vm_fault *vmf)
1da177e4
LT
2749{
2750 int error;
11bac800 2751 struct file *file = vmf->vma->vm_file;
6b4c9f44 2752 struct file *fpin = NULL;
1da177e4
LT
2753 struct address_space *mapping = file->f_mapping;
2754 struct file_ra_state *ra = &file->f_ra;
2755 struct inode *inode = mapping->host;
ef00e08e 2756 pgoff_t offset = vmf->pgoff;
9ab2594f 2757 pgoff_t max_off;
1da177e4 2758 struct page *page;
2bcd6454 2759 vm_fault_t ret = 0;
1da177e4 2760
9ab2594f
MW
2761 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2762 if (unlikely(offset >= max_off))
5307cc1a 2763 return VM_FAULT_SIGBUS;
1da177e4 2764
1da177e4 2765 /*
49426420 2766 * Do we have something in the page cache already?
1da177e4 2767 */
ef00e08e 2768 page = find_get_page(mapping, offset);
45cac65b 2769 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
1da177e4 2770 /*
ef00e08e
LT
2771 * We found the page, so try async readahead before
2772 * waiting for the lock.
1da177e4 2773 */
6b4c9f44 2774 fpin = do_async_mmap_readahead(vmf, page);
45cac65b 2775 } else if (!page) {
ef00e08e 2776 /* No page in the page cache at all */
ef00e08e 2777 count_vm_event(PGMAJFAULT);
2262185c 2778 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
ef00e08e 2779 ret = VM_FAULT_MAJOR;
6b4c9f44 2780 fpin = do_sync_mmap_readahead(vmf);
ef00e08e 2781retry_find:
a75d4c33
JB
2782 page = pagecache_get_page(mapping, offset,
2783 FGP_CREAT|FGP_FOR_MMAP,
2784 vmf->gfp_mask);
6b4c9f44
JB
2785 if (!page) {
2786 if (fpin)
2787 goto out_retry;
e520e932 2788 return VM_FAULT_OOM;
6b4c9f44 2789 }
1da177e4
LT
2790 }
2791
6b4c9f44
JB
2792 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
2793 goto out_retry;
b522c94d
ML
2794
2795 /* Did it get truncated? */
585e5a7b 2796 if (unlikely(compound_head(page)->mapping != mapping)) {
b522c94d
ML
2797 unlock_page(page);
2798 put_page(page);
2799 goto retry_find;
2800 }
520e5ba4 2801 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
b522c94d 2802
1da177e4 2803 /*
d00806b1
NP
2804 * We have a locked page in the page cache, now we need to check
2805 * that it's up-to-date. If not, it is going to be due to an error.
1da177e4 2806 */
d00806b1 2807 if (unlikely(!PageUptodate(page)))
1da177e4
LT
2808 goto page_not_uptodate;
2809
6b4c9f44 2810 /*
c1e8d7c6 2811 * We've made it this far and we had to drop our mmap_lock, now is the
6b4c9f44
JB
2812 * time to return to the upper layer and have it re-find the vma and
2813 * redo the fault.
2814 */
2815 if (fpin) {
2816 unlock_page(page);
2817 goto out_retry;
2818 }
2819
ef00e08e
LT
2820 /*
2821 * Found the page and have a reference on it.
2822 * We must recheck i_size under page lock.
2823 */
9ab2594f
MW
2824 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2825 if (unlikely(offset >= max_off)) {
d00806b1 2826 unlock_page(page);
09cbfeaf 2827 put_page(page);
5307cc1a 2828 return VM_FAULT_SIGBUS;
d00806b1
NP
2829 }
2830
d0217ac0 2831 vmf->page = page;
83c54070 2832 return ret | VM_FAULT_LOCKED;
1da177e4 2833
1da177e4 2834page_not_uptodate:
1da177e4
LT
2835 /*
2836 * Umm, take care of errors if the page isn't up-to-date.
2837 * Try to re-read it _once_. We do this synchronously,
2838 * because there really aren't any performance issues here
2839 * and we need to check for errors.
2840 */
1da177e4 2841 ClearPageError(page);
6b4c9f44 2842 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
994fc28c 2843 error = mapping->a_ops->readpage(file, page);
3ef0f720
MS
2844 if (!error) {
2845 wait_on_page_locked(page);
2846 if (!PageUptodate(page))
2847 error = -EIO;
2848 }
6b4c9f44
JB
2849 if (fpin)
2850 goto out_retry;
09cbfeaf 2851 put_page(page);
d00806b1
NP
2852
2853 if (!error || error == AOP_TRUNCATED_PAGE)
994fc28c 2854 goto retry_find;
1da177e4 2855
0f8e2db4 2856 shrink_readahead_size_eio(ra);
d0217ac0 2857 return VM_FAULT_SIGBUS;
6b4c9f44
JB
2858
2859out_retry:
2860 /*
c1e8d7c6 2861 * We dropped the mmap_lock, we need to return to the fault handler to
6b4c9f44
JB
2862 * re-find the vma and come back and find our hopefully still populated
2863 * page.
2864 */
2865 if (page)
2866 put_page(page);
2867 if (fpin)
2868 fput(fpin);
2869 return ret | VM_FAULT_RETRY;
54cb8821
NP
2870}
2871EXPORT_SYMBOL(filemap_fault);
2872
82b0f8c3 2873void filemap_map_pages(struct vm_fault *vmf,
bae473a4 2874 pgoff_t start_pgoff, pgoff_t end_pgoff)
f1820361 2875{
82b0f8c3 2876 struct file *file = vmf->vma->vm_file;
f1820361 2877 struct address_space *mapping = file->f_mapping;
bae473a4 2878 pgoff_t last_pgoff = start_pgoff;
9ab2594f 2879 unsigned long max_idx;
070e807c 2880 XA_STATE(xas, &mapping->i_pages, start_pgoff);
27a83a60 2881 struct page *head, *page;
e630bfac 2882 unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
f1820361
KS
2883
2884 rcu_read_lock();
27a83a60
MWO
2885 xas_for_each(&xas, head, end_pgoff) {
2886 if (xas_retry(&xas, head))
070e807c 2887 continue;
27a83a60 2888 if (xa_is_value(head))
2cf938aa 2889 goto next;
f1820361 2890
e0975b2a
MH
2891 /*
2892 * Check for a locked page first, as a speculative
2893 * reference may adversely influence page migration.
2894 */
27a83a60 2895 if (PageLocked(head))
e0975b2a 2896 goto next;
27a83a60 2897 if (!page_cache_get_speculative(head))
070e807c 2898 goto next;
f1820361 2899
4101196b 2900 /* Has the page moved or been split? */
27a83a60 2901 if (unlikely(head != xas_reload(&xas)))
070e807c 2902 goto skip;
27a83a60 2903 page = find_subpage(head, xas.xa_index);
f1820361 2904
27a83a60 2905 if (!PageUptodate(head) ||
f1820361
KS
2906 PageReadahead(page) ||
2907 PageHWPoison(page))
2908 goto skip;
27a83a60 2909 if (!trylock_page(head))
f1820361
KS
2910 goto skip;
2911
27a83a60 2912 if (head->mapping != mapping || !PageUptodate(head))
f1820361
KS
2913 goto unlock;
2914
9ab2594f 2915 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
27a83a60 2916 if (xas.xa_index >= max_idx)
f1820361
KS
2917 goto unlock;
2918
e630bfac
KS
2919 if (mmap_miss > 0)
2920 mmap_miss--;
7267ec00 2921
070e807c 2922 vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
82b0f8c3 2923 if (vmf->pte)
070e807c
MW
2924 vmf->pte += xas.xa_index - last_pgoff;
2925 last_pgoff = xas.xa_index;
9d82c694 2926 if (alloc_set_pte(vmf, page))
7267ec00 2927 goto unlock;
27a83a60 2928 unlock_page(head);
f1820361
KS
2929 goto next;
2930unlock:
27a83a60 2931 unlock_page(head);
f1820361 2932skip:
27a83a60 2933 put_page(head);
f1820361 2934next:
7267ec00 2935 /* Huge page is mapped? No need to proceed. */
82b0f8c3 2936 if (pmd_trans_huge(*vmf->pmd))
7267ec00 2937 break;
f1820361
KS
2938 }
2939 rcu_read_unlock();
e630bfac 2940 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
f1820361
KS
2941}
2942EXPORT_SYMBOL(filemap_map_pages);
2943
2bcd6454 2944vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
4fcf1c62
JK
2945{
2946 struct page *page = vmf->page;
11bac800 2947 struct inode *inode = file_inode(vmf->vma->vm_file);
2bcd6454 2948 vm_fault_t ret = VM_FAULT_LOCKED;
4fcf1c62 2949
14da9200 2950 sb_start_pagefault(inode->i_sb);
11bac800 2951 file_update_time(vmf->vma->vm_file);
4fcf1c62
JK
2952 lock_page(page);
2953 if (page->mapping != inode->i_mapping) {
2954 unlock_page(page);
2955 ret = VM_FAULT_NOPAGE;
2956 goto out;
2957 }
14da9200
JK
2958 /*
2959 * We mark the page dirty already here so that when freeze is in
2960 * progress, we are guaranteed that writeback during freezing will
2961 * see the dirty page and writeprotect it again.
2962 */
2963 set_page_dirty(page);
1d1d1a76 2964 wait_for_stable_page(page);
4fcf1c62 2965out:
14da9200 2966 sb_end_pagefault(inode->i_sb);
4fcf1c62
JK
2967 return ret;
2968}
4fcf1c62 2969
f0f37e2f 2970const struct vm_operations_struct generic_file_vm_ops = {
54cb8821 2971 .fault = filemap_fault,
f1820361 2972 .map_pages = filemap_map_pages,
4fcf1c62 2973 .page_mkwrite = filemap_page_mkwrite,
1da177e4
LT
2974};
2975
2976/* This is used for a general mmap of a disk file */
2977
2978int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2979{
2980 struct address_space *mapping = file->f_mapping;
2981
2982 if (!mapping->a_ops->readpage)
2983 return -ENOEXEC;
2984 file_accessed(file);
2985 vma->vm_ops = &generic_file_vm_ops;
2986 return 0;
2987}
1da177e4
LT
2988
2989/*
2990 * This is for filesystems which do not implement ->writepage.
2991 */
2992int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
2993{
2994 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2995 return -EINVAL;
2996 return generic_file_mmap(file, vma);
2997}
2998#else
4b96a37d 2999vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
45397228 3000{
4b96a37d 3001 return VM_FAULT_SIGBUS;
45397228 3002}
1da177e4
LT
3003int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
3004{
3005 return -ENOSYS;
3006}
3007int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
3008{
3009 return -ENOSYS;
3010}
3011#endif /* CONFIG_MMU */
3012
45397228 3013EXPORT_SYMBOL(filemap_page_mkwrite);
1da177e4
LT
3014EXPORT_SYMBOL(generic_file_mmap);
3015EXPORT_SYMBOL(generic_file_readonly_mmap);
3016
67f9fd91
SL
3017static struct page *wait_on_page_read(struct page *page)
3018{
3019 if (!IS_ERR(page)) {
3020 wait_on_page_locked(page);
3021 if (!PageUptodate(page)) {
09cbfeaf 3022 put_page(page);
67f9fd91
SL
3023 page = ERR_PTR(-EIO);
3024 }
3025 }
3026 return page;
3027}
3028
32b63529 3029static struct page *do_read_cache_page(struct address_space *mapping,
57f6b96c 3030 pgoff_t index,
5e5358e7 3031 int (*filler)(void *, struct page *),
0531b2aa
LT
3032 void *data,
3033 gfp_t gfp)
1da177e4 3034{
eb2be189 3035 struct page *page;
1da177e4
LT
3036 int err;
3037repeat:
3038 page = find_get_page(mapping, index);
3039 if (!page) {
453f85d4 3040 page = __page_cache_alloc(gfp);
eb2be189
NP
3041 if (!page)
3042 return ERR_PTR(-ENOMEM);
e6f67b8c 3043 err = add_to_page_cache_lru(page, mapping, index, gfp);
eb2be189 3044 if (unlikely(err)) {
09cbfeaf 3045 put_page(page);
eb2be189
NP
3046 if (err == -EEXIST)
3047 goto repeat;
22ecdb4f 3048 /* Presumably ENOMEM for xarray node */
1da177e4
LT
3049 return ERR_PTR(err);
3050 }
32b63529
MG
3051
3052filler:
6c45b454
CH
3053 if (filler)
3054 err = filler(data, page);
3055 else
3056 err = mapping->a_ops->readpage(data, page);
3057
1da177e4 3058 if (err < 0) {
09cbfeaf 3059 put_page(page);
32b63529 3060 return ERR_PTR(err);
1da177e4 3061 }
1da177e4 3062
32b63529
MG
3063 page = wait_on_page_read(page);
3064 if (IS_ERR(page))
3065 return page;
3066 goto out;
3067 }
1da177e4
LT
3068 if (PageUptodate(page))
3069 goto out;
3070
ebded027 3071 /*
0e9aa675 3072 * Page is not up to date and may be locked due to one of the following
ebded027
MG
3073 * case a: Page is being filled and the page lock is held
3074 * case b: Read/write error clearing the page uptodate status
3075 * case c: Truncation in progress (page locked)
3076 * case d: Reclaim in progress
3077 *
3078 * Case a, the page will be up to date when the page is unlocked.
3079 * There is no need to serialise on the page lock here as the page
3080 * is pinned so the lock gives no additional protection. Even if the
ce89fddf 3081 * page is truncated, the data is still valid if PageUptodate as
ebded027
MG
3082 * it's just a read vs truncate race.
3083 * Case b, the page will not be up to date
3084 * Case c, the page may be truncated but in itself, the data may still
3085 * be valid after IO completes as it's a read vs truncate race. The
3086 * operation must restart if the page is not uptodate on unlock but
3087 * otherwise serialising on page lock to stabilise the mapping gives
3088 * no additional guarantees to the caller as the page lock is
3089 * released before return.
3090 * Case d, similar to truncation. If reclaim holds the page lock, it
3091 * will be a race with remove_mapping that determines if the mapping
3092 * is valid on unlock but otherwise the data is valid and there is
3093 * no need to serialise with page lock.
3094 *
3095 * As the page lock gives no additional guarantee, we optimistically
3096 * wait on the page to be unlocked and check if it's up to date and
3097 * use the page if it is. Otherwise, the page lock is required to
3098 * distinguish between the different cases. The motivation is that we
3099 * avoid spurious serialisations and wakeups when multiple processes
3100 * wait on the same page for IO to complete.
3101 */
3102 wait_on_page_locked(page);
3103 if (PageUptodate(page))
3104 goto out;
3105
3106 /* Distinguish between all the cases under the safety of the lock */
1da177e4 3107 lock_page(page);
ebded027
MG
3108
3109 /* Case c or d, restart the operation */
1da177e4
LT
3110 if (!page->mapping) {
3111 unlock_page(page);
09cbfeaf 3112 put_page(page);
32b63529 3113 goto repeat;
1da177e4 3114 }
ebded027
MG
3115
3116 /* Someone else locked and filled the page in a very small window */
1da177e4
LT
3117 if (PageUptodate(page)) {
3118 unlock_page(page);
3119 goto out;
3120 }
faffdfa0
XT
3121
3122 /*
3123 * A previous I/O error may have been due to temporary
3124 * failures.
3125 * Clear page error before actual read, PG_error will be
3126 * set again if readpage fails.
3127 */
3128 ClearPageError(page);
32b63529
MG
3129 goto filler;
3130
c855ff37 3131out:
6fe6900e
NP
3132 mark_page_accessed(page);
3133 return page;
3134}
0531b2aa
LT
3135
3136/**
67f9fd91 3137 * read_cache_page - read into page cache, fill it if needed
0531b2aa
LT
3138 * @mapping: the page's address_space
3139 * @index: the page index
3140 * @filler: function to perform the read
5e5358e7 3141 * @data: first arg to filler(data, page) function, often left as NULL
0531b2aa 3142 *
0531b2aa 3143 * Read into the page cache. If a page already exists, and PageUptodate() is
67f9fd91 3144 * not set, try to fill the page and wait for it to become unlocked.
0531b2aa
LT
3145 *
3146 * If the page does not get brought uptodate, return -EIO.
a862f68a
MR
3147 *
3148 * Return: up to date page on success, ERR_PTR() on failure.
0531b2aa 3149 */
67f9fd91 3150struct page *read_cache_page(struct address_space *mapping,
0531b2aa 3151 pgoff_t index,
5e5358e7 3152 int (*filler)(void *, struct page *),
0531b2aa
LT
3153 void *data)
3154{
d322a8e5
CH
3155 return do_read_cache_page(mapping, index, filler, data,
3156 mapping_gfp_mask(mapping));
0531b2aa 3157}
67f9fd91 3158EXPORT_SYMBOL(read_cache_page);
0531b2aa
LT
3159
3160/**
3161 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3162 * @mapping: the page's address_space
3163 * @index: the page index
3164 * @gfp: the page allocator flags to use if allocating
3165 *
3166 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
e6f67b8c 3167 * any new page allocations done using the specified allocation flags.
0531b2aa
LT
3168 *
3169 * If the page does not get brought uptodate, return -EIO.
a862f68a
MR
3170 *
3171 * Return: up to date page on success, ERR_PTR() on failure.
0531b2aa
LT
3172 */
3173struct page *read_cache_page_gfp(struct address_space *mapping,
3174 pgoff_t index,
3175 gfp_t gfp)
3176{
6c45b454 3177 return do_read_cache_page(mapping, index, NULL, NULL, gfp);
0531b2aa
LT
3178}
3179EXPORT_SYMBOL(read_cache_page_gfp);
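/*
 * Most callers reach these through the read_mapping_page() wrapper in
 * pagemap.h; a minimal sketch:
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// page is uptodate and referenced, but not locked
 *	...
 *	put_page(page);
 */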
3180
afddba49
NP
3181int pagecache_write_begin(struct file *file, struct address_space *mapping,
3182 loff_t pos, unsigned len, unsigned flags,
3183 struct page **pagep, void **fsdata)
3184{
3185 const struct address_space_operations *aops = mapping->a_ops;
3186
4e02ed4b 3187 return aops->write_begin(file, mapping, pos, len, flags,
afddba49 3188 pagep, fsdata);
afddba49
NP
3189}
3190EXPORT_SYMBOL(pagecache_write_begin);
3191
3192int pagecache_write_end(struct file *file, struct address_space *mapping,
3193 loff_t pos, unsigned len, unsigned copied,
3194 struct page *page, void *fsdata)
3195{
3196 const struct address_space_operations *aops = mapping->a_ops;
afddba49 3197
4e02ed4b 3198 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
afddba49
NP
3199}
3200EXPORT_SYMBOL(pagecache_write_end);
3201
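These two wrappers let code outside the filesystem drive the ->write_begin/->write_end protocol. A hedged sketch of the contract for a hypothetical kernel-space source buffer (len must not cross a page boundary at pos; error handling trimmed):

static int my_write_one_chunk(struct file *file, loff_t pos,
			      const char *src, unsigned int len)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	void *fsdata;
	char *kaddr;
	int status;

	status = pagecache_write_begin(file, mapping, pos, len, 0,
				       &page, &fsdata);
	if (status)
		return status;

	kaddr = kmap_atomic(page);
	memcpy(kaddr + offset_in_page(pos), src, len);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);

	status = pagecache_write_end(file, mapping, pos, len, len,
				     page, fsdata);
	return status < 0 ? status : 0;
}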
a92853b6
KK
3202/*
3203 * Warn about a page cache invalidation failure during a direct I/O write.
3204 */
3205void dio_warn_stale_pagecache(struct file *filp)
3206{
3207 static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3208 char pathname[128];
3209 struct inode *inode = file_inode(filp);
3210 char *path;
3211
3212 errseq_set(&inode->i_mapping->wb_err, -EIO);
3213 if (__ratelimit(&_rs)) {
3214 path = file_path(filp, pathname, sizeof(pathname));
3215 if (IS_ERR(path))
3216 path = "(unknown)";
3217 pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
3218 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3219 current->comm);
3220 }
3221}
3222
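For scale: with the interval above, the pr_crit() pair fires in bursts of at most DEFAULT_RATELIMIT_BURST (10 in mainline) per 86400 * HZ jiffies, i.e. per day. The errseq_set() happens unconditionally, so a later fsync() still observes the -EIO even when the log lines themselves are suppressed.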
1da177e4 3223ssize_t
1af5bb49 3224generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
3225{
3226 struct file *file = iocb->ki_filp;
3227 struct address_space *mapping = file->f_mapping;
3228 struct inode *inode = mapping->host;
1af5bb49 3229 loff_t pos = iocb->ki_pos;
1da177e4 3230 ssize_t written;
a969e903
CH
3231 size_t write_len;
3232 pgoff_t end;
1da177e4 3233
0c949334 3234 write_len = iov_iter_count(from);
09cbfeaf 3235 end = (pos + write_len - 1) >> PAGE_SHIFT;
a969e903 3236
6be96d3a
GR
3237 if (iocb->ki_flags & IOCB_NOWAIT) {
3238 /* If there are pages to write back, return -EAGAIN rather than block */
3239 if (filemap_range_has_page(inode->i_mapping, pos,
35f12f0f 3240 pos + write_len - 1))
6be96d3a
GR
3241 return -EAGAIN;
3242 } else {
3243 written = filemap_write_and_wait_range(mapping, pos,
3244 pos + write_len - 1);
3245 if (written)
3246 goto out;
3247 }
a969e903
CH
3248
3249 /*
3250 * After a write we want buffered reads to be sure to go to disk to get
3251 * the new data. We invalidate clean cached pages from the region we're
3252 * about to write. We do this *before* the write so that we can return
6ccfa806 3253 * without clobbering -EIOCBQUEUED from ->direct_IO().
a969e903 3254 */
55635ba7 3255 written = invalidate_inode_pages2_range(mapping,
09cbfeaf 3256 pos >> PAGE_SHIFT, end);
55635ba7
AR
3257 /*
3258 * If a page cannot be invalidated, return 0 to fall back
3259 * to a buffered write.
3260 */
3261 if (written) {
3262 if (written == -EBUSY)
3263 return 0;
3264 goto out;
a969e903
CH
3265 }
3266
639a93a5 3267 written = mapping->a_ops->direct_IO(iocb, from);
a969e903
CH
3268
3269 /*
3270 * Finally, try again to invalidate clean pages which might have been
3271 * cached by non-direct readahead, or faulted in by get_user_pages()
3272 * if the source of the write was an mmap'ed region of the file
3273 * we're writing. Either one is a pretty crazy thing to do,
3274 * so we don't support it 100%. If this invalidation
3275 * fails, tough, the write still worked...
332391a9
LC
3276 *
3277 * Most of the time we do not need this since dio_complete() will do
3278 * the invalidation for us. However there are some file systems that
3279 * do not end up with dio_complete() being called, so let's not break
80c1fe90
KK
3280 * them by removing it completely.
3281 *
9266a140
KK
3282 * A notable example is blkdev_direct_IO().
3283 *
80c1fe90 3284 * Skip invalidation for async writes or if mapping has no pages.
a969e903 3285 */
9266a140
KK
3286 if (written > 0 && mapping->nrpages &&
3287 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
3288 dio_warn_stale_pagecache(file);
a969e903 3289
1da177e4 3290 if (written > 0) {
0116651c 3291 pos += written;
639a93a5 3292 write_len -= written;
0116651c
NK
3293 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3294 i_size_write(inode, pos);
1da177e4
LT
3295 mark_inode_dirty(inode);
3296 }
5cb6c6c7 3297 iocb->ki_pos = pos;
1da177e4 3298 }
639a93a5 3299 iov_iter_revert(from, write_len - iov_iter_count(from));
a969e903 3300out:
1da177e4
LT
3301 return written;
3302}
3303EXPORT_SYMBOL(generic_file_direct_write);
3304
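A hedged sketch of a caller (names hypothetical): a ->write_iter for a filesystem that only supports direct I/O. Note the convention established above that a failed invalidation makes this helper return 0 to mean "fall back to buffered"; a real implementation would act on that rather than hand 0 back to userspace.

static ssize_t my_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;		/* this toy path is O_DIRECT only */

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = generic_file_direct_write(iocb, from);
	inode_unlock(inode);
	return ret;
}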
eb2be189
NP
3305/*
3306 * Find or create a page at the given pagecache position. Return the locked
3307 * page. This function is specifically for buffered writes.
3308 */
54566b2c
NP
3309struct page *grab_cache_page_write_begin(struct address_space *mapping,
3310 pgoff_t index, unsigned flags)
eb2be189 3311{
eb2be189 3312 struct page *page;
bbddabe2 3313 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;
0faa70cb 3314
54566b2c 3315 if (flags & AOP_FLAG_NOFS)
2457aec6
MG
3316 fgp_flags |= FGP_NOFS;
3317
3318 page = pagecache_get_page(mapping, index, fgp_flags,
45f87de5 3319 mapping_gfp_mask(mapping));
c585a267 3320 if (page)
2457aec6 3321 wait_for_stable_page(page);
eb2be189 3322
eb2be189
NP
3323 return page;
3324}
54566b2c 3325EXPORT_SYMBOL(grab_cache_page_write_begin);
eb2be189 3326
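Typical consumer, sketched along the lines of simple_write_begin() in fs/libfs.c (zeroing of a freshly created, not-yet-uptodate page is omitted here):

static int my_write_begin(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned flags,
			  struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;		/* locked and stable, per the helper above */
	return 0;
}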
3b93f911 3327ssize_t generic_perform_write(struct file *file,
afddba49
NP
3328 struct iov_iter *i, loff_t pos)
3329{
3330 struct address_space *mapping = file->f_mapping;
3331 const struct address_space_operations *a_ops = mapping->a_ops;
3332 long status = 0;
3333 ssize_t written = 0;
674b892e
NP
3334 unsigned int flags = 0;
3335
afddba49
NP
3336 do {
3337 struct page *page;
afddba49
NP
3338 unsigned long offset; /* Offset into pagecache page */
3339 unsigned long bytes; /* Bytes to write to page */
3340 size_t copied; /* Bytes copied from user */
3341 void *fsdata;
3342
09cbfeaf
KS
3343 offset = (pos & (PAGE_SIZE - 1));
3344 bytes = min_t(unsigned long, PAGE_SIZE - offset,
afddba49
NP
3345 iov_iter_count(i));
3346
3347again:
00a3d660
LT
3348 /*
3349 * Bring in the user page that we will copy from _first_.
3350 * Otherwise there's a nasty deadlock on copying from the
3351 * same page as we're writing to, without it being marked
3352 * up-to-date.
3353 *
3354 * Not only is this an optimisation, but it is also required
3355 * to check that the address is actually valid, when atomic
3356 * usercopies are used, below.
3357 */
3358 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
3359 status = -EFAULT;
3360 break;
3361 }
3362
296291cd
JK
3363 if (fatal_signal_pending(current)) {
3364 status = -EINTR;
3365 break;
3366 }
3367
674b892e 3368 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
afddba49 3369 &page, &fsdata);
2457aec6 3370 if (unlikely(status < 0))
afddba49
NP
3371 break;
3372
931e80e4 3373 if (mapping_writably_mapped(mapping))
3374 flush_dcache_page(page);
00a3d660 3375
afddba49 3376 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
afddba49
NP
3377 flush_dcache_page(page);
3378
3379 status = a_ops->write_end(file, mapping, pos, bytes, copied,
3380 page, fsdata);
3381 if (unlikely(status < 0))
3382 break;
3383 copied = status;
3384
3385 cond_resched();
3386
124d3b70 3387 iov_iter_advance(i, copied);
afddba49
NP
3388 if (unlikely(copied == 0)) {
3389 /*
3390 * If we were unable to copy any data at all, we must
3391 * fall back to a single segment length write.
3392 *
3393 * If we didn't fall back here, we could livelock
3394 * because not all segments in the iov can be copied at
3395 * once without a pagefault.
3396 */
09cbfeaf 3397 bytes = min_t(unsigned long, PAGE_SIZE - offset,
afddba49
NP
3398 iov_iter_single_seg_count(i));
3399 goto again;
3400 }
afddba49
NP
3401 pos += copied;
3402 written += copied;
3403
3404 balance_dirty_pages_ratelimited(mapping);
afddba49
NP
3405 } while (iov_iter_count(i));
3406
3407 return written ? written : status;
3408}
3b93f911 3409EXPORT_SYMBOL(generic_perform_write);
1da177e4 3410
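The fault-in-before-copy dance above defends against a perfectly legal userspace pattern. A minimal userspace illustration of the self-referential write it has to survive (error handling omitted):

/* Userspace: write a file range from an mmap of that same range. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);
	char *map = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	/*
	 * Source buffer == destination page: the atomic usercopy can
	 * fault on `map` while the target page is locked, so the
	 * kernel prefaults the source and retries with shorter
	 * segments instead of livelocking.
	 */
	return write(fd, map, 4096) == 4096 ? 0 : 1;
}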
e4dd9de3 3411/**
8174202b 3412 * __generic_file_write_iter - write data to a file
e4dd9de3 3413 * @iocb: IO state structure (file, offset, etc.)
8174202b 3414 * @from: iov_iter with data to write
e4dd9de3
JK
3415 *
3416 * This function does all the work needed for actually writing data to a
3417 * file. It does all basic checks, removes SUID from the file, updates
3418 * modification times and calls the appropriate subroutine depending on whether we
3419 * do direct IO or a standard buffered write.
3420 *
3421 * It expects i_mutex to be grabbed unless we work on a block device or similar
3422 * object which does not need locking at all.
3423 *
3424 * This function does *not* take care of syncing data in case of O_SYNC write.
3425 * A caller has to handle it. This is mainly due to the fact that we want to
3426 * avoid syncing under i_mutex.
a862f68a
MR
3427 *
3428 * Return:
3429 * * number of bytes written, even for truncated writes
3430 * * negative error code if no data has been written at all
e4dd9de3 3431 */
8174202b 3432ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
3433{
3434 struct file *file = iocb->ki_filp;
fb5527e6 3435 struct address_space *mapping = file->f_mapping;
1da177e4 3436 struct inode *inode = mapping->host;
3b93f911 3437 ssize_t written = 0;
1da177e4 3438 ssize_t err;
3b93f911 3439 ssize_t status;
1da177e4 3440
1da177e4 3441 /* We can write back this queue in page reclaim */
de1414a6 3442 current->backing_dev_info = inode_to_bdi(inode);
5fa8e0a1 3443 err = file_remove_privs(file);
1da177e4
LT
3444 if (err)
3445 goto out;
3446
c3b2da31
JB
3447 err = file_update_time(file);
3448 if (err)
3449 goto out;
1da177e4 3450
2ba48ce5 3451 if (iocb->ki_flags & IOCB_DIRECT) {
0b8def9d 3452 loff_t pos, endbyte;
fb5527e6 3453
1af5bb49 3454 written = generic_file_direct_write(iocb, from);
1da177e4 3455 /*
fbbbad4b
MW
3456 * If the write stopped short of completing, fall back to
3457 * buffered writes. Some filesystems do this for writes to
3458 * holes, for example. For DAX files, a buffered write will
3459 * not succeed (even if it did, DAX does not handle dirty
3460 * page-cache pages correctly).
1da177e4 3461 */
0b8def9d 3462 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
fbbbad4b
MW
3463 goto out;
3464
0b8def9d 3465 status = generic_perform_write(file, from, pos = iocb->ki_pos);
fb5527e6 3466 /*
3b93f911 3467 * If generic_perform_write() returned a synchronous error
fb5527e6
JM
3468 * then we want to return the number of bytes which were
3469 * direct-written, or the error code if that was zero. Note
3470 * that this differs from normal direct-io semantics, which
3471 * will return -EFOO even if some bytes were written.
3472 */
60bb4529 3473 if (unlikely(status < 0)) {
3b93f911 3474 err = status;
fb5527e6
JM
3475 goto out;
3476 }
fb5527e6
JM
3477 /*
3478 * We need to ensure that the page cache pages are written to
3479 * disk and invalidated to preserve the expected O_DIRECT
3480 * semantics.
3481 */
3b93f911 3482 endbyte = pos + status - 1;
0b8def9d 3483 err = filemap_write_and_wait_range(mapping, pos, endbyte);
fb5527e6 3484 if (err == 0) {
0b8def9d 3485 iocb->ki_pos = endbyte + 1;
3b93f911 3486 written += status;
fb5527e6 3487 invalidate_mapping_pages(mapping,
09cbfeaf
KS
3488 pos >> PAGE_SHIFT,
3489 endbyte >> PAGE_SHIFT);
fb5527e6
JM
3490 } else {
3491 /*
3492 * We don't know how much we wrote, so just return
3493 * the number of bytes which were direct-written
3494 */
3495 }
3496 } else {
0b8def9d
AV
3497 written = generic_perform_write(file, from, iocb->ki_pos);
3498 if (likely(written > 0))
3499 iocb->ki_pos += written;
fb5527e6 3500 }
1da177e4
LT
3501out:
3502 current->backing_dev_info = NULL;
3503 return written ? written : err;
3504}
8174202b 3505EXPORT_SYMBOL(__generic_file_write_iter);
e4dd9de3 3506
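A worked example of the cleanup arithmetic above, assuming 4 KiB pages (PAGE_SHIFT == 12): if the direct write stalls at pos = 4096 and generic_perform_write() then buffers status = 8192 bytes, endbyte = 4096 + 8192 - 1 = 12287, so filemap_write_and_wait_range() flushes bytes 4096..12287 and invalidate_mapping_pages() drops page indices 1 (4096 >> 12) through 2 (12287 >> 12), restoring the unpopulated cache that O_DIRECT callers expect.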
e4dd9de3 3507/**
8174202b 3508 * generic_file_write_iter - write data to a file
e4dd9de3 3509 * @iocb: IO state structure
8174202b 3510 * @from: iov_iter with data to write
e4dd9de3 3511 *
8174202b 3512 * This is a wrapper around __generic_file_write_iter() to be used by most
e4dd9de3
JK
3513 * filesystems. It takes care of syncing the file in case of an O_SYNC write
3514 * and acquires i_mutex as needed.
a862f68a
MR
3515 * Return:
3516 * * negative error code if no data has been written at all or if
3517 * vfs_fsync_range() failed for a synchronous write
3518 * * number of bytes written, even for truncated writes
e4dd9de3 3519 */
8174202b 3520ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1da177e4
LT
3521{
3522 struct file *file = iocb->ki_filp;
148f948b 3523 struct inode *inode = file->f_mapping->host;
1da177e4 3524 ssize_t ret;
1da177e4 3525
5955102c 3526 inode_lock(inode);
3309dd04
AV
3527 ret = generic_write_checks(iocb, from);
3528 if (ret > 0)
5f380c7f 3529 ret = __generic_file_write_iter(iocb, from);
5955102c 3530 inode_unlock(inode);
1da177e4 3531
e2592217
CH
3532 if (ret > 0)
3533 ret = generic_write_sync(iocb, ret);
1da177e4
LT
3534 return ret;
3535}
8174202b 3536EXPORT_SYMBOL(generic_file_write_iter);
1da177e4 3537
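For completeness, the usual way a filesystem adopts this path wholesale, modeled on the pattern used by ext2 and friends (the myfs_* name is hypothetical):

const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};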
cf9a2ae8
DH
3538/**
3539 * try_to_release_page() - release old fs-specific metadata on a page
3540 *
3541 * @page: the page which the kernel is trying to free
3542 * @gfp_mask: memory allocation flags (and I/O mode)
3543 *
3544 * The address_space is asked to try to release any data held against the page
a862f68a 3545 * (presumably at page->private).
cf9a2ae8 3546 *
266cf658
DH
3547 * This may also be called if PG_fscache is set on a page, indicating that the
3548 * page is known to the local caching routines.
3549 *
cf9a2ae8 3550 * The @gfp_mask argument specifies whether I/O may be performed to release
71baba4b 3551 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
cf9a2ae8 3552 *
a862f68a 3553 * Return: %1 if the release was successful, otherwise return zero.
cf9a2ae8
DH
3554 */
3555int try_to_release_page(struct page *page, gfp_t gfp_mask)
3556{
3557 struct address_space * const mapping = page->mapping;
3558
3559 BUG_ON(!PageLocked(page));
3560 if (PageWriteback(page))
3561 return 0;
3562
3563 if (mapping && mapping->a_ops->releasepage)
3564 return mapping->a_ops->releasepage(page, gfp_mask);
3565 return try_to_free_buffers(page);
3566}
3567
3568EXPORT_SYMBOL(try_to_release_page);
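And the matching hook on the filesystem side: a hedged sketch of a ->releasepage that refuses while hypothetical private state (my_private_busy() is a stand-in, not a real helper) is still in use, then falls back to the buffer_head helper exactly as the default path above does.

static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* my_private_busy() stands in for fs-specific bookkeeping */
	if (PagePrivate(page) && my_private_busy(page))
		return 0;	/* cannot release yet */

	return try_to_free_buffers(page);
}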