[mirror_ubuntu-jammy-kernel.git] / mm / truncate.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices)
{
	int i, j;
	bool dax;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	if (!dax)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (!dax)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}

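/*
 * Editor's illustrative sketch, not part of the original truncate.c: one way
 * a block-backed filesystem might implement ->invalidatepage so the contract
 * documented above holds.  Any fs-private tracking for the range is dropped
 * first, then block_invalidatepage() discards the affected buffers.  The
 * example_invalidatepage name is hypothetical.
 */
static void example_invalidatepage(struct page *page, unsigned int offset,
				   unsigned int length)
{
	/* drop fs-private state attached to [offset, offset + length) here */
	block_invalidatepage(page, offset, length);
}
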
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_page(struct page *page)
{
	if (page_mapped(page))
		unmap_mapping_page(page);

	if (page_has_private(page))
		do_invalidatepage(page, 0, thp_size(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

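/*
 * Editor's illustrative sketch, not part of the original truncate.c:
 * filesystems typically expose the helper above through their
 * address_space_operations so that memory-failure handling can drop a
 * poisoned pagecache page.  The example_file_aops name is hypothetical and
 * the other methods are omitted.
 */
static const struct address_space_operations example_file_aops = {
	.error_remove_page	= generic_error_remove_page,
};
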
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping_empty(mapping))
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		index = indices[pagevec_count(&pvec) - 1] + 1;
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		for (i = 0; i < pagevec_count(&pvec); i++)
			truncate_cleanup_page(pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &pvec);
		for (i = 0; i < pagevec_count(&pvec); i++)
			unlock_page(pvec.pages[i]);
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!find_get_entries(mapping, index, end - 1, &pvec,
				indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem and
 * mapping->invalidate_lock.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	if (!mapping_empty(mapping)) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Cleancache needs notification even if there are no pages or shadow
	 * entries.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);

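/*
 * Editor's illustrative sketch, not part of the original truncate.c: the
 * typical ->evict_inode shape the comment above describes.  The final
 * truncate runs before the filesystem tears down its private state.  The
 * example_evict_inode name is hypothetical.
 */
static void example_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	/* release fs-private in-core state and on-disk metadata here */
}
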
static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (find_lock_entries(mapping, index, end, &pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				count += invalidate_exceptional_entry(mapping,
								      index,
								      page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_page(page);
				/* It is likely on the pagevec of a remote CPU */
				if (nr_pagevec)
					(*nr_pagevec)++;
			}
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}

/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: the number of the cache entries that were invalidated
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);

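/*
 * Editor's illustrative sketch, not part of the original truncate.c: a
 * caller that wants to shed reclaimable cache for one inode without risking
 * data loss can simply sweep the whole range; dirty, mapped and locked
 * pages are skipped.  The example_drop_clean_cache name is hypothetical.
 */
static void example_drop_clean_cache(struct inode *inode)
{
	invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
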
/**
 * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_pagevec: invalidate failed page number for caller
 *
 * This helper is similar to invalidate_mapping_pages(), except that it accounts
 * for pages that are likely on a pagevec and counts them in @nr_pagevec, which
 * will be used by the caller.
 */
void invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{
	__invalidate_mapping_pages(mapping, start, end, nr_pagevec);
}

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irq(&mapping->i_pages);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irq(&mapping->i_pages);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irq(&mapping->i_pages);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping_empty(mapping))
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (find_get_entries(mapping, index, end, &pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
						index, page))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && page_mapped(page)) {
				/*
				 * If page is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
				did_range_unmap = 1;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);

			if (page_mapped(page))
				unmap_mapping_page(page);
			BUG_ON(page_mapped(page));

			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

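/*
 * Editor's illustrative sketch, not part of the original truncate.c: a
 * common calling pattern (e.g. around direct I/O) is to write dirty data
 * back first and then demand that the cached range be dropped, treating
 * -EBUSY as a real error.  The example_invalidate_range name is
 * hypothetical.
 */
static int example_invalidate_range(struct file *file, loff_t pos,
				    loff_t count)
{
	struct address_space *mapping = file->f_mapping;
	int err;

	/* flush first so invalidation does not have to discard dirty data */
	err = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
	if (err)
		return err;

	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
					     (pos + count - 1) >> PAGE_SHIFT);
}
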
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

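/*
 * Editor's illustrative sketch, not part of the original truncate.c: the
 * usual ATTR_SIZE handling shape described above, with i_rwsem held by the
 * caller.  truncate_setsize() updates i_size and the pagecache; only then
 * does the filesystem free the on-disk blocks.  The example_truncate name
 * is hypothetical.
 */
static int example_truncate(struct inode *inode, loff_t newsize)
{
	/* fs-specific preparation (journalling, quota checks) would go here */
	truncate_setsize(inode, newsize);
	/* ... and the blocks beyond newsize are deallocated afterwards */
	return 0;
}
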
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

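/*
 * Editor's illustrative sketch, not part of the original truncate.c: a
 * write that extends the file (with i_rwsem held) publishes the new size
 * and then lets the helper above write-protect the page straddling the old
 * EOF, so a later mmap store faults into ->page_mkwrite().  The
 * example_extend_isize name is hypothetical.
 */
static void example_extend_isize(struct inode *inode, loff_t old_size,
				 loff_t new_size)
{
	i_size_write(inode, new_size);
	pagecache_isize_extended(inode, old_size, new_size);
}
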
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
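
/*
 * Editor's illustrative sketch, not part of the original truncate.c: a
 * hole-punch implementation removes the affected pagecache first and only
 * then deallocates the underlying extent, keeping cache and disk coherent
 * as the comment above requires.  The example_punch_hole name is
 * hypothetical and the fs-specific steps are elided.
 */
static int example_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	truncate_pagecache_range(inode, offset, offset + len - 1);
	/* ... now free the on-disk blocks backing [offset, offset + len) */
	return 0;
}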