/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

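/*
 * Illustrative sketch (not part of this file): a filesystem that attaches
 * private metadata to its pages can supply its own ->invalidatepage in its
 * address_space_operations rather than falling back to
 * block_invalidatepage().  "examplefs" and its helper are hypothetical;
 * only the callback signature and the a_ops field are taken from above.
 */
#if 0
static void examplefs_invalidatepage(struct page *page, unsigned long offset)
{
	/* Whole-page invalidation: drop all private state. */
	if (offset == 0)
		examplefs_free_page_private(page);	/* hypothetical */
	/* For partial invalidation, keep state below @offset. */
}

static const struct address_space_operations examplefs_aops = {
	.invalidatepage	= examplefs_invalidatepage,
	/* .readpage, .writepage, ... */
};
#endif
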
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	/* If we're cancelling the page, it had better not be mapped any more */
	if (page_mapped(page)) {
		static unsigned int warncount;

		/* Warn on the first few occurrences only. */
		WARN_ON(++warncount < 5);
	}

	if (TestClearPageDirty(page) && account_size)
		task_io_account_cancelled_write(account_size);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

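/*
 * Illustrative sketch (not part of this file): @lend is an inclusive byte
 * offset and, per the BUG_ON above, must address the last byte of a page.
 * A caller removing a page-aligned range of pagecache, in the style of
 * madvise(MADV_REMOVE), would therefore pass the last byte of the final
 * page.  The helper name is hypothetical.
 */
#if 0
static void example_punch_pagecache(struct address_space *mapping,
				pgoff_t first, pgoff_t last)
{
	truncate_inode_pages_range(mapping,
			(loff_t)first << PAGE_CACHE_SHIFT,
			(((loff_t)last + 1) << PAGE_CACHE_SHIFT) - 1);
}
#endif
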
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

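/*
 * Illustrative sketch (not part of this file): the classic caller is the
 * shrinking-truncate path, which updates i_size under i_mutex, unmaps user
 * mappings beyond the new size, then drops the pagecache - the
 * vmtruncate()-style sequence of this era.  Swapfile checks, the expand
 * case and error handling are omitted; the function name is hypothetical.
 */
#if 0
static void example_shrink_inode(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;

	i_size_write(inode, offset);
	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, offset);
}
#endif
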
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);

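/*
 * Illustrative sketch (not part of this file): a caller that wants to shed
 * clean, unused cache for a byte range - in the style of
 * POSIX_FADV_DONTNEED - converts the range to page offsets and lets
 * invalidate_mapping_pages() skip anything dirty, locked, mapped or under
 * writeback.  The helper name is hypothetical.
 */
#if 0
static void example_drop_clean_cache(struct address_space *mapping,
				loff_t offset, loff_t len)
{
	pgoff_t start = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (offset + len - 1) >> PAGE_CACHE_SHIFT;

	if (end >= start)
		invalidate_mapping_pages(mapping, start, end);
}
#endif
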
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			if (!invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	WARN_ON_ONCE(ret);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

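/*
 * Illustrative sketch (not part of this file): direct IO must not leave
 * stale pagecache covering the range it reads or writes, which is the
 * kind of caller invalidate_inode_pages2_range() exists for.  This follows
 * the generic_file_direct_IO() pattern of this era; the function name is
 * hypothetical.
 */
#if 0
static int example_dio_invalidate(struct address_space *mapping,
				loff_t offset, size_t nbytes)
{
	pgoff_t end = (offset + nbytes - 1) >> PAGE_CACHE_SHIFT;

	if (mapping->nrpages == 0)
		return 0;
	/* -EIO means some page could not be invalidated. */
	return invalidate_inode_pages2_range(mapping,
				offset >> PAGE_CACHE_SHIFT, end);
}
#endif
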
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
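
/*
 * Illustrative sketch (not part of this file): a network filesystem that
 * notices the server's copy of a file has changed can discard every cached
 * page, including mapped ones, in the style of NFS cache revalidation.
 * Both function names below are hypothetical.
 */
#if 0
static int example_revalidate_mapping(struct inode *inode)
{
	if (example_inode_changed_on_server(inode))	/* hypothetical */
		return invalidate_inode_pages2(inode->i_mapping);
	return 0;
}
#endif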