/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
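
/*
 * Illustrative sketch, not part of this file: a filesystem that needs
 * its own invalidation hook wires it up through its address_space
 * operations, and do_invalidatepage() above dispatches to it (falling
 * back to block_invalidatepage otherwise).  The "example_" names are
 * hypothetical.
 */
#if 0	/* example only */
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	/* drop or trim per-page private state beyond @offset here */
}

static const struct address_space_operations example_aops = {
	.invalidatepage	= example_invalidatepage,
	/* ... other methods ... */
};
#endif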

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
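
/*
 * Illustrative sketch, not part of this file: the fs/buffer.c case the
 * comment above refers to - once every buffer on the page has been
 * cleaned by other means, the page-level dirty bit is cancelled without
 * writing anything back.  The helper name is hypothetical.
 */
#if 0	/* example only */
static void example_drop_clean_page(struct page *page)
{
	if (!page_has_buffers(page))	/* no per-block state left to flush */
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
}
#endif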

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	remove_from_page_cache(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_mapping_pages(). That function can be called at
 * any time, and is not supposed to throw away dirty pages. But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
117static int
118invalidate_complete_page(struct address_space *mapping, struct page *page)
119{
0fd0e6b0
NP
120 int ret;
121
1da177e4
LT
122 if (page->mapping != mapping)
123 return 0;
124
125 if (PagePrivate(page) && !try_to_release_page(page, 0))
126 return 0;
127
0fd0e6b0 128 ret = remove_mapping(mapping, page);
0fd0e6b0
NP
129
130 return ret;
1da177e4
LT
131}
132
/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking. It will not
 * block on page locks and it will not block on writeback. The second pass
 * will wait. This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code. Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
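
/*
 * Illustrative sketch, not part of this file: the BUG_ON above requires
 * lend to land on the last byte of a page.  A hole-punch style caller
 * would round lend up accordingly before calling in.  Function and
 * variable names here are hypothetical.
 */
#if 0	/* example only */
static void example_punch_range(struct inode *inode, loff_t start, loff_t len)
{
	loff_t lend = start + len - 1;

	/* round lend up to the last byte of its page */
	lend |= (loff_t)PAGE_CACHE_SIZE - 1;
	truncate_inode_pages_range(inode->i_mapping, start, lend);
}
#endif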

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
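
/*
 * Illustrative sketch, not part of this file: the classic caller is
 * inode teardown, dropping every page cached for the inode; i_mutex is
 * assumed held as the comment above requires.  Hypothetical name.
 */
#if 0	/* example only */
static void example_delete_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	/* fs-specific on-disk block freeing would follow here */
	clear_inode(inode);
}
#endif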

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
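
/*
 * Illustrative sketch, not part of this file: best-effort eviction of
 * an inode's clean pagecache, as a drop-caches style walk would do it.
 * Pages that are dirty, locked, mapped or under writeback simply stay.
 * The function name is hypothetical; end = -1 wraps to the largest
 * possible page offset.
 */
#if 0	/* example only */
static unsigned long example_drop_pagecache(struct inode *inode)
{
	return invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
#endif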

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
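
/*
 * Illustrative sketch, not part of this file: a ->launder_page method
 * receives a locked dirty page and must write it back (or fail), so
 * that invalidate_inode_pages2_range() below can then drop it.  Both
 * names here are hypothetical; NFS is an in-tree user of this hook.
 */
#if 0	/* example only */
static int example_launder_page(struct page *page)
{
	/* synchronously write the page; 0 on success, -errno on failure */
	return example_writeback_one_page(page);
}
#endif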

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret = do_launder_page(mapping, page);
			if (ret == 0 && !invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
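
/*
 * Illustrative sketch, not part of this file: the direct-I/O write path
 * is a typical caller - after writing byte range [pos, pos+len) straight
 * to disk it shoots down any now-stale cached pages over that range.
 * Function and variable names are hypothetical.
 */
#if 0	/* example only */
static int example_dio_invalidate(struct address_space *mapping,
				  loff_t pos, loff_t len)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + len - 1) >> PAGE_CACHE_SHIFT;

	return invalidate_inode_pages2_range(mapping, start, end);
}
#endif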

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
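
/*
 * Illustrative sketch, not part of this file: a network filesystem that
 * discovers its cached copy is stale can flush and then drop the whole
 * mapping; -EIO from invalidate_inode_pages2() means some pages could
 * not be invalidated.  The function name is hypothetical.
 */
#if 0	/* example only */
static int example_revalidate_cache(struct inode *inode)
{
	int ret;

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret == 0)
		ret = invalidate_inode_pages2(inode->i_mapping);
	return ret;
}
#endif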