/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
		/* swap cache doesn't use writeback related tags */
		.flags		= 1 << AS_NO_WRITEBACK_TAGS,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

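/*
 * Total number of pages currently held in the swap caches of all swap areas.
 */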
unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	get_page(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
				  swp_offset(entry), page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_node_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so add_to_swap_cache()
		 * doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		put_page(page);
	}

	return error;
}

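/*
 * add_to_swap_cache() wraps __add_to_swap_cache() with the radix-tree node
 * preload required for the insertion.
 */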
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, swp_offset(entry));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

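	/* A transparent huge page must be split before it can be swapped out. */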
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	if (page) {
		INC_CACHE_INFO(find_success);
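		/* A readahead hit: lets swapin_nr_pages() grow the next window. */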
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

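/*
 * __read_swap_cache_async() looks up @entry in the swap cache.  If the page
 * is not already cached, a new page is allocated, added to the swap cache
 * and the LRU, and returned locked; *new_page_allocated tells the caller
 * whether it still needs to start the actual swap-in I/O.
 */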
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}

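/*
 * Decide how many pages to read around a faulting swap offset, based on how
 * many pages brought in by the previous readahead were actually used
 * (swapin_readahead_hits), capped at 1 << page_cluster.
 */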
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}