/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
	}
};
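
/*
 * One swapper address_space per swap type: swap_address_space() picks the
 * slot for a swp_entry_t, and the radix tree inside it is indexed by the
 * full entry.val, as done in __add_to_swap_cache() and lookup_swap_cache()
 * below.
 */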
42 | ||
43 | #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) | |
44 | ||
static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list passed to split_huge_page_to_list() if @page is a
 *	transparent huge page that needs splitting
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
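	/*
	 * For example, with mask == 7 and offset == 35 this reads the
	 * aligned block of offsets 32..39 (35 & ~7 == 32, 35 | 7 == 39).
	 */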
483 | ||
484 | blk_start_plug(&plug); | |
485 | for (offset = start_offset; offset <= end_offset ; offset++) { | |
486 | /* Ok, do the async read-ahead now */ | |
487 | page = read_swap_cache_async(swp_entry(swp_type(entry), offset), | |
488 | gfp_mask, vma, addr); | |
489 | if (!page) | |
490 | continue; | |
491 | if (offset != entry_offset) | |
492 | SetPageReadahead(page); | |
493 | page_cache_release(page); | |
494 | } | |
495 | blk_finish_plug(&plug); | |
496 | ||
497 | lru_add_drain(); /* Push any new pages onto the LRU now */ | |
498 | skip: | |
499 | return read_swap_cache_async(entry, gfp_mask, vma, addr); | |
500 | } |