/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

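/*
 * One swap cache per swap type: swapper_spaces[] holds, for each active
 * swap type, an array of address_spaces (filled in by
 * init_swap_address_space() below) so the swap cache for one device is
 * split across several radix trees and tree_locks.  The array is
 * published and retired with RCU; nr_swapper_spaces[] records its length.
 */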
struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];

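/*
 * Lightweight (not exact) counters of swap cache activity, reported by
 * show_swap_cache_info().
 */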
#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)   do { swap_cache_info.x += (nr); } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

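/*
 * Sum the nrpages of every per-type swap address space.  The walk runs
 * under rcu_read_lock() and can race with swapon/swapoff, so the result
 * is only a snapshot.
 */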
unsigned long total_swapcache_pages(void)
{
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;

        rcu_read_lock();
        for (i = 0; i < MAX_SWAPFILES; i++) {
                /*
                 * The corresponding entries in nr_swapper_spaces and
                 * swapper_spaces will be reused only after at least
                 * one grace period.  So it is impossible for them
                 * to belong to different usages.
                 */
                nr = nr_swapper_spaces[i];
                spaces = rcu_dereference(swapper_spaces[i]);
                if (!nr || !spaces)
                        continue;
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
        }
        rcu_read_unlock();
        return ret;
}

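/*
 * Number of recent readahead "hits": pages brought in by swap readahead
 * and then actually found by lookup_swap_cache().  Consumed by
 * swapin_nr_pages() to size the next readahead window.
 */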
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error, i, nr = hpage_nr_pages(page);
        struct address_space *address_space;
        pgoff_t idx = swp_offset(entry);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_ref_add(page, nr);
        SetPageSwapCache(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        for (i = 0; i < nr; i++) {
                set_page_private(page + i, entry.val + i);
                error = radix_tree_insert(&address_space->page_tree,
                                          idx + i, page + i);
                if (unlikely(error))
                        break;
        }
        if (likely(!error)) {
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                ADD_CACHE_INFO(add_total, nr);
        } else {
                /*
                 * Only the context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache().
                 * So add_to_swap_cache() doesn't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page + i, 0UL);
                while (i--) {
                        radix_tree_delete(&address_space->page_tree, idx + i);
                        set_page_private(page + i, 0UL);
                }
                ClearPageSwapCache(page);
                page_ref_sub(page, nr);
        }
        spin_unlock_irq(&address_space->tree_lock);

        return error;
}

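/*
 * Preload enough radix-tree nodes for the (possibly huge) page, then
 * insert it under the tree_lock via __add_to_swap_cache().
 */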
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        struct address_space *address_space;
        int i, nr = hpage_nr_pages(page);
        swp_entry_t entry;
        pgoff_t idx;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        idx = swp_offset(entry);
        for (i = 0; i < nr; i++) {
                radix_tree_delete(&address_space->page_tree, idx + i);
                set_page_private(page + i, 0);
        }
        ClearPageSwapCache(page);
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;

        if (mem_cgroup_try_charge_swap(page, entry))
                goto fail;

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
        /* -ENOMEM radix-tree allocation failure */
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear the SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the page will be dirtied in unmap because its pte should be
         * dirty. A special case is a MADV_FREE page. Its pte could have the
         * dirty bit cleared while the page's SwapBacked bit is still set,
         * because clearing the dirty bit and the SwapBacked bit is not
         * protected by a lock. For such a page, unmap will not set the dirty
         * bit, so page reclaim will not write the page out. This can cause
         * data corruption when the page is swapped in later. Always setting
         * the dirty bit for the page solves the problem.
         */
        set_page_dirty(page);

        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        put_swap_page(page, entry);
        page_ref_sub(page, hpage_nr_pages(page));
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), swp_offset(entry));

        if (page && likely(!PageTransCompound(page))) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}

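/*
 * Look up @entry in the swap cache; if it is not present, allocate a new
 * page, claim the slot with swapcache_prepare() (SWAP_HAS_CACHE) and add
 * the page to the swap cache.  *new_page_allocated tells the caller
 * whether a new, locked page was inserted, in which case the caller must
 * start the I/O itself (see read_swap_cache_async() below).
 */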
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, swp_offset(entry));
                if (found_page)
                        break;

                /*
                 * Just skip readahead for an unused swap slot.
                 * During swap_off, when swap_slot_cache is disabled,
                 * we have to handle the race between putting the
                 * swap entry in the swap cache and marking the swap slot
                 * as SWAP_HAS_CACHE.  That's done in a later part of the
                 * code or else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
                __SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear the SWAP_HAS_CACHE flag.
                 */
                put_swap_page(new_page, entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);

        return retpage;
}

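/*
 * Decide how many pages to read around @offset: grow the window with the
 * number of recent readahead hits, drop back to a single page when the
 * access pattern looks random, round up to a power of two, clamp to
 * 1 << page_cluster, and never shrink to less than half of the previous
 * window at once.
 */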
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct blk_plug plug;
        bool do_poll = true;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                             gfp_mask, vma, addr, false);
                if (!page)
                        continue;
                if (offset != entry_offset && likely(!PageTransCompound(page)))
                        SetPageReadahead(page);
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

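/*
 * Called when a swap area is activated: carve its nr_pages slots into
 * SWAP_ADDRESS_SPACE_PAGES-sized chunks, give every chunk its own
 * address_space (and therefore its own tree_lock), and publish the array
 * through swapper_spaces[] with RCU.
 */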
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
                spin_lock_init(&space->tree_lock);
        }
        nr_swapper_spaces[type] = nr;
        rcu_assign_pointer(swapper_spaces[type], spaces);

        return 0;
}

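/*
 * Tear down the per-type swap address spaces: unpublish the array and wait
 * for an RCU grace period so that lockless walkers such as
 * total_swapcache_pages() cannot still be looking at it, then free it.
 */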
void exit_swap_address_space(unsigned int type)
{
        struct address_space *spaces;

        spaces = swapper_spaces[type];
        nr_swapper_spaces[type] = 0;
        rcu_assign_pointer(swapper_spaces[type], NULL);
        synchronize_rcu();
        kvfree(spaces);
}