// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

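/*
 * Illustrative layout, assuming 4KB pages (PAGE_SHIFT == 12, so
 * SWAP_RA_WIN_SHIFT == 6): bits 0-5 of swap_readahead_info hold the
 * hit count (capped at SWAP_RA_HITS_MAX == 63), bits 6-11 hold the
 * readahead window size, and the remaining high bits hold the
 * page-aligned fault address, so all three fit in one atomic long.
 */
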
#define INC_CACHE_INFO(x)       data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)   data_race(swap_cache_info.x += (nr))

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;
        struct swap_info_struct *si;

        for (i = 0; i < MAX_SWAPFILES; i++) {
                swp_entry_t entry = swp_entry(i, 1);

                /* Avoid get_swap_device() to warn for bad swap entry */
                if (!swp_swap_info(entry))
                        continue;
                /* Prevent swapoff to free swapper_spaces */
                si = get_swap_device(entry);
                if (!si)
                        continue;
                nr = nr_swapper_spaces[i];
                spaces = swapper_spaces[i];
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
                put_swap_device(si);
        }
        return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = find_get_entry(address_space, idx);
        if (xa_is_value(page))
                return page;
        if (page)
                put_page(page);
        return NULL;
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
        unsigned long i, nr = thp_nr_pages(page);
        void *old;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_ref_add(page, nr);
        SetPageSwapCache(page);

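        /*
         * Standard XArray retry pattern: if a store below fails for
         * lack of memory, xas_nomem() allocates a node with @gfp and
         * returns true, and the whole insertion is attempted again.
         */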
        do {
                unsigned long nr_shadows = 0;

                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                nr_shadows++;
                                if (shadowp)
                                        *shadowp = old;
                        }
                        set_page_private(page + i, entry.val + i);
                        xas_store(&xas, page);
                        xas_next(&xas);
                }
                address_space->nrexceptional -= nr_shadows;
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                ADD_CACHE_INFO(add_total, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        ClearPageSwapCache(page);
        page_ref_sub(page, nr);
        return xas_error(&xas);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i, nr = thp_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != page, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
        ClearPageSwapCache(page);
        if (shadow)
                address_space->nrexceptional += nr;
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the page will be dirtied in unmap because its pte should
         * be dirty. A special case is an MADV_FREE page: its pte could have
         * the dirty bit cleared while its SwapBacked bit is still set,
         * because clearing the dirty bit and the SwapBacked bit is not done
         * under any lock. For such a page, unmap will not set the dirty bit,
         * so page reclaim will not write the page out. This can cause data
         * corruption when the page is swapped in later. Always setting the
         * dirty bit for the page solves the problem.
         */
        set_page_dirty(page);

        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry = { .val = page_private(page) };
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(page, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_page(page, entry);
        page_ref_sub(page, thp_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
                        unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                unsigned long nr_shadows = 0;
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                        nr_shadows++;
                }
                address_space->nrexceptional -= nr_shadows;
                xa_unlock_irq(&address_space->i_pages);

                /* search the next swapcache until we meet end */
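                /*
                 * The shift/increment/shift below rounds curr up to the
                 * first slot of the next swap address space: e.g. if
                 * SWAP_ADDRESS_SPACE_SHIFT is 14, each address space
                 * covers 16384 slots, so curr jumps to the next multiple
                 * of 16384.
                 */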
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
                               unsigned long addr)
{
        struct page *page;
        struct swap_info_struct *si;

        si = get_swap_device(entry);
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(entry), swp_offset(entry));
        put_swap_device(si);

        INC_CACHE_INFO(find_total);
        if (page) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                INC_CACHE_INFO(find_success);
                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(PageTransCompound(page)))
                        return page;

                readahead = TestClearPageReadahead(page);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct page *page = find_get_entry(mapping, index);

        if (!page)
                return page;
        if (!xa_is_value(page))
                return find_subpage(page, index);
        if (!shmem_mapping(mapping))
                return NULL;

        swp = radix_to_swp_entry(page);
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(swp), swp_offset(swp));
        put_swap_device(si);
        return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct page *page;
        void *shadow = NULL;

        *new_page_allocated = false;

        for (;;) {
                int err;
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                si = get_swap_device(entry);
                if (!si)
                        return NULL;
                page = find_get_page(swap_address_space(entry),
                                     swp_offset(entry));
                put_swap_device(si);
                if (page)
                        return page;

                /*
                 * Just skip read ahead for unused swap slot.
                 * During swap_off when swap_slot_cache is disabled,
                 * we have to handle the race between putting
                 * swap entry in swap cache and marking swap slot
                 * as SWAP_HAS_CACHE.  That's done in later part of code or
                 * else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        return NULL;

                /*
                 * Get a new page to read into from swap.  Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                page = alloc_page_vma(gfp_mask, vma, addr);
                if (!page)
                        return NULL;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                put_page(page);
                if (err != -EEXIST)
                        return NULL;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared.  Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                cond_resched();
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */

        __SetPageLocked(page);
        __SetPageSwapBacked(page);

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }

        if (mem_cgroup_charge(page, NULL, gfp_mask)) {
                delete_from_swap_cache(page);
                goto fail_unlock;
        }

        if (shadow)
                workingset_refault(page, shadow);

        /* Caller will initiate read into locked page */
        SetPageWorkingset(page);
        lru_cache_add(page);
        *new_page_allocated = true;
        return page;

fail_unlock:
        unlock_page(page);
        put_page(page);
        return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);

        return retpage;
}

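/*
 * Worked example for the heuristic below (illustrative): with 3
 * readahead hits and max_pages == 8, pages = 3 + 2 = 5, rounded up
 * to the next power of two, giving a window of 8.  With no hits and
 * an offset not adjacent to the previous one, the window collapses
 * to a single page.
 */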
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Test swap type to make sure the dereference is safe */
        if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
                struct inode *inode = si->swap_file->f_mapping->host;
                if (inode_read_congested(inode))
                        goto skip;
        }

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
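        /*
         * For example, a window of 8 slots gives mask == 7:
         * start_offset = offset & ~7 and end_offset = offset | 7
         * bracket the faulting slot in an aligned 8-slot block.
         */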
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

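/*
 * Each swap device's cache is split into one address_space per
 * SWAP_ADDRESS_SPACE_PAGES slots (16k slots, i.e. 64MB of swap with
 * 4KB pages, assuming the usual SWAP_ADDRESS_SPACE_SHIFT of 14), so
 * that lookups on a large device don't all contend on one i_pages lock.
 */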
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}

void exit_swap_address_space(unsigned int type)
{
        kvfree(swapper_spaces[type]);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
                                     unsigned long faddr,
                                     unsigned long lpfn,
                                     unsigned long rpfn,
                                     unsigned long *start,
                                     unsigned long *end)
{
        *start = max3(lpfn, PFN_DOWN(vma->vm_start),
                      PFN_DOWN(faddr & PMD_MASK));
        *end = min3(rpfn, PFN_DOWN(vma->vm_end),
                    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
                         struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        swp_entry_t entry;
        unsigned long faddr, pfn, fpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        entry = pte_to_swp_entry(*pte);
        if ((unlikely(non_swap_entry(entry)))) {
                pte_unmap(orig_pte);
                return;
        }

        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1) {
                pte_unmap(orig_pte);
                return;
        }

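        /*
         * Place the readahead window according to the detected access
         * direction: growing forward if this fault is one page after
         * the previous one (fpfn == pfn + 1), growing backward if it
         * is one page before (pfn == fpfn + 1), and centred on the
         * faulting page otherwise.
         */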
        /* Copy the PTEs because the page table may be unmapped */
        if (fpfn == pfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
        else if (pfn == fpfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
                                  &start, &end);
        else {
                left = (win - 1) / 2;
                swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
                                  &start, &end);
        }
        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {
                .win = 1,
        };

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (pte_none(pentry))
                        continue;
                if (pte_present(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        lru_add_drain();
skip:
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either by cluster (i.e. physical,
 * disk-based) or by vma (i.e. by virtual addresses around the faulting
 * address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                              struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
               swap_vma_readahead(entry, gfp_mask, vmf) :
               swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
                enable_vma_readahead = true;
        else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
                enable_vma_readahead = false;
        else
                return -EINVAL;

        return count;
}
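/*
 * Since the "swap" kobject is created under mm_kobj in swap_init_sysfs()
 * below, this knob appears as /sys/kernel/mm/swap/vma_ra_enabled;
 * writing "false" (or "0") to it falls back to cluster readahead.
 */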
static struct kobj_attribute vma_ra_enabled_attr =
        __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
               vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif