// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following pagevecs are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled).
 */
struct lru_pvecs {
	local_lock_t lock;
	struct pagevec lru_add;
	struct pagevec lru_deactivate_file;
	struct pagevec lru_deactivate;
	struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
	struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = lock_page_lruvec_irqsave(page, &flags);
		del_page_from_lru_list(page, lruvec);
		__clear_page_lru_flags(page);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}
	__ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page);
	free_unref_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never added to any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = lru_to_page(pages);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

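/*
 * Illustrative sketch of a hypothetical caller (not part of this file):
 * collect referenced pages on a local list, then drop every reference
 * in one call.
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);	// for each page we hold a ref on
 *	put_pages_list(&pages);		// puts each page, leaves list empty
 */
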
/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0.  If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must have space for at least one page pointer.
 *
 * Returns 1 if the page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

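/*
 * Illustrative sketch of a hypothetical caller (not part of this file):
 * pin the page backing a page-aligned kernel buffer "buf" so it can be
 * handed to code that works on struct page:
 *
 *	struct page *page;
 *
 *	if (get_kernel_page((unsigned long)buf, 0, &page) == 1) {
 *		// ... use page ...
 *		put_page(page);		// release the pin taken above
 *	}
 */
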
static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec))
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		/* block memcg migration while the page moves between lru */
		if (!TestClearPageLRU(page))
			continue;

		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
		(*move_fn)(page, lruvec);

		SetPageLRU(page);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
	if (!PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec);
		__count_vm_events(PGROTATED, thp_nr_pages(page));
	}
}

/* return true if the pagevec needs to be drained */
static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
{
	bool ret = false;

	if (!pagevec_add(pvec, page) || PageCompound(page) ||
			lru_cache_disabled())
		ret = true;

	return ret;
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 *
 * rotate_reclaimable_page() must disable IRQs, to prevent nasty races.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_lock_irqsave(&lru_rotate.lock, flags);
		pvec = this_cpu_ptr(&lru_rotate.pvec);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}

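/*
 * Illustrative sketch of a hypothetical caller, modeled on writeback
 * completion (not part of this file): once writeback ends, a page that
 * was marked PG_reclaim can be rotated for fast reclaim.
 *
 *	if (PageReclaim(page)) {
 *		ClearPageReclaim(page);
 *		rotate_reclaimable_page(page);
 *	}
 */
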
void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/*
		 * Holding lruvec->lru_lock is safe here, since
		 * 1) the lruvec is pinned by reclaim, or
		 * 2) we come from a pre-LRU page during refault (which also
		 *    holds the rcu lock, so this would be safe even if the
		 *    page was on the LRU and could move simultaneously to a
		 *    new lruvec).
		 */
		spin_lock_irq(&lruvec->lru_lock);
		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
		spin_unlock_irq(&lruvec->lru_lock);
	} while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_page(struct page *page)
{
	lru_note_cost(mem_cgroup_page_lruvec(page),
		      page_is_file_lru(page), thp_nr_pages(page));
}

static void __activate_page(struct page *page, struct lruvec *lruvec)
{
	if (!PageActive(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		SetPageActive(page);
		add_page_to_lru_list(page, lruvec);
		trace_mm_lru_activate(page);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page);
		local_unlock(&lru_pvecs.lock);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void activate_page(struct page *page)
{
	struct lruvec *lruvec;

	page = compound_head(page);
	if (TestClearPageLRU(page)) {
		lruvec = lock_page_lruvec_irq(page);
		__activate_page(page, lruvec);
		unlock_page_lruvec_irq(lruvec);
		SetPageLRU(page);
	}
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec;
	int i;

	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);

	if (!PageReferenced(page)) {
		SetPageReferenced(page);
	} else if (PageUnevictable(page)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!PageActive(page)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		workingset_activation(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

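/*
 * Illustrative sketch of a hypothetical caller (not part of this file):
 * a reader that found a page in the page cache records the access so
 * repeated use eventually promotes the page to the active list.
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		mark_page_accessed(page);
 *		put_page(page);
 *	}
 */
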
/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec;

	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);

	get_page(page);
	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	if (pagevec_add_and_need_flush(pvec, page))
		__pagevec_lru_add(pvec);
	local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(lru_cache_add);

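/*
 * Illustrative sketch of a hypothetical caller (not part of this file),
 * modeled on page-cache insertion: once the new page is in the mapping,
 * queue it for the LRU. It reaches an LRU list on the next pagevec drain.
 *
 *	__SetPageLocked(page);
 *	if (!add_to_page_cache(page, mapping, offset, gfp))
 *		lru_cache_add(page);
 */
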
/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
					   struct vm_area_struct *vma)
{
	bool unevictable;

	VM_BUG_ON_PAGE(PageLRU(page), page);

	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}
	lru_cache_add(page);
}

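/*
 * Illustrative sketch of a hypothetical fault-path caller (not part of
 * this file), modeled on anonymous fault handling: map a new anon page
 * and let its VMA decide which LRU list it belongs on.
 *
 *	page_add_new_anon_rmap(page, vma, addr, false);
 *	lru_cache_add_inactive_or_unevictable(page, vma);
 */
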
/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and isn't dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, the page moves to the head of the inactive list because the VM
 * expects it to be written out by flusher threads, as this is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
	bool active = PageActive(page);
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	del_page_from_lru_list(page, lruvec);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could race with end_page_writeback, which
		 * can make readahead confusing.  But the race window is
		 * _really_ small and it's a non-critical problem.
		 */
		add_page_to_lru_list(page, lruvec);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was in the pagevec;
		 * move that page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
	if (PageActive(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
	if (PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have
		 * the PG_swapbacked flag cleared, to distinguish them from
		 * normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate.pvec, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(pagevec_count(pvec))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);

	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn);

	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);

	activate_page_drain(cpu);
	invalidate_bh_lrus_cpu(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages, such as under mprotect,
	 * deactivating an unevictable page to accelerate reclaim is
	 * pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
		local_unlock(&lru_pvecs.lock);
	}
}

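/*
 * Illustrative sketch of a hypothetical caller (not part of this file),
 * modeled on mapping invalidation: a page that could not be invalidated
 * (e.g. dirty or under writeback) is hinted as a good reclaim candidate.
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_file_page(page);
 */
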
/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the
 * active list and was not an unevictable page.  This is done to accelerate
 * the reclaim of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn);
		local_unlock(&lru_pvecs.lock);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
		get_page(page);
		if (pagevec_add_and_need_flush(pvec, page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
		local_unlock(&lru_pvecs.lock);
	}
}

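/*
 * Illustrative sketch of a hypothetical MADV_FREE-style caller (not part
 * of this file): once an anon page is known to be clean, it can be made
 * lazily freeable so reclaim may discard it instead of swapping it out.
 *
 *	if (!PageDirty(page))
 *		mark_page_lazyfree(page);
 */
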
void lru_add_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&lru_pvecs.lock);
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee pagevec counter stores visible by this CPU are visible to
	 * other CPUs before loading the current drain generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken. It pairs with smp_mb() inside the mutex critical section
	 * at (D).
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the new global
	 * drain generation number is stored before loading pagevec counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
	 * added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (force_all_cpus ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
		    need_activate_page_drain(cpu) ||
		    has_bh_in_lru(cpu, NULL)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif /* CONFIG_SMP */

atomic_t lru_disable_count = ATOMIC_INIT(0);

/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().
 * It drains the pages on the LRU caches and then disables the caches on
 * all cpus until lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */
void lru_cache_disable(void)
{
	atomic_inc(&lru_disable_count);
#ifdef CONFIG_SMP
	/*
	 * lru_add_drain_all() in force mode will schedule draining on
	 * all online CPUs, so any calls of lru_cache_disabled() wrapped by
	 * local_lock or with preemption disabled will be ordered by that.
	 * The atomic operation doesn't need to have stronger ordering
	 * requirements because that is enforced by the scheduling
	 * guarantees.
	 */
	__lru_add_drain_all(true);
#else
	lru_add_drain();
#endif
}

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct lruvec *lruvec = NULL;
	unsigned long flags;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same lruvec. The lock is held only if lruvec != NULL.
		 */
		if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
			unlock_page_lruvec_irqrestore(lruvec, flags);
			lruvec = NULL;
		}

		page = compound_head(page);
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			/*
			 * ZONE_DEVICE pages that return 'false' from
			 * page_is_devmap_managed() do not require special
			 * processing, and instead, expect a call to
			 * put_page_testzero().
			 */
			if (page_is_devmap_managed(page)) {
				put_devmap_managed_page(page);
				continue;
			}
			if (put_page_testzero(page))
				put_dev_pagemap(page->pgmap);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct lruvec *prev_lruvec = lruvec;

			lruvec = relock_page_lruvec_irqsave(page, lruvec,
									&flags);
			if (prev_lruvec != lruvec)
				lock_batch = 0;

			del_page_from_lru_list(page, lruvec);
			__clear_page_lru_flags(page);
		}

		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

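/*
 * Illustrative sketch of a hypothetical caller (not part of this file):
 * drop a batch of references in one call instead of a put_page() loop,
 * amortizing the LRU lock acquisitions.
 *
 *	release_pages(pages, nr);	// pages[] holds nr referenced pages
 */
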
/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
	int was_unevictable = TestClearPageUnevictable(page);
	int nr_pages = thp_nr_pages(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	/*
	 * A page becomes evictable in two ways:
	 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
	 * 2) Before acquiring LRU lock to put the page on the correct LRU
	 *    and then
	 *	a) do PageLRU check with lock [check_move_unevictable_pages]
	 *	b) do PageLRU check before lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
	 * the following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 *
	 * If '#1' does not observe the setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier will make sure that the
	 * page_evictable check puts the page on the correct LRU. Without
	 * smp_mb(), SetPageLRU can be reordered after the PageMlocked check
	 * and can make '#1' fail the isolation of the page whose Mlocked bit
	 * is cleared (#0 is also looking at the same page), and the evictable
	 * page will be stranded on an unevictable LRU.
	 */
	SetPageLRU(page);
	smp_mb__after_atomic();

	if (page_evictable(page)) {
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	add_page_to_lru_list(page, lruvec);
	trace_mm_lru_insertion(page);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
		__pagevec_lru_add_fn(page, lruvec);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec: The pagevec to prune
 *
 * find_get_entries() fills both pages and XArray value entries (aka
 * exceptional entries) into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!xa_is_value(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec: Where the resulting pages are placed
 * @mapping: The address_space to search
 * @start: The starting page index
 * @end: The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and up to
 * index @end (inclusive).  The pages are placed in @pvec, and the function
 * takes a reference against each of them.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If
 * this number is smaller than PAGEVEC_SIZE, the end of the specified range
 * has been reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);

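/*
 * Illustrative sketch of a hypothetical caller (not part of this file):
 * walk every page cached in [start, end], a pagevec-sized batch at a time.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = start;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
 *		// ... process pvec.pages[0 .. pagevec_count(&pvec) - 1] ...
 *		pagevec_release(&pvec);
 *	}
 */
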
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}

#ifdef CONFIG_DEV_PAGEMAP_OPS
void put_devmap_managed_page(struct page *page)
{
	int count;

	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
		return;

	count = page_ref_dec_return(page);

	/*
	 * devmap page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (count == 1)
		free_devmap_managed_page(page);
	else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif