mm/vmscan.c (git blame, mirror_ubuntu-artful-kernel.git)
1da177e4
LT
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
b1de0d13
MH
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
1da177e4
LT
16#include <linux/mm.h>
17#include <linux/module.h>
5a0e3ad6 18#include <linux/gfp.h>
1da177e4
LT
19#include <linux/kernel_stat.h>
20#include <linux/swap.h>
21#include <linux/pagemap.h>
22#include <linux/init.h>
23#include <linux/highmem.h>
70ddf637 24#include <linux/vmpressure.h>
e129b5c2 25#include <linux/vmstat.h>
1da177e4
LT
26#include <linux/file.h>
27#include <linux/writeback.h>
28#include <linux/blkdev.h>
29#include <linux/buffer_head.h> /* for try_to_release_page(),
30 buffer_heads_over_limit */
31#include <linux/mm_inline.h>
1da177e4
LT
32#include <linux/backing-dev.h>
33#include <linux/rmap.h>
34#include <linux/topology.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
3e7d3449 37#include <linux/compaction.h>
1da177e4
LT
38#include <linux/notifier.h>
39#include <linux/rwsem.h>
248a0301 40#include <linux/delay.h>
3218ae14 41#include <linux/kthread.h>
7dfb7103 42#include <linux/freezer.h>
66e1707b 43#include <linux/memcontrol.h>
873b4771 44#include <linux/delayacct.h>
af936a16 45#include <linux/sysctl.h>
929bea7c 46#include <linux/oom.h>
268bb0ce 47#include <linux/prefetch.h>
b1de0d13 48#include <linux/printk.h>
f9fe48be 49#include <linux/dax.h>
1da177e4
LT
50
51#include <asm/tlbflush.h>
52#include <asm/div64.h>
53
54#include <linux/swapops.h>
117aad1e 55#include <linux/balloon_compaction.h>
1da177e4 56
0f8053a5
NP
57#include "internal.h"
58
33906bc5
MG
59#define CREATE_TRACE_POINTS
60#include <trace/events/vmscan.h>
61
1da177e4 62struct scan_control {
22fba335
KM
63 /* How many pages shrink_list() should reclaim */
64 unsigned long nr_to_reclaim;
65
1da177e4 66 /* This context's GFP mask */
6daa0e28 67 gfp_t gfp_mask;
1da177e4 68
ee814fe2 69 /* Allocation order */
5ad333eb 70 int order;
66e1707b 71
ee814fe2
JW
72 /*
73 * Nodemask of nodes allowed by the caller. If NULL, all nodes
74 * are scanned.
75 */
76 nodemask_t *nodemask;
9e3b2f8c 77
f16015fb
JW
78 /*
79 * The memory cgroup that hit its limit and as a result is the
80 * primary target of this reclaim invocation.
81 */
82 struct mem_cgroup *target_mem_cgroup;
66e1707b 83
ee814fe2
JW
84 /* Scan (total_size >> priority) pages at once */
85 int priority;
86
b2e18757
MG
87 /* The highest zone to isolate pages for reclaim from */
88 enum zone_type reclaim_idx;
89
ee814fe2
JW
90 unsigned int may_writepage:1;
91
92 /* Can mapped pages be reclaimed? */
93 unsigned int may_unmap:1;
94
95 /* Can pages be swapped as part of reclaim? */
96 unsigned int may_swap:1;
97
241994ed
JW
98 /* Can cgroups be reclaimed below their normal consumption range? */
99 unsigned int may_thrash:1;
100
ee814fe2
JW
101 unsigned int hibernation_mode:1;
102
103 /* One of the zones is ready for compaction */
104 unsigned int compaction_ready:1;
105
106 /* Incremented by the number of inactive pages that were scanned */
107 unsigned long nr_scanned;
108
109 /* Number of pages freed so far during a call to shrink_zones() */
110 unsigned long nr_reclaimed;
1da177e4
LT
111};
112
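/*
 * Editor's illustrative sketch, not part of the original vmscan.c: roughly
 * how a direct-reclaim entry point fills in the scan_control defined above
 * before walking the LRU lists. The function name example_direct_reclaim is
 * hypothetical; the field values mirror the common defaults used by
 * try_to_free_pages() further down in this file.
 */
static unsigned long example_direct_reclaim(gfp_t gfp_mask, int order,
					    nodemask_t *nodemask)
{
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,	/* one batch of pages */
		.gfp_mask = gfp_mask,
		.reclaim_idx = gfp_zone(gfp_mask),	/* highest zone to touch */
		.order = order,
		.nodemask = nodemask,
		.priority = DEF_PRIORITY,	/* start gently, raise pressure later */
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = 1,
	};

	/* the real code would now call into shrink_zones()/shrink_node() */
	return sc.nr_to_reclaim;
}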
1da177e4
LT
113#ifdef ARCH_HAS_PREFETCH
114#define prefetch_prev_lru_page(_page, _base, _field) \
115 do { \
116 if ((_page)->lru.prev != _base) { \
117 struct page *prev; \
118 \
119 prev = lru_to_page(&(_page->lru)); \
120 prefetch(&prev->_field); \
121 } \
122 } while (0)
123#else
124#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
125#endif
126
127#ifdef ARCH_HAS_PREFETCHW
128#define prefetchw_prev_lru_page(_page, _base, _field) \
129 do { \
130 if ((_page)->lru.prev != _base) { \
131 struct page *prev; \
132 \
133 prev = lru_to_page(&(_page->lru)); \
134 prefetchw(&prev->_field); \
135 } \
136 } while (0)
137#else
138#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
139#endif
140
141/*
142 * From 0 .. 100. Higher means more swappy.
143 */
144int vm_swappiness = 60;
d0480be4
WSH
145/*
146 * The total number of pages which are beyond the high watermark within all
147 * zones.
148 */
149unsigned long vm_total_pages;
1da177e4
LT
150
151static LIST_HEAD(shrinker_list);
152static DECLARE_RWSEM(shrinker_rwsem);
153
c255a458 154#ifdef CONFIG_MEMCG
89b5fae5
JW
155static bool global_reclaim(struct scan_control *sc)
156{
f16015fb 157 return !sc->target_mem_cgroup;
89b5fae5 158}
97c9341f
TH
159
160/**
161 * sane_reclaim - is the usual dirty throttling mechanism operational?
162 * @sc: scan_control in question
163 *
164 * The normal page dirty throttling mechanism in balance_dirty_pages() is
165 * completely broken with the legacy memcg and direct stalling in
166 * shrink_page_list() is used for throttling instead, which lacks all the
167 * niceties such as fairness, adaptive pausing, bandwidth proportional
168 * allocation and configurability.
169 *
170 * This function tests whether the vmscan currently in progress can assume
171 * that the normal dirty throttling mechanism is operational.
172 */
173static bool sane_reclaim(struct scan_control *sc)
174{
175 struct mem_cgroup *memcg = sc->target_mem_cgroup;
176
177 if (!memcg)
178 return true;
179#ifdef CONFIG_CGROUP_WRITEBACK
69234ace 180 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
97c9341f
TH
181 return true;
182#endif
183 return false;
184}
91a45470 185#else
89b5fae5
JW
186static bool global_reclaim(struct scan_control *sc)
187{
188 return true;
189}
97c9341f
TH
190
191static bool sane_reclaim(struct scan_control *sc)
192{
193 return true;
194}
91a45470
KH
195#endif
196
5a1c84b4
MG
197/*
198 * This misses isolated pages which are not accounted for to save counters.
199 * As the data only determines if reclaim or compaction continues, it is
200 * not expected that isolated pages will be a dominating factor.
201 */
202unsigned long zone_reclaimable_pages(struct zone *zone)
203{
204 unsigned long nr;
205
206 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
207 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
208 if (get_nr_swap_pages() > 0)
209 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
210 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
211
212 return nr;
213}
214
599d0c95
MG
215unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
216{
217 unsigned long nr;
218
219 nr = node_page_state_snapshot(pgdat, NR_ACTIVE_FILE) +
220 node_page_state_snapshot(pgdat, NR_INACTIVE_FILE) +
221 node_page_state_snapshot(pgdat, NR_ISOLATED_FILE);
6e543d57
LD
222
223 if (get_nr_swap_pages() > 0)
599d0c95
MG
224 nr += node_page_state_snapshot(pgdat, NR_ACTIVE_ANON) +
225 node_page_state_snapshot(pgdat, NR_INACTIVE_ANON) +
226 node_page_state_snapshot(pgdat, NR_ISOLATED_ANON);
6e543d57
LD
227
228 return nr;
229}
230
599d0c95 231bool pgdat_reclaimable(struct pglist_data *pgdat)
6e543d57 232{
599d0c95
MG
233 return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
234 pgdat_reclaimable_pages(pgdat) * 6;
6e543d57
LD
235}
236
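/*
 * Editor's note (worked example, not part of the original file): with the
 * ratio used above, a node holding 100,000 reclaimable pages keeps reporting
 * pgdat_reclaimable() == true until NR_PAGES_SCANNED passes 600,000, i.e.
 * the node is only written off after every reclaimable page has been scanned
 * roughly six times without enough being freed.
 */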
23047a96 237unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
c9f299d9 238{
c3c787e8 239 if (!mem_cgroup_disabled())
4d7dcca2 240 return mem_cgroup_get_lru_size(lruvec, lru);
a3d8e054 241
599d0c95 242 return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
c9f299d9
KM
243}
244
1da177e4 245/*
1d3d4437 246 * Add a shrinker callback to be called from the vm.
1da177e4 247 */
1d3d4437 248int register_shrinker(struct shrinker *shrinker)
1da177e4 249{
1d3d4437
GC
250 size_t size = sizeof(*shrinker->nr_deferred);
251
1d3d4437
GC
252 if (shrinker->flags & SHRINKER_NUMA_AWARE)
253 size *= nr_node_ids;
254
255 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
256 if (!shrinker->nr_deferred)
257 return -ENOMEM;
258
8e1f936b
RR
259 down_write(&shrinker_rwsem);
260 list_add_tail(&shrinker->list, &shrinker_list);
261 up_write(&shrinker_rwsem);
1d3d4437 262 return 0;
1da177e4 263}
8e1f936b 264EXPORT_SYMBOL(register_shrinker);
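/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * client of the shrinker API registered above. The example_cache_* helpers
 * are hypothetical stand-ins for a real cache; the callback signatures,
 * SHRINK_STOP, DEFAULT_SEEKS and register_shrinker() are the real interface.
 */
static unsigned long example_cache_count(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	/* report how many objects could be freed right now; 0 means skip */
	return example_cache_nr_objects();
}

static unsigned long example_cache_scan(struct shrinker *shrink,
					struct shrink_control *sc)
{
	unsigned long freed;

	/* free up to sc->nr_to_scan objects and report how many went */
	if (!example_cache_trylock())
		return SHRINK_STOP;	/* cannot make progress, stop scanning */
	freed = example_cache_free_objects(sc->nr_to_scan);
	example_cache_unlock();
	return freed;
}

static struct shrinker example_cache_shrinker = {
	.count_objects	= example_cache_count,
	.scan_objects	= example_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* typically wired up from the cache's init path: */
/*	register_shrinker(&example_cache_shrinker);	*/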
1da177e4
LT
265
266/*
267 * Remove one
268 */
8e1f936b 269void unregister_shrinker(struct shrinker *shrinker)
1da177e4
LT
270{
271 down_write(&shrinker_rwsem);
272 list_del(&shrinker->list);
273 up_write(&shrinker_rwsem);
ae393321 274 kfree(shrinker->nr_deferred);
1da177e4 275}
8e1f936b 276EXPORT_SYMBOL(unregister_shrinker);
1da177e4
LT
277
278#define SHRINK_BATCH 128
1d3d4437 279
cb731d6c
VD
280static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
281 struct shrinker *shrinker,
282 unsigned long nr_scanned,
283 unsigned long nr_eligible)
1d3d4437
GC
284{
285 unsigned long freed = 0;
286 unsigned long long delta;
287 long total_scan;
d5bc5fd3 288 long freeable;
1d3d4437
GC
289 long nr;
290 long new_nr;
291 int nid = shrinkctl->nid;
292 long batch_size = shrinker->batch ? shrinker->batch
293 : SHRINK_BATCH;
5f33a080 294 long scanned = 0, next_deferred;
1d3d4437 295
d5bc5fd3
VD
296 freeable = shrinker->count_objects(shrinker, shrinkctl);
297 if (freeable == 0)
1d3d4437
GC
298 return 0;
299
300 /*
301 * copy the current shrinker scan count into a local variable
302 * and zero it so that other concurrent shrinker invocations
303 * don't also do this scanning work.
304 */
305 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
306
307 total_scan = nr;
6b4f7799 308 delta = (4 * nr_scanned) / shrinker->seeks;
d5bc5fd3 309 delta *= freeable;
6b4f7799 310 do_div(delta, nr_eligible + 1);
1d3d4437
GC
311 total_scan += delta;
312 if (total_scan < 0) {
8612c663 313 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
a0b02131 314 shrinker->scan_objects, total_scan);
d5bc5fd3 315 total_scan = freeable;
5f33a080
SL
316 next_deferred = nr;
317 } else
318 next_deferred = total_scan;
1d3d4437
GC
319
320 /*
321 * We need to avoid excessive windup on filesystem shrinkers
322 * due to large numbers of GFP_NOFS allocations causing the
323 * shrinkers to return -1 all the time. This results in a large
324 * nr being built up so when a shrink that can do some work
325 * comes along it empties the entire cache due to nr >>>
d5bc5fd3 326 * freeable. This is bad for sustaining a working set in
1d3d4437
GC
327 * memory.
328 *
329 * Hence only allow the shrinker to scan the entire cache when
330 * a large delta change is calculated directly.
331 */
d5bc5fd3
VD
332 if (delta < freeable / 4)
333 total_scan = min(total_scan, freeable / 2);
1d3d4437
GC
334
335 /*
336 * Avoid risking looping forever due to too large nr value:
337 * never try to free more than twice the estimated number of
338 * freeable entries.
339 */
d5bc5fd3
VD
340 if (total_scan > freeable * 2)
341 total_scan = freeable * 2;
1d3d4437
GC
342
343 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
6b4f7799
JW
344 nr_scanned, nr_eligible,
345 freeable, delta, total_scan);
1d3d4437 346
0b1fb40a
VD
347 /*
348 * Normally, we should not scan less than batch_size objects in one
349 * pass to avoid too frequent shrinker calls, but if the slab has less
350 * than batch_size objects in total and we are really tight on memory,
351 * we will try to reclaim all available objects, otherwise we can end
352 * up failing allocations although there are plenty of reclaimable
353 * objects spread over several slabs with usage less than the
354 * batch_size.
355 *
356 * We detect the "tight on memory" situations by looking at the total
357 * number of objects we want to scan (total_scan). If it is greater
d5bc5fd3 358 * than the total number of objects on slab (freeable), we must be
0b1fb40a
VD
359 * scanning at high prio and therefore should try to reclaim as much as
360 * possible.
361 */
362 while (total_scan >= batch_size ||
d5bc5fd3 363 total_scan >= freeable) {
a0b02131 364 unsigned long ret;
0b1fb40a 365 unsigned long nr_to_scan = min(batch_size, total_scan);
1d3d4437 366
0b1fb40a 367 shrinkctl->nr_to_scan = nr_to_scan;
a0b02131
DC
368 ret = shrinker->scan_objects(shrinker, shrinkctl);
369 if (ret == SHRINK_STOP)
370 break;
371 freed += ret;
1d3d4437 372
0b1fb40a
VD
373 count_vm_events(SLABS_SCANNED, nr_to_scan);
374 total_scan -= nr_to_scan;
5f33a080 375 scanned += nr_to_scan;
1d3d4437
GC
376
377 cond_resched();
378 }
379
5f33a080
SL
380 if (next_deferred >= scanned)
381 next_deferred -= scanned;
382 else
383 next_deferred = 0;
1d3d4437
GC
384 /*
385 * move the unused scan count back into the shrinker in a
386 * manner that handles concurrent updates. If we exhausted the
387 * scan, there is no need to do an update.
388 */
5f33a080
SL
389 if (next_deferred > 0)
390 new_nr = atomic_long_add_return(next_deferred,
1d3d4437
GC
391 &shrinker->nr_deferred[nid]);
392 else
393 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
394
df9024a8 395 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
1d3d4437 396 return freed;
1495f230
YH
397}
398
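/*
 * Editor's worked example for the "delta" calculation above (not part of
 * the original file), assuming the usual DEFAULT_SEEKS value of 2: if
 * reclaim scanned nr_scanned = 1,000 of nr_eligible = 10,000 LRU pages and
 * the shrinker reports freeable = 5,000 objects, then
 *
 *	delta = (4 * 1000) / 2              = 2,000
 *	delta = 2,000 * 5,000 / (10,000 + 1) ~= 999
 *
 * so about 20% of the cache is asked to be scanned for a 10% LRU scan,
 * because seeks = 2 rates each object at half the recreation cost of an
 * LRU page.
 */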
6b4f7799 399/**
cb731d6c 400 * shrink_slab - shrink slab caches
6b4f7799
JW
401 * @gfp_mask: allocation context
402 * @nid: node whose slab caches to target
cb731d6c 403 * @memcg: memory cgroup whose slab caches to target
6b4f7799
JW
404 * @nr_scanned: pressure numerator
405 * @nr_eligible: pressure denominator
1da177e4 406 *
6b4f7799 407 * Call the shrink functions to age shrinkable caches.
1da177e4 408 *
6b4f7799
JW
409 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
410 * unaware shrinkers will receive a node id of 0 instead.
1da177e4 411 *
cb731d6c
VD
412 * @memcg specifies the memory cgroup to target. If it is not NULL,
413 * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
0fc9f58a
VD
414 * objects from the memory cgroup specified. Otherwise, only unaware
415 * shrinkers are called.
cb731d6c 416 *
6b4f7799
JW
417 * @nr_scanned and @nr_eligible form a ratio that indicate how much of
418 * the available objects should be scanned. Page reclaim for example
419 * passes the number of pages scanned and the number of pages on the
420 * LRU lists that it considered on @nid, plus a bias in @nr_scanned
421 * when it encountered mapped pages. The ratio is further biased by
422 * the ->seeks setting of the shrink function, which indicates the
423 * cost to recreate an object relative to that of an LRU page.
b15e0905 424 *
6b4f7799 425 * Returns the number of reclaimed slab objects.
1da177e4 426 */
cb731d6c
VD
427static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
428 struct mem_cgroup *memcg,
429 unsigned long nr_scanned,
430 unsigned long nr_eligible)
1da177e4
LT
431{
432 struct shrinker *shrinker;
24f7c6b9 433 unsigned long freed = 0;
1da177e4 434
0fc9f58a 435 if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
cb731d6c
VD
436 return 0;
437
6b4f7799
JW
438 if (nr_scanned == 0)
439 nr_scanned = SWAP_CLUSTER_MAX;
1da177e4 440
f06590bd 441 if (!down_read_trylock(&shrinker_rwsem)) {
24f7c6b9
DC
442 /*
443 * If we would return 0, our callers would understand that we
444 * have nothing else to shrink and give up trying. By returning
445 * 1 we keep it going and assume we'll be able to shrink next
446 * time.
447 */
448 freed = 1;
f06590bd
MK
449 goto out;
450 }
1da177e4
LT
451
452 list_for_each_entry(shrinker, &shrinker_list, list) {
6b4f7799
JW
453 struct shrink_control sc = {
454 .gfp_mask = gfp_mask,
455 .nid = nid,
cb731d6c 456 .memcg = memcg,
6b4f7799 457 };
ec97097b 458
0fc9f58a
VD
459 /*
460 * If kernel memory accounting is disabled, we ignore
461 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
462 * passing NULL for memcg.
463 */
464 if (memcg_kmem_enabled() &&
465 !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
cb731d6c
VD
466 continue;
467
6b4f7799
JW
468 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
469 sc.nid = 0;
1da177e4 470
cb731d6c 471 freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
1da177e4 472 }
6b4f7799 473
1da177e4 474 up_read(&shrinker_rwsem);
f06590bd
MK
475out:
476 cond_resched();
24f7c6b9 477 return freed;
1da177e4
LT
478}
479
cb731d6c
VD
480void drop_slab_node(int nid)
481{
482 unsigned long freed;
483
484 do {
485 struct mem_cgroup *memcg = NULL;
486
487 freed = 0;
488 do {
489 freed += shrink_slab(GFP_KERNEL, nid, memcg,
490 1000, 1000);
491 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
492 } while (freed > 10);
493}
494
495void drop_slab(void)
496{
497 int nid;
498
499 for_each_online_node(nid)
500 drop_slab_node(nid);
501}
502
1da177e4
LT
503static inline int is_page_cache_freeable(struct page *page)
504{
ceddc3a5
JW
505 /*
506 * A freeable page cache page is referenced only by the caller
507 * that isolated the page, the page cache radix tree and
508 * optional buffer heads at page->private.
509 */
edcf4748 510 return page_count(page) - page_has_private(page) == 2;
1da177e4
LT
511}
512
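/*
 * Editor's note on the "== 2" above (not part of the original file): a page
 * cache page that reclaim has isolated is held by exactly two references,
 * the isolating caller and the page cache radix tree, and page_has_private()
 * adds one for attached buffer heads. Any higher count means a mapping, a
 * get_user_pages() pin or some other user still holds the page, so it is
 * not freeable.
 */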
703c2708 513static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
1da177e4 514{
930d9152 515 if (current->flags & PF_SWAPWRITE)
1da177e4 516 return 1;
703c2708 517 if (!inode_write_congested(inode))
1da177e4 518 return 1;
703c2708 519 if (inode_to_bdi(inode) == current->backing_dev_info)
1da177e4
LT
520 return 1;
521 return 0;
522}
523
524/*
525 * We detected a synchronous write error writing a page out. Probably
526 * -ENOSPC. We need to propagate that into the address_space for a subsequent
527 * fsync(), msync() or close().
528 *
529 * The tricky part is that after writepage we cannot touch the mapping: nothing
530 * prevents it from being freed up. But we have a ref on the page and once
531 * that page is locked, the mapping is pinned.
532 *
533 * We're allowed to run sleeping lock_page() here because we know the caller has
534 * __GFP_FS.
535 */
536static void handle_write_error(struct address_space *mapping,
537 struct page *page, int error)
538{
7eaceacc 539 lock_page(page);
3e9f45bd
GC
540 if (page_mapping(page) == mapping)
541 mapping_set_error(mapping, error);
1da177e4
LT
542 unlock_page(page);
543}
544
04e62a29
CL
545/* possible outcome of pageout() */
546typedef enum {
547 /* failed to write page out, page is locked */
548 PAGE_KEEP,
549 /* move page to the active list, page is locked */
550 PAGE_ACTIVATE,
551 /* page has been sent to the disk successfully, page is unlocked */
552 PAGE_SUCCESS,
553 /* page is clean and locked */
554 PAGE_CLEAN,
555} pageout_t;
556
1da177e4 557/*
1742f19f
AM
558 * pageout is called by shrink_page_list() for each dirty page.
559 * Calls ->writepage().
1da177e4 560 */
c661b078 561static pageout_t pageout(struct page *page, struct address_space *mapping,
7d3579e8 562 struct scan_control *sc)
1da177e4
LT
563{
564 /*
565 * If the page is dirty, only perform writeback if that write
566 * will be non-blocking, to prevent this allocation from being
567 * stalled by pagecache activity. But note that there may be
568 * stalls if we need to run get_block(). We could test
569 * PagePrivate for that.
570 *
8174202b 571 * If this process is currently in __generic_file_write_iter() against
1da177e4
LT
572 * this page's queue, we can perform writeback even if that
573 * will block.
574 *
575 * If the page is swapcache, write it back even if that would
576 * block, for some throttling. This happens by accident, because
577 * swap_backing_dev_info is bust: it doesn't reflect the
578 * congestion state of the swapdevs. Easy to fix, if needed.
1da177e4
LT
579 */
580 if (!is_page_cache_freeable(page))
581 return PAGE_KEEP;
582 if (!mapping) {
583 /*
584 * Some data journaling orphaned pages can have
585 * page->mapping == NULL while being dirty with clean buffers.
586 */
266cf658 587 if (page_has_private(page)) {
1da177e4
LT
588 if (try_to_free_buffers(page)) {
589 ClearPageDirty(page);
b1de0d13 590 pr_info("%s: orphaned page\n", __func__);
1da177e4
LT
591 return PAGE_CLEAN;
592 }
593 }
594 return PAGE_KEEP;
595 }
596 if (mapping->a_ops->writepage == NULL)
597 return PAGE_ACTIVATE;
703c2708 598 if (!may_write_to_inode(mapping->host, sc))
1da177e4
LT
599 return PAGE_KEEP;
600
601 if (clear_page_dirty_for_io(page)) {
602 int res;
603 struct writeback_control wbc = {
604 .sync_mode = WB_SYNC_NONE,
605 .nr_to_write = SWAP_CLUSTER_MAX,
111ebb6e
OH
606 .range_start = 0,
607 .range_end = LLONG_MAX,
1da177e4
LT
608 .for_reclaim = 1,
609 };
610
611 SetPageReclaim(page);
612 res = mapping->a_ops->writepage(page, &wbc);
613 if (res < 0)
614 handle_write_error(mapping, page, res);
994fc28c 615 if (res == AOP_WRITEPAGE_ACTIVATE) {
1da177e4
LT
616 ClearPageReclaim(page);
617 return PAGE_ACTIVATE;
618 }
c661b078 619
1da177e4
LT
620 if (!PageWriteback(page)) {
621 /* synchronous write or broken a_ops? */
622 ClearPageReclaim(page);
623 }
3aa23851 624 trace_mm_vmscan_writepage(page);
c4a25635 625 inc_node_page_state(page, NR_VMSCAN_WRITE);
1da177e4
LT
626 return PAGE_SUCCESS;
627 }
628
629 return PAGE_CLEAN;
630}
631
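/*
 * Editor's illustrative sketch, not part of the original file: the
 * filesystem side of the PAGE_ACTIVATE case above. A ->writepage that will
 * not write from reclaim context re-dirties the page and returns
 * AOP_WRITEPAGE_ACTIVATE with the page still locked, which pageout() turns
 * into PAGE_ACTIVATE. The example_* names are hypothetical; the return
 * convention is the real one (shmem_writepage is a real user).
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	if (wbc->for_reclaim && !example_can_write_from_reclaim(page)) {
		set_page_dirty(page);	/* data was not written, keep it dirty */
		/* page stays locked; vmscan moves it back to the active list */
		return AOP_WRITEPAGE_ACTIVATE;
	}
	return example_do_writepage(page, wbc);
}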
a649fd92 632/*
e286781d
NP
633 * Same as remove_mapping, but if the page is removed from the mapping, it
634 * gets returned with a refcount of 0.
a649fd92 635 */
a528910e
JW
636static int __remove_mapping(struct address_space *mapping, struct page *page,
637 bool reclaimed)
49d2e9cc 638{
c4843a75 639 unsigned long flags;
c4843a75 640
28e4d965
NP
641 BUG_ON(!PageLocked(page));
642 BUG_ON(mapping != page_mapping(page));
49d2e9cc 643
c4843a75 644 spin_lock_irqsave(&mapping->tree_lock, flags);
49d2e9cc 645 /*
0fd0e6b0
NP
646 * The non-racy check for a busy page.
647 *
648 * Must be careful with the order of the tests. When someone has
649 * a ref to the page, it may be possible that they dirty it then
650 * drop the reference. So if PageDirty is tested before page_count
651 * here, then the following race may occur:
652 *
653 * get_user_pages(&page);
654 * [user mapping goes away]
655 * write_to(page);
656 * !PageDirty(page) [good]
657 * SetPageDirty(page);
658 * put_page(page);
659 * !page_count(page) [good, discard it]
660 *
661 * [oops, our write_to data is lost]
662 *
663 * Reversing the order of the tests ensures such a situation cannot
664 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
0139aa7b 665 * load is not satisfied before that of page->_refcount.
0fd0e6b0
NP
666 *
667 * Note that if SetPageDirty is always performed via set_page_dirty,
668 * and thus under tree_lock, then this ordering is not required.
49d2e9cc 669 */
fe896d18 670 if (!page_ref_freeze(page, 2))
49d2e9cc 671 goto cannot_free;
e286781d
NP
672 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
673 if (unlikely(PageDirty(page))) {
fe896d18 674 page_ref_unfreeze(page, 2);
49d2e9cc 675 goto cannot_free;
e286781d 676 }
49d2e9cc
CL
677
678 if (PageSwapCache(page)) {
679 swp_entry_t swap = { .val = page_private(page) };
0a31bc97 680 mem_cgroup_swapout(page, swap);
49d2e9cc 681 __delete_from_swap_cache(page);
c4843a75 682 spin_unlock_irqrestore(&mapping->tree_lock, flags);
0a31bc97 683 swapcache_free(swap);
e286781d 684 } else {
6072d13c 685 void (*freepage)(struct page *);
a528910e 686 void *shadow = NULL;
6072d13c
LT
687
688 freepage = mapping->a_ops->freepage;
a528910e
JW
689 /*
690 * Remember a shadow entry for reclaimed file cache in
691 * order to detect refaults, thus thrashing, later on.
692 *
693 * But don't store shadows in an address space that is
694 * already exiting. This is not just an optimization,
695 * inode reclaim needs to empty out the radix tree or
696 * the nodes are lost. Don't plant shadows behind its
697 * back.
f9fe48be
RZ
698 *
699 * We also don't store shadows for DAX mappings because the
700 * only page cache pages found in these are zero pages
701 * covering holes, and because we don't want to mix DAX
702 * exceptional entries and shadow exceptional entries in the
703 * same page_tree.
a528910e
JW
704 */
705 if (reclaimed && page_is_file_cache(page) &&
f9fe48be 706 !mapping_exiting(mapping) && !dax_mapping(mapping))
a528910e 707 shadow = workingset_eviction(mapping, page);
62cccb8c 708 __delete_from_page_cache(page, shadow);
c4843a75 709 spin_unlock_irqrestore(&mapping->tree_lock, flags);
6072d13c
LT
710
711 if (freepage != NULL)
712 freepage(page);
49d2e9cc
CL
713 }
714
49d2e9cc
CL
715 return 1;
716
717cannot_free:
c4843a75 718 spin_unlock_irqrestore(&mapping->tree_lock, flags);
49d2e9cc
CL
719 return 0;
720}
721
e286781d
NP
722/*
723 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
724 * someone else has a ref on the page, abort and return 0. If it was
725 * successfully detached, return 1. Assumes the caller has a single ref on
726 * this page.
727 */
728int remove_mapping(struct address_space *mapping, struct page *page)
729{
a528910e 730 if (__remove_mapping(mapping, page, false)) {
e286781d
NP
731 /*
732 * Unfreezing the refcount with 1 rather than 2 effectively
733 * drops the pagecache ref for us without requiring another
734 * atomic operation.
735 */
fe896d18 736 page_ref_unfreeze(page, 1);
e286781d
NP
737 return 1;
738 }
739 return 0;
740}
741
894bc310
LS
742/**
743 * putback_lru_page - put previously isolated page onto appropriate LRU list
744 * @page: page to be put back to appropriate lru list
745 *
746 * Add previously isolated @page to appropriate LRU list.
747 * Page may still be unevictable for other reasons.
748 *
749 * lru_lock must not be held, interrupts must be enabled.
750 */
894bc310
LS
751void putback_lru_page(struct page *page)
752{
0ec3b74c 753 bool is_unevictable;
bbfd28ee 754 int was_unevictable = PageUnevictable(page);
894bc310 755
309381fe 756 VM_BUG_ON_PAGE(PageLRU(page), page);
894bc310
LS
757
758redo:
759 ClearPageUnevictable(page);
760
39b5f29a 761 if (page_evictable(page)) {
894bc310
LS
762 /*
763 * For evictable pages, we can use the cache.
764 * In event of a race, worst case is we end up with an
765 * unevictable page on [in]active list.
766 * We know how to handle that.
767 */
0ec3b74c 768 is_unevictable = false;
c53954a0 769 lru_cache_add(page);
894bc310
LS
770 } else {
771 /*
772 * Put unevictable pages directly on zone's unevictable
773 * list.
774 */
0ec3b74c 775 is_unevictable = true;
894bc310 776 add_page_to_unevictable_list(page);
6a7b9548 777 /*
21ee9f39
MK
778 * When racing with an mlock or AS_UNEVICTABLE clearing
779 * (page is unlocked) make sure that if the other thread
780 * does not observe our setting of PG_lru and fails
24513264 781 * isolation/check_move_unevictable_pages,
21ee9f39 782 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
6a7b9548
JW
783 * the page back to the evictable list.
784 *
21ee9f39 785 * The other side is TestClearPageMlocked() or shmem_lock().
6a7b9548
JW
786 */
787 smp_mb();
894bc310 788 }
894bc310
LS
789
790 /*
791 * page's status can change while we move it among lru. If an evictable
792 * page is on the unevictable list, it will never be freed. To avoid that,
793 * check again after we have added it to the list.
794 */
0ec3b74c 795 if (is_unevictable && page_evictable(page)) {
894bc310
LS
796 if (!isolate_lru_page(page)) {
797 put_page(page);
798 goto redo;
799 }
800 /* This means someone else dropped this page from LRU
801 * So, it will be freed or putback to LRU again. There is
802 * nothing to do here.
803 */
804 }
805
0ec3b74c 806 if (was_unevictable && !is_unevictable)
bbfd28ee 807 count_vm_event(UNEVICTABLE_PGRESCUED);
0ec3b74c 808 else if (!was_unevictable && is_unevictable)
bbfd28ee
LS
809 count_vm_event(UNEVICTABLE_PGCULLED);
810
894bc310
LS
811 put_page(page); /* drop ref from isolate */
812}
813
dfc8d636
JW
814enum page_references {
815 PAGEREF_RECLAIM,
816 PAGEREF_RECLAIM_CLEAN,
64574746 817 PAGEREF_KEEP,
dfc8d636
JW
818 PAGEREF_ACTIVATE,
819};
820
821static enum page_references page_check_references(struct page *page,
822 struct scan_control *sc)
823{
64574746 824 int referenced_ptes, referenced_page;
dfc8d636 825 unsigned long vm_flags;
dfc8d636 826
c3ac9a8a
JW
827 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
828 &vm_flags);
64574746 829 referenced_page = TestClearPageReferenced(page);
dfc8d636 830
dfc8d636
JW
831 /*
832 * Mlock lost the isolation race with us. Let try_to_unmap()
833 * move the page to the unevictable list.
834 */
835 if (vm_flags & VM_LOCKED)
836 return PAGEREF_RECLAIM;
837
64574746 838 if (referenced_ptes) {
e4898273 839 if (PageSwapBacked(page))
64574746
JW
840 return PAGEREF_ACTIVATE;
841 /*
842 * All mapped pages start out with page table
843 * references from the instantiating fault, so we need
844 * to look twice if a mapped file page is used more
845 * than once.
846 *
847 * Mark it and spare it for another trip around the
848 * inactive list. Another page table reference will
849 * lead to its activation.
850 *
851 * Note: the mark is set for activated pages as well
852 * so that recently deactivated but used pages are
853 * quickly recovered.
854 */
855 SetPageReferenced(page);
856
34dbc67a 857 if (referenced_page || referenced_ptes > 1)
64574746
JW
858 return PAGEREF_ACTIVATE;
859
c909e993
KK
860 /*
861 * Activate file-backed executable pages after first usage.
862 */
863 if (vm_flags & VM_EXEC)
864 return PAGEREF_ACTIVATE;
865
64574746
JW
866 return PAGEREF_KEEP;
867 }
dfc8d636
JW
868
869 /* Reclaim if clean, defer dirty pages to writeback */
2e30244a 870 if (referenced_page && !PageSwapBacked(page))
64574746
JW
871 return PAGEREF_RECLAIM_CLEAN;
872
873 return PAGEREF_RECLAIM;
dfc8d636
JW
874}
875
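/*
 * Editor's summary of page_check_references() above (added for clarity, not
 * part of the original file). "ptes" is referenced_ptes, "flag" is the
 * PG_referenced bit that was test-and-cleared:
 *
 *	VM_LOCKED vma found                   -> PAGEREF_RECLAIM (unmap culls it)
 *	ptes == 0, !flag                      -> PAGEREF_RECLAIM
 *	ptes == 0,  flag, file-backed page    -> PAGEREF_RECLAIM_CLEAN
 *	ptes == 0,  flag, anon page           -> PAGEREF_RECLAIM
 *	ptes == 1, !flag, plain file page     -> PAGEREF_KEEP (PG_referenced set)
 *	ptes == 1 and (anon, VM_EXEC or flag) -> PAGEREF_ACTIVATE
 *	ptes  > 1                             -> PAGEREF_ACTIVATE
 */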
e2be15f6
MG
876/* Check if a page is dirty or under writeback */
877static void page_check_dirty_writeback(struct page *page,
878 bool *dirty, bool *writeback)
879{
b4597226
MG
880 struct address_space *mapping;
881
e2be15f6
MG
882 /*
883 * Anonymous pages are not handled by flushers and must be written
884 * from reclaim context. Do not stall reclaim based on them
885 */
886 if (!page_is_file_cache(page)) {
887 *dirty = false;
888 *writeback = false;
889 return;
890 }
891
892 /* By default assume that the page flags are accurate */
893 *dirty = PageDirty(page);
894 *writeback = PageWriteback(page);
b4597226
MG
895
896 /* Verify dirty/writeback state if the filesystem supports it */
897 if (!page_has_private(page))
898 return;
899
900 mapping = page_mapping(page);
901 if (mapping && mapping->a_ops->is_dirty_writeback)
902 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
e2be15f6
MG
903}
904
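/*
 * Editor's illustrative sketch, not part of the original file: how a
 * block-backed filesystem exposes per-buffer state to the check above.
 * buffer_check_dirty_writeback() is the real helper from fs/buffer.c; the
 * example_aops instance (and example_writepage from the earlier sketch)
 * are hypothetical.
 */
static const struct address_space_operations example_aops = {
	.writepage		= example_writepage,
	.is_dirty_writeback	= buffer_check_dirty_writeback,
};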
1da177e4 905/*
1742f19f 906 * shrink_page_list() returns the number of reclaimed pages
1da177e4 907 */
1742f19f 908static unsigned long shrink_page_list(struct list_head *page_list,
599d0c95 909 struct pglist_data *pgdat,
f84f6e2b 910 struct scan_control *sc,
02c6de8d 911 enum ttu_flags ttu_flags,
8e950282 912 unsigned long *ret_nr_dirty,
d43006d5 913 unsigned long *ret_nr_unqueued_dirty,
8e950282 914 unsigned long *ret_nr_congested,
02c6de8d 915 unsigned long *ret_nr_writeback,
b1a6f21e 916 unsigned long *ret_nr_immediate,
02c6de8d 917 bool force_reclaim)
1da177e4
LT
918{
919 LIST_HEAD(ret_pages);
abe4c3b5 920 LIST_HEAD(free_pages);
1da177e4 921 int pgactivate = 0;
d43006d5 922 unsigned long nr_unqueued_dirty = 0;
0e093d99
MG
923 unsigned long nr_dirty = 0;
924 unsigned long nr_congested = 0;
05ff5137 925 unsigned long nr_reclaimed = 0;
92df3a72 926 unsigned long nr_writeback = 0;
b1a6f21e 927 unsigned long nr_immediate = 0;
1da177e4
LT
928
929 cond_resched();
930
1da177e4
LT
931 while (!list_empty(page_list)) {
932 struct address_space *mapping;
933 struct page *page;
934 int may_enter_fs;
02c6de8d 935 enum page_references references = PAGEREF_RECLAIM_CLEAN;
e2be15f6 936 bool dirty, writeback;
854e9ed0
MK
937 bool lazyfree = false;
938 int ret = SWAP_SUCCESS;
1da177e4
LT
939
940 cond_resched();
941
942 page = lru_to_page(page_list);
943 list_del(&page->lru);
944
529ae9aa 945 if (!trylock_page(page))
1da177e4
LT
946 goto keep;
947
309381fe 948 VM_BUG_ON_PAGE(PageActive(page), page);
1da177e4
LT
949
950 sc->nr_scanned++;
80e43426 951
39b5f29a 952 if (unlikely(!page_evictable(page)))
b291f000 953 goto cull_mlocked;
894bc310 954
a6dc60f8 955 if (!sc->may_unmap && page_mapped(page))
80e43426
CL
956 goto keep_locked;
957
1da177e4
LT
958 /* Double the slab pressure for mapped and swapcache pages */
959 if (page_mapped(page) || PageSwapCache(page))
960 sc->nr_scanned++;
961
c661b078
AW
962 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
963 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
964
e2be15f6
MG
965 /*
966 * The number of dirty pages determines if a zone is marked
967 * reclaim_congested which affects wait_iff_congested. kswapd
968 * will stall and start writing pages if the tail of the LRU
969 * is all dirty unqueued pages.
970 */
971 page_check_dirty_writeback(page, &dirty, &writeback);
972 if (dirty || writeback)
973 nr_dirty++;
974
975 if (dirty && !writeback)
976 nr_unqueued_dirty++;
977
d04e8acd
MG
978 /*
979 * Treat this page as congested if the underlying BDI is or if
980 * pages are cycling through the LRU so quickly that the
981 * pages marked for immediate reclaim are making it to the
982 * end of the LRU a second time.
983 */
e2be15f6 984 mapping = page_mapping(page);
1da58ee2 985 if (((dirty || writeback) && mapping &&
703c2708 986 inode_write_congested(mapping->host)) ||
d04e8acd 987 (writeback && PageReclaim(page)))
e2be15f6
MG
988 nr_congested++;
989
283aba9f
MG
990 /*
991 * If a page at the tail of the LRU is under writeback, there
992 * are three cases to consider.
993 *
994 * 1) If reclaim is encountering an excessive number of pages
995 * under writeback and this page is both under writeback and
996 * PageReclaim then it indicates that pages are being queued
997 * for IO but are being recycled through the LRU before the
998 * IO can complete. Waiting on the page itself risks an
999 * indefinite stall if it is impossible to writeback the
1000 * page due to IO error or disconnected storage so instead
b1a6f21e
MG
1001 * note that the LRU is being scanned too quickly and the
1002 * caller can stall after page list has been processed.
283aba9f 1003 *
97c9341f 1004 * 2) Global or new memcg reclaim encounters a page that is
ecf5fc6e
MH
1005 * not marked for immediate reclaim, or the caller does not
1006 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
1007 * not to fs). In this case mark the page for immediate
97c9341f 1008 * reclaim and continue scanning.
283aba9f 1009 *
ecf5fc6e
MH
1010 * Require may_enter_fs because we would wait on fs, which
1011 * may not have submitted IO yet. And the loop driver might
283aba9f
MG
1012 * enter reclaim, and deadlock if it waits on a page for
1013 * which it is needed to do the write (loop masks off
1014 * __GFP_IO|__GFP_FS for this reason); but more thought
1015 * would probably show more reasons.
1016 *
7fadc820 1017 * 3) Legacy memcg encounters a page that is already marked
283aba9f
MG
1018 * PageReclaim. memcg does not have any dirty pages
1019 * throttling so we could easily OOM just because too many
1020 * pages are in writeback and there is nothing else to
1021 * reclaim. Wait for the writeback to complete.
1022 */
c661b078 1023 if (PageWriteback(page)) {
283aba9f
MG
1024 /* Case 1 above */
1025 if (current_is_kswapd() &&
1026 PageReclaim(page) &&
599d0c95 1027 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
b1a6f21e
MG
1028 nr_immediate++;
1029 goto keep_locked;
283aba9f
MG
1030
1031 /* Case 2 above */
97c9341f 1032 } else if (sane_reclaim(sc) ||
ecf5fc6e 1033 !PageReclaim(page) || !may_enter_fs) {
c3b94f44
HD
1034 /*
1035 * This is slightly racy - end_page_writeback()
1036 * might have just cleared PageReclaim, then
1037 * setting PageReclaim here end up interpreted
1038 * as PageReadahead - but that does not matter
1039 * enough to care. What we do want is for this
1040 * page to have PageReclaim set next time memcg
1041 * reclaim reaches the tests above, so it will
1042 * then wait_on_page_writeback() to avoid OOM;
1043 * and it's also appropriate in global reclaim.
1044 */
1045 SetPageReclaim(page);
e62e384e 1046 nr_writeback++;
c3b94f44 1047 goto keep_locked;
283aba9f
MG
1048
1049 /* Case 3 above */
1050 } else {
7fadc820 1051 unlock_page(page);
283aba9f 1052 wait_on_page_writeback(page);
7fadc820
HD
1053 /* then go back and try same page again */
1054 list_add_tail(&page->lru, page_list);
1055 continue;
e62e384e 1056 }
c661b078 1057 }
1da177e4 1058
02c6de8d
MK
1059 if (!force_reclaim)
1060 references = page_check_references(page, sc);
1061
dfc8d636
JW
1062 switch (references) {
1063 case PAGEREF_ACTIVATE:
1da177e4 1064 goto activate_locked;
64574746
JW
1065 case PAGEREF_KEEP:
1066 goto keep_locked;
dfc8d636
JW
1067 case PAGEREF_RECLAIM:
1068 case PAGEREF_RECLAIM_CLEAN:
1069 ; /* try to reclaim the page below */
1070 }
1da177e4 1071
1da177e4
LT
1072 /*
1073 * Anonymous process memory has backing store?
1074 * Try to allocate it some swap space here.
1075 */
b291f000 1076 if (PageAnon(page) && !PageSwapCache(page)) {
63eb6b93
HD
1077 if (!(sc->gfp_mask & __GFP_IO))
1078 goto keep_locked;
5bc7b8ac 1079 if (!add_to_swap(page, page_list))
1da177e4 1080 goto activate_locked;
854e9ed0 1081 lazyfree = true;
63eb6b93 1082 may_enter_fs = 1;
1da177e4 1083
e2be15f6
MG
1084 /* Adding to swap updated mapping */
1085 mapping = page_mapping(page);
7751b2da
KS
1086 } else if (unlikely(PageTransHuge(page))) {
1087 /* Split file THP */
1088 if (split_huge_page_to_list(page, page_list))
1089 goto keep_locked;
e2be15f6 1090 }
1da177e4 1091
7751b2da
KS
1092 VM_BUG_ON_PAGE(PageTransHuge(page), page);
1093
1da177e4
LT
1094 /*
1095 * The page is mapped into the page tables of one or more
1096 * processes. Try to unmap it here.
1097 */
1098 if (page_mapped(page) && mapping) {
854e9ed0
MK
1099 switch (ret = try_to_unmap(page, lazyfree ?
1100 (ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
1101 (ttu_flags | TTU_BATCH_FLUSH))) {
1da177e4
LT
1102 case SWAP_FAIL:
1103 goto activate_locked;
1104 case SWAP_AGAIN:
1105 goto keep_locked;
b291f000
NP
1106 case SWAP_MLOCK:
1107 goto cull_mlocked;
854e9ed0
MK
1108 case SWAP_LZFREE:
1109 goto lazyfree;
1da177e4
LT
1110 case SWAP_SUCCESS:
1111 ; /* try to free the page below */
1112 }
1113 }
1114
1115 if (PageDirty(page)) {
ee72886d
MG
1116 /*
1117 * Only kswapd can writeback filesystem pages to
d43006d5
MG
1118 * avoid risk of stack overflow but only writeback
1119 * if many dirty pages have been encountered.
ee72886d 1120 */
f84f6e2b 1121 if (page_is_file_cache(page) &&
9e3b2f8c 1122 (!current_is_kswapd() ||
599d0c95 1123 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
49ea7eb6
MG
1124 /*
1125 * Immediately reclaim when written back.
1126 * Similar in principle to deactivate_page()
1127 * except we already have the page isolated
1128 * and know it's dirty
1129 */
c4a25635 1130 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
49ea7eb6
MG
1131 SetPageReclaim(page);
1132
ee72886d
MG
1133 goto keep_locked;
1134 }
1135
dfc8d636 1136 if (references == PAGEREF_RECLAIM_CLEAN)
1da177e4 1137 goto keep_locked;
4dd4b920 1138 if (!may_enter_fs)
1da177e4 1139 goto keep_locked;
52a8363e 1140 if (!sc->may_writepage)
1da177e4
LT
1141 goto keep_locked;
1142
d950c947
MG
1143 /*
1144 * Page is dirty. Flush the TLB if a writable entry
1145 * potentially exists to avoid CPU writes after IO
1146 * starts and then write it out here.
1147 */
1148 try_to_unmap_flush_dirty();
7d3579e8 1149 switch (pageout(page, mapping, sc)) {
1da177e4
LT
1150 case PAGE_KEEP:
1151 goto keep_locked;
1152 case PAGE_ACTIVATE:
1153 goto activate_locked;
1154 case PAGE_SUCCESS:
7d3579e8 1155 if (PageWriteback(page))
41ac1999 1156 goto keep;
7d3579e8 1157 if (PageDirty(page))
1da177e4 1158 goto keep;
7d3579e8 1159
1da177e4
LT
1160 /*
1161 * A synchronous write - probably a ramdisk. Go
1162 * ahead and try to reclaim the page.
1163 */
529ae9aa 1164 if (!trylock_page(page))
1da177e4
LT
1165 goto keep;
1166 if (PageDirty(page) || PageWriteback(page))
1167 goto keep_locked;
1168 mapping = page_mapping(page);
1169 case PAGE_CLEAN:
1170 ; /* try to free the page below */
1171 }
1172 }
1173
1174 /*
1175 * If the page has buffers, try to free the buffer mappings
1176 * associated with this page. If we succeed we try to free
1177 * the page as well.
1178 *
1179 * We do this even if the page is PageDirty().
1180 * try_to_release_page() does not perform I/O, but it is
1181 * possible for a page to have PageDirty set, but it is actually
1182 * clean (all its buffers are clean). This happens if the
1183 * buffers were written out directly, with submit_bh(). ext3
894bc310 1184 * will do this, as well as the blockdev mapping.
1da177e4
LT
1185 * try_to_release_page() will discover that cleanness and will
1186 * drop the buffers and mark the page clean - it can be freed.
1187 *
1188 * Rarely, pages can have buffers and no ->mapping. These are
1189 * the pages which were not successfully invalidated in
1190 * truncate_complete_page(). We try to drop those buffers here
1191 * and if that worked, and the page is no longer mapped into
1192 * process address space (page_count == 1) it can be freed.
1193 * Otherwise, leave the page on the LRU so it is swappable.
1194 */
266cf658 1195 if (page_has_private(page)) {
1da177e4
LT
1196 if (!try_to_release_page(page, sc->gfp_mask))
1197 goto activate_locked;
e286781d
NP
1198 if (!mapping && page_count(page) == 1) {
1199 unlock_page(page);
1200 if (put_page_testzero(page))
1201 goto free_it;
1202 else {
1203 /*
1204 * rare race with speculative reference.
1205 * the speculative reference will free
1206 * this page shortly, so we may
1207 * increment nr_reclaimed here (and
1208 * leave it off the LRU).
1209 */
1210 nr_reclaimed++;
1211 continue;
1212 }
1213 }
1da177e4
LT
1214 }
1215
854e9ed0 1216lazyfree:
a528910e 1217 if (!mapping || !__remove_mapping(mapping, page, true))
49d2e9cc 1218 goto keep_locked;
1da177e4 1219
a978d6f5
NP
1220 /*
1221 * At this point, we have no other references and there is
1222 * no way to pick any more up (removed from LRU, removed
1223 * from pagecache). Can use non-atomic bitops now (and
1224 * we obviously don't have to worry about waking up a process
1225 * waiting on the page lock, because there are no references.
1226 */
48c935ad 1227 __ClearPageLocked(page);
e286781d 1228free_it:
854e9ed0
MK
1229 if (ret == SWAP_LZFREE)
1230 count_vm_event(PGLAZYFREED);
1231
05ff5137 1232 nr_reclaimed++;
abe4c3b5
MG
1233
1234 /*
1235 * Is there need to periodically free_page_list? It would
1236 * appear not as the counts should be low
1237 */
1238 list_add(&page->lru, &free_pages);
1da177e4
LT
1239 continue;
1240
b291f000 1241cull_mlocked:
63d6c5ad
HD
1242 if (PageSwapCache(page))
1243 try_to_free_swap(page);
b291f000 1244 unlock_page(page);
c54839a7 1245 list_add(&page->lru, &ret_pages);
b291f000
NP
1246 continue;
1247
1da177e4 1248activate_locked:
68a22394 1249 /* Not a candidate for swapping, so reclaim swap space. */
5ccc5aba 1250 if (PageSwapCache(page) && mem_cgroup_swap_full(page))
a2c43eed 1251 try_to_free_swap(page);
309381fe 1252 VM_BUG_ON_PAGE(PageActive(page), page);
1da177e4
LT
1253 SetPageActive(page);
1254 pgactivate++;
1255keep_locked:
1256 unlock_page(page);
1257keep:
1258 list_add(&page->lru, &ret_pages);
309381fe 1259 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1da177e4 1260 }
abe4c3b5 1261
747db954 1262 mem_cgroup_uncharge_list(&free_pages);
72b252ae 1263 try_to_unmap_flush();
b745bc85 1264 free_hot_cold_page_list(&free_pages, true);
abe4c3b5 1265
1da177e4 1266 list_splice(&ret_pages, page_list);
f8891e5e 1267 count_vm_events(PGACTIVATE, pgactivate);
0a31bc97 1268
8e950282
MG
1269 *ret_nr_dirty += nr_dirty;
1270 *ret_nr_congested += nr_congested;
d43006d5 1271 *ret_nr_unqueued_dirty += nr_unqueued_dirty;
92df3a72 1272 *ret_nr_writeback += nr_writeback;
b1a6f21e 1273 *ret_nr_immediate += nr_immediate;
05ff5137 1274 return nr_reclaimed;
1da177e4
LT
1275}
1276
02c6de8d
MK
1277unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1278 struct list_head *page_list)
1279{
1280 struct scan_control sc = {
1281 .gfp_mask = GFP_KERNEL,
1282 .priority = DEF_PRIORITY,
1283 .may_unmap = 1,
1284 };
8e950282 1285 unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
02c6de8d
MK
1286 struct page *page, *next;
1287 LIST_HEAD(clean_pages);
1288
1289 list_for_each_entry_safe(page, next, page_list, lru) {
117aad1e 1290 if (page_is_file_cache(page) && !PageDirty(page) &&
b1123ea6 1291 !__PageMovable(page)) {
02c6de8d
MK
1292 ClearPageActive(page);
1293 list_move(&page->lru, &clean_pages);
1294 }
1295 }
1296
599d0c95 1297 ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
8e950282
MG
1298 TTU_UNMAP|TTU_IGNORE_ACCESS,
1299 &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
02c6de8d 1300 list_splice(&clean_pages, page_list);
599d0c95 1301 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
02c6de8d
MK
1302 return ret;
1303}
1304
5ad333eb
AW
1305/*
1306 * Attempt to remove the specified page from its LRU. Only take this page
1307 * if it is of the appropriate PageActive status. Pages which are being
1308 * freed elsewhere are also ignored.
1309 *
1310 * page: page to consider
1311 * mode: one of the LRU isolation modes defined above
1312 *
1313 * returns 0 on success, -ve errno on failure.
1314 */
f3fd4a61 1315int __isolate_lru_page(struct page *page, isolate_mode_t mode)
5ad333eb
AW
1316{
1317 int ret = -EINVAL;
1318
1319 /* Only take pages on the LRU. */
1320 if (!PageLRU(page))
1321 return ret;
1322
e46a2879
MK
1323 /* Compaction should not handle unevictable pages but CMA can do so */
1324 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
894bc310
LS
1325 return ret;
1326
5ad333eb 1327 ret = -EBUSY;
08e552c6 1328
c8244935
MG
1329 /*
1330 * To minimise LRU disruption, the caller can indicate that it only
1331 * wants to isolate pages it will be able to operate on without
1332 * blocking - clean pages for the most part.
1333 *
1334 * ISOLATE_CLEAN means that only clean pages should be isolated. This
1335 * is used by reclaim when it cannot write to backing storage
1336 *
1337 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
1338 * that can be migrated without blocking
1339 */
1340 if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1341 /* All the caller can do on PageWriteback is block */
1342 if (PageWriteback(page))
1343 return ret;
1344
1345 if (PageDirty(page)) {
1346 struct address_space *mapping;
1347
1348 /* ISOLATE_CLEAN means only clean pages */
1349 if (mode & ISOLATE_CLEAN)
1350 return ret;
1351
1352 /*
1353 * Only pages without mappings or that have a
1354 * ->migratepage callback are possible to migrate
1355 * without blocking
1356 */
1357 mapping = page_mapping(page);
1358 if (mapping && !mapping->a_ops->migratepage)
1359 return ret;
1360 }
1361 }
39deaf85 1362
f80c0673
MK
1363 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1364 return ret;
1365
5ad333eb
AW
1366 if (likely(get_page_unless_zero(page))) {
1367 /*
1368 * Be careful not to clear PageLRU until after we're
1369 * sure the page is not being freed elsewhere -- the
1370 * page release code relies on it.
1371 */
1372 ClearPageLRU(page);
1373 ret = 0;
1374 }
1375
1376 return ret;
1377}
1378
7ee36a14
MG
1379
1380/*
1381 * Update LRU sizes after isolating pages. The LRU size updates must
1382 * be complete before mem_cgroup_update_lru_size due to a sanity check.
1383 */
1384static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1385 enum lru_list lru, unsigned long *nr_zone_taken,
1386 unsigned long nr_taken)
1387{
7ee36a14
MG
1388 int zid;
1389
7ee36a14
MG
1390 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1391 if (!nr_zone_taken[zid])
1392 continue;
1393
1394 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1395 }
7ee36a14
MG
1396
1397#ifdef CONFIG_MEMCG
1398 mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
1399#endif
1400}
1401
1da177e4 1402/*
a52633d8 1403 * zone_lru_lock is heavily contended. Some of the functions that
1da177e4
LT
1404 * shrink the lists perform better by taking out a batch of pages
1405 * and working on them outside the LRU lock.
1406 *
1407 * For pagecache intensive workloads, this function is the hottest
1408 * spot in the kernel (apart from copy_*_user functions).
1409 *
1410 * Appropriate locks must be held before calling this function.
1411 *
1412 * @nr_to_scan: The number of pages to look through on the list.
5dc35979 1413 * @lruvec: The LRU vector to pull pages from.
1da177e4 1414 * @dst: The temp list to put pages on to.
f626012d 1415 * @nr_scanned: The number of pages that were scanned.
fe2c2a10 1416 * @sc: The scan_control struct for this reclaim session
5ad333eb 1417 * @mode: One of the LRU isolation modes
3cb99451 1418 * @lru: LRU list id for isolating
1da177e4
LT
1419 *
1420 * returns how many pages were moved onto *@dst.
1421 */
69e05944 1422static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
5dc35979 1423 struct lruvec *lruvec, struct list_head *dst,
fe2c2a10 1424 unsigned long *nr_scanned, struct scan_control *sc,
3cb99451 1425 isolate_mode_t mode, enum lru_list lru)
1da177e4 1426{
75b00af7 1427 struct list_head *src = &lruvec->lists[lru];
69e05944 1428 unsigned long nr_taken = 0;
599d0c95 1429 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
7cc30fcf 1430 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
599d0c95 1431 unsigned long scan, nr_pages;
b2e18757 1432 LIST_HEAD(pages_skipped);
1da177e4 1433
0b802f10 1434 for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
d7f05528 1435 !list_empty(src);) {
5ad333eb 1436 struct page *page;
5ad333eb 1437
1da177e4
LT
1438 page = lru_to_page(src);
1439 prefetchw_prev_lru_page(page, src, flags);
1440
309381fe 1441 VM_BUG_ON_PAGE(!PageLRU(page), page);
8d438f96 1442
b2e18757
MG
1443 if (page_zonenum(page) > sc->reclaim_idx) {
1444 list_move(&page->lru, &pages_skipped);
7cc30fcf 1445 nr_skipped[page_zonenum(page)]++;
b2e18757
MG
1446 continue;
1447 }
1448
d7f05528
MG
1449 /*
1450 * Account for scanned and skipped separately to avoid the pgdat
1451 * being prematurely marked unreclaimable by pgdat_reclaimable.
1452 */
1453 scan++;
1454
f3fd4a61 1455 switch (__isolate_lru_page(page, mode)) {
5ad333eb 1456 case 0:
599d0c95
MG
1457 nr_pages = hpage_nr_pages(page);
1458 nr_taken += nr_pages;
1459 nr_zone_taken[page_zonenum(page)] += nr_pages;
5ad333eb 1460 list_move(&page->lru, dst);
5ad333eb
AW
1461 break;
1462
1463 case -EBUSY:
1464 /* else it is being freed elsewhere */
1465 list_move(&page->lru, src);
1466 continue;
46453a6e 1467
5ad333eb
AW
1468 default:
1469 BUG();
1470 }
1da177e4
LT
1471 }
1472
b2e18757
MG
1473 /*
1474 * Splice any skipped pages to the start of the LRU list. Note that
1475 * this disrupts the LRU order when reclaiming for lower zones but
1476 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1477 * scanning would soon rescan the same pages to skip and put the
1478 * system at risk of premature OOM.
1479 */
7cc30fcf
MG
1480 if (!list_empty(&pages_skipped)) {
1481 int zid;
d7f05528 1482 unsigned long total_skipped = 0;
7cc30fcf 1483
7cc30fcf
MG
1484 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1485 if (!nr_skipped[zid])
1486 continue;
1487
1488 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
d7f05528 1489 total_skipped += nr_skipped[zid];
7cc30fcf 1490 }
d7f05528
MG
1491
1492 /*
1493 * Account skipped pages as a partial scan as the pgdat may be
1494 * close to unreclaimable. If the LRU list is empty, account
1495 * skipped pages as a full scan.
1496 */
1497 scan += list_empty(src) ? total_skipped : total_skipped >> 2;
1498
1499 list_splice(&pages_skipped, src);
7cc30fcf 1500 }
f626012d 1501 *nr_scanned = scan;
e5146b12 1502 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
75b00af7 1503 nr_taken, mode, is_file_lru(lru));
7ee36a14 1504 update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
1da177e4
LT
1505 return nr_taken;
1506}
1507
62695a84
NP
1508/**
1509 * isolate_lru_page - tries to isolate a page from its LRU list
1510 * @page: page to isolate from its LRU list
1511 *
1512 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1513 * vmstat statistic corresponding to whatever LRU list the page was on.
1514 *
1515 * Returns 0 if the page was removed from an LRU list.
1516 * Returns -EBUSY if the page was not on an LRU list.
1517 *
1518 * The returned page will have PageLRU() cleared. If it was found on
894bc310
LS
1519 * the active list, it will have PageActive set. If it was found on
1520 * the unevictable list, it will have the PageUnevictable bit set. That flag
1521 * may need to be cleared by the caller before letting the page go.
62695a84
NP
1522 *
1523 * The vmstat statistic corresponding to the list on which the page was
1524 * found will be decremented.
1525 *
1526 * Restrictions:
1527 * (1) Must be called with an elevated refcount on the page. This is a
1528 * fundamental difference from isolate_lru_pages (which is called
1529 * without a stable reference).
1530 * (2) the lru_lock must not be held.
1531 * (3) interrupts must be enabled.
1532 */
1533int isolate_lru_page(struct page *page)
1534{
1535 int ret = -EBUSY;
1536
309381fe 1537 VM_BUG_ON_PAGE(!page_count(page), page);
cf2a82ee 1538 WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
0c917313 1539
62695a84
NP
1540 if (PageLRU(page)) {
1541 struct zone *zone = page_zone(page);
fa9add64 1542 struct lruvec *lruvec;
62695a84 1543
a52633d8 1544 spin_lock_irq(zone_lru_lock(zone));
599d0c95 1545 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
0c917313 1546 if (PageLRU(page)) {
894bc310 1547 int lru = page_lru(page);
0c917313 1548 get_page(page);
62695a84 1549 ClearPageLRU(page);
fa9add64
HD
1550 del_page_from_lru_list(page, lruvec, lru);
1551 ret = 0;
62695a84 1552 }
a52633d8 1553 spin_unlock_irq(zone_lru_lock(zone));
62695a84
NP
1554 }
1555 return ret;
1556}
1557
35cd7815 1558/*
d37dd5dc
FW
1559 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1560 * then get rescheduled. When there are a massive number of tasks doing page
1561 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1562 * the LRU list will go small and be scanned faster than necessary, leading to
1563 * unnecessary swapping, thrashing and OOM.
35cd7815 1564 */
599d0c95 1565static int too_many_isolated(struct pglist_data *pgdat, int file,
35cd7815
RR
1566 struct scan_control *sc)
1567{
1568 unsigned long inactive, isolated;
1569
1570 if (current_is_kswapd())
1571 return 0;
1572
97c9341f 1573 if (!sane_reclaim(sc))
35cd7815
RR
1574 return 0;
1575
1576 if (file) {
599d0c95
MG
1577 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1578 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
35cd7815 1579 } else {
599d0c95
MG
1580 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1581 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
35cd7815
RR
1582 }
1583
3cf23841
FW
1584 /*
1585 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1586 * won't get blocked by normal direct-reclaimers, forming a circular
1587 * deadlock.
1588 */
d0164adc 1589 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
3cf23841
FW
1590 inactive >>= 3;
1591
35cd7815
RR
1592 return isolated > inactive;
1593}
1594
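/*
 * Editor's worked example for the check above (not part of the original
 * file): with 8,000 inactive file pages and 1,500 of them already isolated,
 * a GFP_KERNEL direct reclaimer (__GFP_IO and __GFP_FS both set) compares
 * 1500 > 8000/8 = 1000 and gets throttled, while a GFP_NOFS reclaimer
 * compares 1500 > 8000 and proceeds, which is the "allowed to isolate more
 * pages" exception described in the comment.
 */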
66635629 1595static noinline_for_stack void
75b00af7 1596putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
66635629 1597{
27ac81d8 1598 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
599d0c95 1599 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
3f79768f 1600 LIST_HEAD(pages_to_free);
66635629 1601
66635629
MG
1602 /*
1603 * Put back any unfreeable pages.
1604 */
66635629 1605 while (!list_empty(page_list)) {
3f79768f 1606 struct page *page = lru_to_page(page_list);
66635629 1607 int lru;
3f79768f 1608
309381fe 1609 VM_BUG_ON_PAGE(PageLRU(page), page);
66635629 1610 list_del(&page->lru);
39b5f29a 1611 if (unlikely(!page_evictable(page))) {
599d0c95 1612 spin_unlock_irq(&pgdat->lru_lock);
66635629 1613 putback_lru_page(page);
599d0c95 1614 spin_lock_irq(&pgdat->lru_lock);
66635629
MG
1615 continue;
1616 }
fa9add64 1617
599d0c95 1618 lruvec = mem_cgroup_page_lruvec(page, pgdat);
fa9add64 1619
7a608572 1620 SetPageLRU(page);
66635629 1621 lru = page_lru(page);
fa9add64
HD
1622 add_page_to_lru_list(page, lruvec, lru);
1623
66635629
MG
1624 if (is_active_lru(lru)) {
1625 int file = is_file_lru(lru);
9992af10
RR
1626 int numpages = hpage_nr_pages(page);
1627 reclaim_stat->recent_rotated[file] += numpages;
66635629 1628 }
2bcf8879
HD
1629 if (put_page_testzero(page)) {
1630 __ClearPageLRU(page);
1631 __ClearPageActive(page);
fa9add64 1632 del_page_from_lru_list(page, lruvec, lru);
2bcf8879
HD
1633
1634 if (unlikely(PageCompound(page))) {
599d0c95 1635 spin_unlock_irq(&pgdat->lru_lock);
747db954 1636 mem_cgroup_uncharge(page);
2bcf8879 1637 (*get_compound_page_dtor(page))(page);
599d0c95 1638 spin_lock_irq(&pgdat->lru_lock);
2bcf8879
HD
1639 } else
1640 list_add(&page->lru, &pages_to_free);
66635629
MG
1641 }
1642 }
66635629 1643
3f79768f
HD
1644 /*
1645 * To save our caller's stack, now use input list for pages to free.
1646 */
1647 list_splice(&pages_to_free, page_list);
66635629
MG
1648}
1649
399ba0b9
N
1650/*
1651 * If a kernel thread (such as nfsd for loop-back mounts) services
1652 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1653 * In that case we should only throttle if the backing device it is
1654 * writing to is congested. In other cases it is safe to throttle.
1655 */
1656static int current_may_throttle(void)
1657{
1658 return !(current->flags & PF_LESS_THROTTLE) ||
1659 current->backing_dev_info == NULL ||
1660 bdi_write_congested(current->backing_dev_info);
1661}
1662
91dcade4
MK
1663static bool inactive_reclaimable_pages(struct lruvec *lruvec,
1664 struct scan_control *sc, enum lru_list lru)
1665{
1666 int zid;
1667 struct zone *zone;
1668 int file = is_file_lru(lru);
1669 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1670
1671 if (!global_reclaim(sc))
1672 return true;
1673
1674 for (zid = sc->reclaim_idx; zid >= 0; zid--) {
1675 zone = &pgdat->node_zones[zid];
6aa303de 1676 if (!managed_zone(zone))
91dcade4
MK
1677 continue;
1678
1679 if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
1680 LRU_FILE * file) >= SWAP_CLUSTER_MAX)
1681 return true;
1682 }
1683
1684 return false;
1685}
1686
1da177e4 1687/*
b2e18757 1688 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
1742f19f 1689 * of reclaimed pages
1da177e4 1690 */
66635629 1691static noinline_for_stack unsigned long
1a93be0e 1692shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
9e3b2f8c 1693 struct scan_control *sc, enum lru_list lru)
1da177e4
LT
1694{
1695 LIST_HEAD(page_list);
e247dbce 1696 unsigned long nr_scanned;
05ff5137 1697 unsigned long nr_reclaimed = 0;
e247dbce 1698 unsigned long nr_taken;
8e950282
MG
1699 unsigned long nr_dirty = 0;
1700 unsigned long nr_congested = 0;
e2be15f6 1701 unsigned long nr_unqueued_dirty = 0;
92df3a72 1702 unsigned long nr_writeback = 0;
b1a6f21e 1703 unsigned long nr_immediate = 0;
f3fd4a61 1704 isolate_mode_t isolate_mode = 0;
3cb99451 1705 int file = is_file_lru(lru);
599d0c95 1706 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1a93be0e 1707 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
78dc583d 1708
91dcade4
MK
1709 if (!inactive_reclaimable_pages(lruvec, sc, lru))
1710 return 0;
1711
599d0c95 1712 while (unlikely(too_many_isolated(pgdat, file, sc))) {
58355c78 1713 congestion_wait(BLK_RW_ASYNC, HZ/10);
35cd7815
RR
1714
1715 /* We are about to die and free our memory. Return now. */
1716 if (fatal_signal_pending(current))
1717 return SWAP_CLUSTER_MAX;
1718 }
1719
1da177e4 1720 lru_add_drain();
f80c0673
MK
1721
1722 if (!sc->may_unmap)
61317289 1723 isolate_mode |= ISOLATE_UNMAPPED;
f80c0673 1724 if (!sc->may_writepage)
61317289 1725 isolate_mode |= ISOLATE_CLEAN;
f80c0673 1726
599d0c95 1727 spin_lock_irq(&pgdat->lru_lock);
b35ea17b 1728
5dc35979
KK
1729 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1730 &nr_scanned, sc, isolate_mode, lru);
95d918fc 1731
599d0c95 1732 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
9d5e6a9f 1733 reclaim_stat->recent_scanned[file] += nr_taken;
95d918fc 1734
89b5fae5 1735 if (global_reclaim(sc)) {
599d0c95 1736 __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
e247dbce 1737 if (current_is_kswapd())
599d0c95 1738 __count_vm_events(PGSCAN_KSWAPD, nr_scanned);
e247dbce 1739 else
599d0c95 1740 __count_vm_events(PGSCAN_DIRECT, nr_scanned);
e247dbce 1741 }
599d0c95 1742 spin_unlock_irq(&pgdat->lru_lock);
b35ea17b 1743
d563c050 1744 if (nr_taken == 0)
66635629 1745 return 0;
5ad333eb 1746
599d0c95 1747 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
8e950282
MG
1748 &nr_dirty, &nr_unqueued_dirty, &nr_congested,
1749 &nr_writeback, &nr_immediate,
1750 false);
c661b078 1751
599d0c95 1752 spin_lock_irq(&pgdat->lru_lock);
3f79768f 1753
904249aa
YH
1754 if (global_reclaim(sc)) {
1755 if (current_is_kswapd())
599d0c95 1756 __count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed);
904249aa 1757 else
599d0c95 1758 __count_vm_events(PGSTEAL_DIRECT, nr_reclaimed);
904249aa 1759 }
a74609fa 1760
27ac81d8 1761 putback_inactive_pages(lruvec, &page_list);
3f79768f 1762
599d0c95 1763 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
3f79768f 1764
599d0c95 1765 spin_unlock_irq(&pgdat->lru_lock);
3f79768f 1766
747db954 1767 mem_cgroup_uncharge_list(&page_list);
b745bc85 1768 free_hot_cold_page_list(&page_list, true);
e11da5b4 1769
92df3a72
MG
1770 /*
1771 * If reclaim is isolating dirty pages under writeback, it implies
1772 * that the long-lived page allocation rate is exceeding the page
1773 * laundering rate. Either the global limits are not being effective
1774 * at throttling processes due to the page distribution throughout
1775 * zones or there is heavy usage of a slow backing device. The
1776 * only option is to throttle from reclaim context which is not ideal
1777 * as there is no guarantee the dirtying process is throttled in the
1778 * same way balance_dirty_pages() manages.
1779 *
8e950282
MG
1780 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
1781 * of pages under writeback flagged for immediate reclaim and stall if any
1782 * are encountered in the nr_immediate check below.
92df3a72 1783 */
918fc718 1784 if (nr_writeback && nr_writeback == nr_taken)
599d0c95 1785 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
92df3a72 1786
d43006d5 1787 /*
97c9341f
TH
1788 * Legacy memcg will stall in page writeback so avoid forcibly
1789 * stalling here.
d43006d5 1790 */
97c9341f 1791 if (sane_reclaim(sc)) {
8e950282
MG
1792 /*
1793 * Tag a zone as congested if all the dirty pages scanned were
1794 * backed by a congested BDI and wait_iff_congested will stall.
1795 */
1796 if (nr_dirty && nr_dirty == nr_congested)
599d0c95 1797 set_bit(PGDAT_CONGESTED, &pgdat->flags);
8e950282 1798
b1a6f21e
MG
1799 /*
1800 * If dirty pages are scanned that are not queued for IO, it
1801 * implies that flushers are not keeping up. In this case, flag
599d0c95 1802 * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
57054651 1803 * reclaim context.
b1a6f21e
MG
1804 */
1805 if (nr_unqueued_dirty == nr_taken)
599d0c95 1806 set_bit(PGDAT_DIRTY, &pgdat->flags);
b1a6f21e
MG
1807
1808 /*
b738d764
LT
1809 * If kswapd scans pages marked for immediate
1810 * reclaim and under writeback (nr_immediate), it implies
1811 * that pages are cycling through the LRU faster than
b1a6f21e
MG
1812 * they are written, so also forcibly stall.
1813 */
b738d764 1814 if (nr_immediate && current_may_throttle())
b1a6f21e 1815 congestion_wait(BLK_RW_ASYNC, HZ/10);
e2be15f6 1816 }
d43006d5 1817
8e950282
MG
1818 /*
1819 * Stall direct reclaim for IO completions if the underlying BDI or zone
1820 * is congested. Allow kswapd to continue until it starts encountering
1821 * unqueued dirty pages or cycling through the LRU too quickly.
1822 */
399ba0b9
N
1823 if (!sc->hibernation_mode && !current_is_kswapd() &&
1824 current_may_throttle())
599d0c95 1825 wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10);
8e950282 1826
599d0c95
MG
1827 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
1828 nr_scanned, nr_reclaimed,
ba5e9579 1829 sc->priority, file);
05ff5137 1830 return nr_reclaimed;
1da177e4
LT
1831}
1832
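/*
 * Illustrative sketch added for exposition (not part of vmscan.c): how the
 * counters returned by shrink_page_list() in shrink_inactive_list() above map
 * onto the node flags and the forced stall. The flag bits and the boolean
 * output are stand-ins for PGDAT_WRITEBACK/CONGESTED/DIRTY and
 * congestion_wait(); the sane_reclaim() guard for legacy memcg is omitted.
 */
#include <stdbool.h>

#define MODEL_WRITEBACK	0x1	/* every isolated page was under writeback */
#define MODEL_CONGESTED	0x2	/* all scanned dirty pages sat on a congested BDI */
#define MODEL_DIRTY	0x4	/* dirty pages found that flushers have not queued */

static unsigned int model_reclaim_flags(unsigned long nr_taken,
					unsigned long nr_dirty,
					unsigned long nr_congested,
					unsigned long nr_unqueued_dirty,
					unsigned long nr_writeback,
					unsigned long nr_immediate,
					bool *force_stall)
{
	unsigned int flags = 0;

	if (nr_writeback && nr_writeback == nr_taken)
		flags |= MODEL_WRITEBACK;

	if (nr_dirty && nr_dirty == nr_congested)
		flags |= MODEL_CONGESTED;

	if (nr_unqueued_dirty == nr_taken)
		flags |= MODEL_DIRTY;

	/* Pages already marked for immediate reclaim are cycling too fast. */
	*force_stall = nr_immediate != 0;

	return flags;
}
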
1833/*
1834 * This moves pages from the active list to the inactive list.
1835 *
1836 * We move them the other way if the page is referenced by one or more
1837 * processes, from rmap.
1838 *
1839 * If the pages are mostly unmapped, the processing is fast and it is
a52633d8 1840 * appropriate to hold zone_lru_lock across the whole operation. But if
1da177e4 1841 * the pages are mapped, the processing is slow (page_referenced()) so we
a52633d8 1842 * should drop zone_lru_lock around each page. It's impossible to balance
1da177e4
LT
1843 * this, so instead we remove the pages from the LRU while processing them.
1844 * It is safe to rely on PG_active against the non-LRU pages in here because
1845 * nobody will play with that bit on a non-LRU page.
1846 *
0139aa7b 1847 * The downside is that we have to touch page->_refcount against each page.
1da177e4
LT
1848 * But we had to alter page->flags anyway.
1849 */
1cfb419b 1850
fa9add64 1851static void move_active_pages_to_lru(struct lruvec *lruvec,
3eb4140f 1852 struct list_head *list,
2bcf8879 1853 struct list_head *pages_to_free,
3eb4140f
WF
1854 enum lru_list lru)
1855{
599d0c95 1856 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
3eb4140f 1857 unsigned long pgmoved = 0;
3eb4140f 1858 struct page *page;
fa9add64 1859 int nr_pages;
3eb4140f 1860
3eb4140f
WF
1861 while (!list_empty(list)) {
1862 page = lru_to_page(list);
599d0c95 1863 lruvec = mem_cgroup_page_lruvec(page, pgdat);
3eb4140f 1864
309381fe 1865 VM_BUG_ON_PAGE(PageLRU(page), page);
3eb4140f
WF
1866 SetPageLRU(page);
1867
fa9add64 1868 nr_pages = hpage_nr_pages(page);
599d0c95 1869 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
925b7673 1870 list_move(&page->lru, &lruvec->lists[lru]);
fa9add64 1871 pgmoved += nr_pages;
3eb4140f 1872
2bcf8879
HD
1873 if (put_page_testzero(page)) {
1874 __ClearPageLRU(page);
1875 __ClearPageActive(page);
fa9add64 1876 del_page_from_lru_list(page, lruvec, lru);
2bcf8879
HD
1877
1878 if (unlikely(PageCompound(page))) {
599d0c95 1879 spin_unlock_irq(&pgdat->lru_lock);
747db954 1880 mem_cgroup_uncharge(page);
2bcf8879 1881 (*get_compound_page_dtor(page))(page);
599d0c95 1882 spin_lock_irq(&pgdat->lru_lock);
2bcf8879
HD
1883 } else
1884 list_add(&page->lru, pages_to_free);
3eb4140f
WF
1885 }
1886 }
9d5e6a9f 1887
3eb4140f
WF
1888 if (!is_active_lru(lru))
1889 __count_vm_events(PGDEACTIVATE, pgmoved);
1890}
1cfb419b 1891
f626012d 1892static void shrink_active_list(unsigned long nr_to_scan,
1a93be0e 1893 struct lruvec *lruvec,
f16015fb 1894 struct scan_control *sc,
9e3b2f8c 1895 enum lru_list lru)
1da177e4 1896{
44c241f1 1897 unsigned long nr_taken;
f626012d 1898 unsigned long nr_scanned;
6fe6b7e3 1899 unsigned long vm_flags;
1da177e4 1900 LIST_HEAD(l_hold); /* The pages which were snipped off */
8cab4754 1901 LIST_HEAD(l_active);
b69408e8 1902 LIST_HEAD(l_inactive);
1da177e4 1903 struct page *page;
1a93be0e 1904 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
44c241f1 1905 unsigned long nr_rotated = 0;
f3fd4a61 1906 isolate_mode_t isolate_mode = 0;
3cb99451 1907 int file = is_file_lru(lru);
599d0c95 1908 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1da177e4
LT
1909
1910 lru_add_drain();
f80c0673
MK
1911
1912 if (!sc->may_unmap)
61317289 1913 isolate_mode |= ISOLATE_UNMAPPED;
f80c0673 1914 if (!sc->may_writepage)
61317289 1915 isolate_mode |= ISOLATE_CLEAN;
f80c0673 1916
599d0c95 1917 spin_lock_irq(&pgdat->lru_lock);
925b7673 1918
5dc35979
KK
1919 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1920 &nr_scanned, sc, isolate_mode, lru);
89b5fae5 1921
599d0c95 1922 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
b7c46d15 1923 reclaim_stat->recent_scanned[file] += nr_taken;
1cfb419b 1924
9d5e6a9f 1925 if (global_reclaim(sc))
599d0c95
MG
1926 __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
1927 __count_vm_events(PGREFILL, nr_scanned);
9d5e6a9f 1928
599d0c95 1929 spin_unlock_irq(&pgdat->lru_lock);
1da177e4 1930
1da177e4
LT
1931 while (!list_empty(&l_hold)) {
1932 cond_resched();
1933 page = lru_to_page(&l_hold);
1934 list_del(&page->lru);
7e9cd484 1935
39b5f29a 1936 if (unlikely(!page_evictable(page))) {
894bc310
LS
1937 putback_lru_page(page);
1938 continue;
1939 }
1940
cc715d99
MG
1941 if (unlikely(buffer_heads_over_limit)) {
1942 if (page_has_private(page) && trylock_page(page)) {
1943 if (page_has_private(page))
1944 try_to_release_page(page, 0);
1945 unlock_page(page);
1946 }
1947 }
1948
c3ac9a8a
JW
1949 if (page_referenced(page, 0, sc->target_mem_cgroup,
1950 &vm_flags)) {
9992af10 1951 nr_rotated += hpage_nr_pages(page);
8cab4754
WF
1952 /*
1953 * Identify referenced, file-backed active pages and
1954 * give them one more trip around the active list, so
1955 * that executable code gets a better chance to stay in
1956 * memory under moderate memory pressure. Anon pages
1957 * are not likely to be evicted by use-once streaming
1958 * IO, plus JVM can create lots of anon VM_EXEC pages,
1959 * so we ignore them here.
1960 */
41e20983 1961 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
8cab4754
WF
1962 list_add(&page->lru, &l_active);
1963 continue;
1964 }
1965 }
7e9cd484 1966
5205e56e 1967 ClearPageActive(page); /* we are de-activating */
1da177e4
LT
1968 list_add(&page->lru, &l_inactive);
1969 }
1970
b555749a 1971 /*
8cab4754 1972 * Move pages back to the lru list.
b555749a 1973 */
599d0c95 1974 spin_lock_irq(&pgdat->lru_lock);
556adecb 1975 /*
8cab4754
WF
1976 * Count referenced pages from currently used mappings as rotated,
1977 * even though only some of them are actually re-activated. This
1978 * helps balance scan pressure between file and anonymous pages in
7c0db9e9 1979 * get_scan_count.
7e9cd484 1980 */
b7c46d15 1981 reclaim_stat->recent_rotated[file] += nr_rotated;
556adecb 1982
fa9add64
HD
1983 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1984 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
599d0c95
MG
1985 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1986 spin_unlock_irq(&pgdat->lru_lock);
2bcf8879 1987
747db954 1988 mem_cgroup_uncharge_list(&l_hold);
b745bc85 1989 free_hot_cold_page_list(&l_hold, true);
1da177e4
LT
1990}
1991
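/*
 * Illustrative sketch added for exposition (not part of vmscan.c): the
 * per-page decision in the shrink_active_list() loop above, reduced to three
 * booleans that stand in for page_evictable(), page_referenced() and a
 * VM_EXEC mapping of a file-backed page.
 */
#include <stdbool.h>

enum model_disposition { MODEL_PUTBACK, MODEL_KEEP_ACTIVE, MODEL_DEACTIVATE };

static enum model_disposition model_age_active_page(bool evictable,
						    bool referenced,
						    bool exec_file_backed)
{
	if (!evictable)
		return MODEL_PUTBACK;		/* back to the unevictable list */
	if (referenced && exec_file_backed)
		return MODEL_KEEP_ACTIVE;	/* one more trip around the active list */
	return MODEL_DEACTIVATE;		/* move to the inactive list */
}
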
59dc76b0
RR
1992/*
1993 * The inactive anon list should be small enough that the VM never has
1994 * to do too much work.
14797e23 1995 *
59dc76b0
RR
1996 * The inactive file list should be small enough to leave most memory
1997 * to the established workingset on the scan-resistant active list,
1998 * but large enough to avoid thrashing the aggregate readahead window.
56e49d21 1999 *
59dc76b0
RR
2000 * Both inactive lists should also be large enough that each inactive
2001 * page has a chance to be referenced again before it is reclaimed.
56e49d21 2002 *
59dc76b0
RR
2003 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2004 * on this LRU, maintained by the pageout code. A zone->inactive_ratio
2005 * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
56e49d21 2006 *
59dc76b0
RR
2007 * total target max
2008 * memory ratio inactive
2009 * -------------------------------------
2010 * 10MB 1 5MB
2011 * 100MB 1 50MB
2012 * 1GB 3 250MB
2013 * 10GB 10 0.9GB
2014 * 100GB 31 3GB
2015 * 1TB 101 10GB
2016 * 10TB 320 32GB
56e49d21 2017 */
f8d1a311
MG
2018static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
2019 struct scan_control *sc)
56e49d21 2020{
59dc76b0 2021 unsigned long inactive_ratio;
e3790144
JW
2022 unsigned long inactive;
2023 unsigned long active;
59dc76b0 2024 unsigned long gb;
f8d1a311
MG
2025 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2026 int zid;
e3790144 2027
59dc76b0
RR
2028 /*
2029 * If we don't have swap space, anonymous page deactivation
2030 * is pointless.
2031 */
2032 if (!file && !total_swap_pages)
2033 return false;
56e49d21 2034
59dc76b0
RR
2035 inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
2036 active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
56e49d21 2037
f8d1a311
MG
2038 /*
2039 * For zone-constrained allocations, it is necessary to check if
2040 * deactivations are required for lowmem to be reclaimed. This
2041 * calculates the inactive/active pages available in eligible zones.
2042 */
2043 for (zid = sc->reclaim_idx + 1; zid < MAX_NR_ZONES; zid++) {
2044 struct zone *zone = &pgdat->node_zones[zid];
2045 unsigned long inactive_zone, active_zone;
2046
6aa303de 2047 if (!managed_zone(zone))
f8d1a311
MG
2048 continue;
2049
2050 inactive_zone = zone_page_state(zone,
2051 NR_ZONE_LRU_BASE + (file * LRU_FILE));
2052 active_zone = zone_page_state(zone,
2053 NR_ZONE_LRU_BASE + (file * LRU_FILE) + LRU_ACTIVE);
2054
2055 inactive -= min(inactive, inactive_zone);
2056 active -= min(active, active_zone);
2057 }
2058
59dc76b0
RR
2059 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2060 if (gb)
2061 inactive_ratio = int_sqrt(10 * gb);
b39415b2 2062 else
59dc76b0
RR
2063 inactive_ratio = 1;
2064
2065 return inactive * inactive_ratio < active;
b39415b2
RR
2066}
2067
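/*
 * Illustrative sketch added for exposition (not part of vmscan.c): the
 * inactive_ratio computation from inactive_list_is_low() above, reproducing
 * the table in the preceding comment. int_sqrt() is modelled with a naive
 * integer square root and a 4KiB page size (PAGE_SHIFT == 12) is assumed.
 */
static unsigned long model_int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

static unsigned long model_inactive_ratio(unsigned long inactive,
					  unsigned long active)
{
	unsigned long gb = (inactive + active) >> (30 - 12);	/* LRU size in GB */

	return gb ? model_int_sqrt(10 * gb) : 1;
}

/*
 * Example: 1GB of LRU pages gives gb = 1 and a ratio of int_sqrt(10) = 3, so
 * the inactive list is considered low once inactive * 3 < active, matching
 * the 1GB row of the table above.
 */
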
4f98a2fe 2068static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1a93be0e 2069 struct lruvec *lruvec, struct scan_control *sc)
b69408e8 2070{
b39415b2 2071 if (is_active_lru(lru)) {
f8d1a311 2072 if (inactive_list_is_low(lruvec, is_file_lru(lru), sc))
1a93be0e 2073 shrink_active_list(nr_to_scan, lruvec, sc, lru);
556adecb
RR
2074 return 0;
2075 }
2076
1a93be0e 2077 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
4f98a2fe
RR
2078}
2079
9a265114
JW
2080enum scan_balance {
2081 SCAN_EQUAL,
2082 SCAN_FRACT,
2083 SCAN_ANON,
2084 SCAN_FILE,
2085};
2086
4f98a2fe
RR
2087/*
2088 * Determine how aggressively the anon and file LRU lists should be
2089 * scanned. The relative value of each set of LRU lists is determined
2090 * by looking at the fraction of the scanned pages that we rotated back
2091 * onto the active list instead of evicting.
2092 *
be7bd59d
WL
2093 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2094 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
4f98a2fe 2095 */
33377678 2096static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
6b4f7799
JW
2097 struct scan_control *sc, unsigned long *nr,
2098 unsigned long *lru_pages)
4f98a2fe 2099{
33377678 2100 int swappiness = mem_cgroup_swappiness(memcg);
9a265114
JW
2101 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2102 u64 fraction[2];
2103 u64 denominator = 0; /* gcc */
599d0c95 2104 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4f98a2fe 2105 unsigned long anon_prio, file_prio;
9a265114 2106 enum scan_balance scan_balance;
0bf1457f 2107 unsigned long anon, file;
9a265114 2108 bool force_scan = false;
4f98a2fe 2109 unsigned long ap, fp;
4111304d 2110 enum lru_list lru;
6f04f48d
SS
2111 bool some_scanned;
2112 int pass;
246e87a9 2113
f11c0ca5
JW
2114 /*
2115 * If the zone or memcg is small, nr[l] can be 0. This
2116 * results in no scanning on this priority and a potential
2117 * priority drop. Global direct reclaim can go to the next
2118 * zone and tends to have no problems. Global kswapd is for
2119 * zone balancing and it needs to scan a minimum amount. When
2120 * reclaiming for a memcg, a priority drop can cause high
2121 * latencies, so it's better to scan a minimum amount there as
2122 * well.
2123 */
90cbc250 2124 if (current_is_kswapd()) {
599d0c95 2125 if (!pgdat_reclaimable(pgdat))
90cbc250 2126 force_scan = true;
eb01aaab 2127 if (!mem_cgroup_online(memcg))
90cbc250
VD
2128 force_scan = true;
2129 }
89b5fae5 2130 if (!global_reclaim(sc))
a4d3e9e7 2131 force_scan = true;
76a33fc3
SL
2132
2133 /* If we have no swap space, do not bother scanning anon pages. */
d8b38438 2134 if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
9a265114 2135 scan_balance = SCAN_FILE;
76a33fc3
SL
2136 goto out;
2137 }
4f98a2fe 2138
10316b31
JW
2139 /*
2140 * Global reclaim will swap to prevent OOM even with no
2141 * swappiness, but memcg users want to use this knob to
2142 * disable swapping for individual groups completely when
2143 * using the memory controller's swap limit feature would be
2144 * too expensive.
2145 */
02695175 2146 if (!global_reclaim(sc) && !swappiness) {
9a265114 2147 scan_balance = SCAN_FILE;
10316b31
JW
2148 goto out;
2149 }
2150
2151 /*
2152 * Do not apply any pressure balancing cleverness when the
2153 * system is close to OOM, scan both anon and file equally
2154 * (unless the swappiness setting disagrees with swapping).
2155 */
02695175 2156 if (!sc->priority && swappiness) {
9a265114 2157 scan_balance = SCAN_EQUAL;
10316b31
JW
2158 goto out;
2159 }
2160
62376251
JW
2161 /*
2162 * Prevent the reclaimer from falling into the cache trap: as
2163 * cache pages start out inactive, every cache fault will tip
2164 * the scan balance towards the file LRU. And as the file LRU
2165 * shrinks, so does the window for rotation from references.
2166 * This means we have a runaway feedback loop where a tiny
2167 * thrashing file LRU becomes infinitely more attractive than
2168 * anon pages. Try to detect this based on file LRU size.
2169 */
2170 if (global_reclaim(sc)) {
599d0c95
MG
2171 unsigned long pgdatfile;
2172 unsigned long pgdatfree;
2173 int z;
2174 unsigned long total_high_wmark = 0;
2ab051e1 2175
599d0c95
MG
2176 pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2177 pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
2178 node_page_state(pgdat, NR_INACTIVE_FILE);
2179
2180 for (z = 0; z < MAX_NR_ZONES; z++) {
2181 struct zone *zone = &pgdat->node_zones[z];
6aa303de 2182 if (!managed_zone(zone))
599d0c95
MG
2183 continue;
2184
2185 total_high_wmark += high_wmark_pages(zone);
2186 }
62376251 2187
599d0c95 2188 if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
62376251
JW
2189 scan_balance = SCAN_ANON;
2190 goto out;
2191 }
2192 }
2193
7c5bd705 2194 /*
316bda0e
VD
2195 * If there is enough inactive page cache, i.e. if the size of the
2196 * inactive list is greater than that of the active list *and* the
2197 * inactive list actually has some pages to scan on this priority, we
2198 * do not reclaim anything from the anonymous working set right now.
2199 * Without the second condition we could end up never scanning an
2200 * lruvec even if it has plenty of old anonymous pages unless the
2201 * system is under heavy pressure.
7c5bd705 2202 */
f8d1a311 2203 if (!inactive_list_is_low(lruvec, true, sc) &&
23047a96 2204 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
9a265114 2205 scan_balance = SCAN_FILE;
7c5bd705
JW
2206 goto out;
2207 }
2208
9a265114
JW
2209 scan_balance = SCAN_FRACT;
2210
58c37f6e
KM
2211 /*
2212 * With swappiness at 100, anonymous and file have the same priority.
2213 * This scanning priority is essentially the inverse of IO cost.
2214 */
02695175 2215 anon_prio = swappiness;
75b00af7 2216 file_prio = 200 - anon_prio;
58c37f6e 2217
4f98a2fe
RR
2218 /*
2219 * OK, so we have swap space and a fair amount of page cache
2220 * pages. We use the recently rotated / recently scanned
2221 * ratios to determine how valuable each cache is.
2222 *
2223 * Because workloads change over time (and to avoid overflow)
2224 * we keep these statistics as a floating average, which ends
2225 * up weighing recent references more than old ones.
2226 *
2227 * anon in [0], file in [1]
2228 */
2ab051e1 2229
23047a96
JW
2230 anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
2231 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
2232 file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
2233 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
2ab051e1 2234
599d0c95 2235 spin_lock_irq(&pgdat->lru_lock);
6e901571 2236 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
6e901571
KM
2237 reclaim_stat->recent_scanned[0] /= 2;
2238 reclaim_stat->recent_rotated[0] /= 2;
4f98a2fe
RR
2239 }
2240
6e901571 2241 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
6e901571
KM
2242 reclaim_stat->recent_scanned[1] /= 2;
2243 reclaim_stat->recent_rotated[1] /= 2;
4f98a2fe
RR
2244 }
2245
4f98a2fe 2246 /*
00d8089c
RR
2247 * The amount of pressure on anon vs file pages is inversely
2248 * proportional to the fraction of recently scanned pages on
2249 * each list that were recently referenced and in active use.
4f98a2fe 2250 */
fe35004f 2251 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
6e901571 2252 ap /= reclaim_stat->recent_rotated[0] + 1;
4f98a2fe 2253
fe35004f 2254 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
6e901571 2255 fp /= reclaim_stat->recent_rotated[1] + 1;
599d0c95 2256 spin_unlock_irq(&pgdat->lru_lock);
4f98a2fe 2257
76a33fc3
SL
2258 fraction[0] = ap;
2259 fraction[1] = fp;
2260 denominator = ap + fp + 1;
2261out:
6f04f48d
SS
2262 some_scanned = false;
2263 /* Only use force_scan on second pass. */
2264 for (pass = 0; !some_scanned && pass < 2; pass++) {
6b4f7799 2265 *lru_pages = 0;
6f04f48d
SS
2266 for_each_evictable_lru(lru) {
2267 int file = is_file_lru(lru);
2268 unsigned long size;
2269 unsigned long scan;
6e08a369 2270
23047a96 2271 size = lruvec_lru_size(lruvec, lru);
6f04f48d 2272 scan = size >> sc->priority;
9a265114 2273
6f04f48d
SS
2274 if (!scan && pass && force_scan)
2275 scan = min(size, SWAP_CLUSTER_MAX);
9a265114 2276
6f04f48d
SS
2277 switch (scan_balance) {
2278 case SCAN_EQUAL:
2279 /* Scan lists relative to size */
2280 break;
2281 case SCAN_FRACT:
2282 /*
2283 * Scan types proportional to swappiness and
2284 * their relative recent reclaim efficiency.
2285 */
2286 scan = div64_u64(scan * fraction[file],
2287 denominator);
2288 break;
2289 case SCAN_FILE:
2290 case SCAN_ANON:
2291 /* Scan one type exclusively */
6b4f7799
JW
2292 if ((scan_balance == SCAN_FILE) != file) {
2293 size = 0;
6f04f48d 2294 scan = 0;
6b4f7799 2295 }
6f04f48d
SS
2296 break;
2297 default:
2298 /* Look ma, no brain */
2299 BUG();
2300 }
6b4f7799
JW
2301
2302 *lru_pages += size;
6f04f48d 2303 nr[lru] = scan;
6b4f7799 2304
9a265114 2305 /*
6f04f48d
SS
2306 * Skip the second pass and don't force_scan,
2307 * if we found something to scan.
9a265114 2308 */
6f04f48d 2309 some_scanned |= !!scan;
9a265114 2310 }
76a33fc3 2311 }
6e08a369 2312}
4f98a2fe 2313
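/*
 * Illustrative sketch added for exposition (not part of vmscan.c): the
 * SCAN_FRACT arithmetic from get_scan_count() above, reduced to the
 * recent_scanned/recent_rotated averages. All parameter names are
 * hypothetical stand-ins for the reclaim_stat fields and lruvec sizes.
 */
static void model_scan_fract(unsigned long swappiness, int priority,
			     unsigned long anon_scanned, unsigned long anon_rotated,
			     unsigned long file_scanned, unsigned long file_rotated,
			     unsigned long anon_size, unsigned long file_size,
			     unsigned long *anon_scan, unsigned long *file_scan)
{
	unsigned long anon_prio = swappiness;
	unsigned long file_prio = 200 - anon_prio;
	unsigned long long ap, fp, denominator;

	/* Pressure on each list is inversely proportional to its rotation rate. */
	ap = (unsigned long long)anon_prio * (anon_scanned + 1) / (anon_rotated + 1);
	fp = (unsigned long long)file_prio * (file_scanned + 1) / (file_rotated + 1);
	denominator = ap + fp + 1;

	*anon_scan = (unsigned long)((anon_size >> priority) * ap / denominator);
	*file_scan = (unsigned long)((file_size >> priority) * fp / denominator);
}
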
9b4f98cd 2314/*
a9dd0a83 2315 * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
9b4f98cd 2316 */
a9dd0a83 2317static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
33377678 2318 struct scan_control *sc, unsigned long *lru_pages)
9b4f98cd 2319{
ef8f2327 2320 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
9b4f98cd 2321 unsigned long nr[NR_LRU_LISTS];
e82e0561 2322 unsigned long targets[NR_LRU_LISTS];
9b4f98cd
JW
2323 unsigned long nr_to_scan;
2324 enum lru_list lru;
2325 unsigned long nr_reclaimed = 0;
2326 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2327 struct blk_plug plug;
1a501907 2328 bool scan_adjusted;
9b4f98cd 2329
33377678 2330 get_scan_count(lruvec, memcg, sc, nr, lru_pages);
9b4f98cd 2331
e82e0561
MG
2332 /* Record the original scan target for proportional adjustments later */
2333 memcpy(targets, nr, sizeof(nr));
2334
1a501907
MG
2335 /*
2336 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2337 * event that can occur when there is little memory pressure e.g.
2338 * multiple streaming readers/writers. Hence, we do not abort scanning
2339 * when the requested number of pages are reclaimed when scanning at
2340 * DEF_PRIORITY on the assumption that the fact we are direct
2341 * reclaiming implies that kswapd is not keeping up and it is best to
2342 * do a batch of work at once. For memcg reclaim one check is made to
2343 * abort proportional reclaim if either the file or anon lru has already
2344 * dropped to zero at the first pass.
2345 */
2346 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2347 sc->priority == DEF_PRIORITY);
2348
9b4f98cd
JW
2349 blk_start_plug(&plug);
2350 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2351 nr[LRU_INACTIVE_FILE]) {
e82e0561
MG
2352 unsigned long nr_anon, nr_file, percentage;
2353 unsigned long nr_scanned;
2354
9b4f98cd
JW
2355 for_each_evictable_lru(lru) {
2356 if (nr[lru]) {
2357 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2358 nr[lru] -= nr_to_scan;
2359
2360 nr_reclaimed += shrink_list(lru, nr_to_scan,
2361 lruvec, sc);
2362 }
2363 }
e82e0561 2364
bd041733
MH
2365 cond_resched();
2366
e82e0561
MG
2367 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2368 continue;
2369
e82e0561
MG
2370 /*
2371 * For kswapd and memcg, reclaim at least the number of pages
1a501907 2372 * requested. Ensure that the anon and file LRUs are scanned
e82e0561
MG
2373 * proportionally to what was requested by get_scan_count(). We
2374 * stop reclaiming one LRU and reduce the amount scanning
2375 * proportional to the original scan target.
2376 */
2377 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2378 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2379
1a501907
MG
2380 /*
2381 * It's just vindictive to attack the larger once the smaller
2382 * has gone to zero. And given the way we stop scanning the
2383 * smaller below, this makes sure that we only make one nudge
2384 * towards proportionality once we've got nr_to_reclaim.
2385 */
2386 if (!nr_file || !nr_anon)
2387 break;
2388
e82e0561
MG
2389 if (nr_file > nr_anon) {
2390 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2391 targets[LRU_ACTIVE_ANON] + 1;
2392 lru = LRU_BASE;
2393 percentage = nr_anon * 100 / scan_target;
2394 } else {
2395 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2396 targets[LRU_ACTIVE_FILE] + 1;
2397 lru = LRU_FILE;
2398 percentage = nr_file * 100 / scan_target;
2399 }
2400
2401 /* Stop scanning the smaller of the LRU */
2402 nr[lru] = 0;
2403 nr[lru + LRU_ACTIVE] = 0;
2404
2405 /*
2406 * Recalculate the other LRU scan count based on its original
2407 * scan target and the percentage scanning already complete
2408 */
2409 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2410 nr_scanned = targets[lru] - nr[lru];
2411 nr[lru] = targets[lru] * (100 - percentage) / 100;
2412 nr[lru] -= min(nr[lru], nr_scanned);
2413
2414 lru += LRU_ACTIVE;
2415 nr_scanned = targets[lru] - nr[lru];
2416 nr[lru] = targets[lru] * (100 - percentage) / 100;
2417 nr[lru] -= min(nr[lru], nr_scanned);
2418
2419 scan_adjusted = true;
9b4f98cd
JW
2420 }
2421 blk_finish_plug(&plug);
2422 sc->nr_reclaimed += nr_reclaimed;
2423
2424 /*
2425 * Even if we did not try to evict anon pages at all, we want to
2426 * rebalance the anon lru active/inactive ratio.
2427 */
f8d1a311 2428 if (inactive_list_is_low(lruvec, false, sc))
9b4f98cd
JW
2429 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2430 sc, LRU_ACTIVE_ANON);
9b4f98cd
JW
2431}
2432
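/*
 * Illustrative sketch added for exposition (not part of vmscan.c): the
 * proportional adjustment made in shrink_node_memcg() above once enough pages
 * have been reclaimed. Indices 0/1 are the inactive/active lists of the
 * smaller LRU pair and 2/3 of the larger pair; "targets" holds the original
 * scan goals and "nr" the remaining work.
 */
static unsigned long model_min(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static void model_scan_adjust(unsigned long nr[4], const unsigned long targets[4])
{
	unsigned long scan_target = targets[0] + targets[1] + 1;
	unsigned long percentage = (nr[0] + nr[1]) * 100 / scan_target;
	unsigned long nr_scanned;
	int lru;

	/* Stop scanning the smaller pair entirely. */
	nr[0] = 0;
	nr[1] = 0;

	/* Scale the larger pair back to the fraction already completed. */
	for (lru = 2; lru <= 3; lru++) {
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= model_min(nr[lru], nr_scanned);
	}
}
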
23b9da55 2433/* Use reclaim/compaction for costly allocs or under memory pressure */
9e3b2f8c 2434static bool in_reclaim_compaction(struct scan_control *sc)
23b9da55 2435{
d84da3f9 2436 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
23b9da55 2437 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
9e3b2f8c 2438 sc->priority < DEF_PRIORITY - 2))
23b9da55
MG
2439 return true;
2440
2441 return false;
2442}
2443
3e7d3449 2444/*
23b9da55
MG
2445 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2446 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2447 * true if more pages should be reclaimed such that when the page allocator
2448 * calls try_to_compact_zone() it will have enough free pages to succeed.
2449 * It will give up earlier than that if there is difficulty reclaiming pages.
3e7d3449 2450 */
a9dd0a83 2451static inline bool should_continue_reclaim(struct pglist_data *pgdat,
3e7d3449
MG
2452 unsigned long nr_reclaimed,
2453 unsigned long nr_scanned,
2454 struct scan_control *sc)
2455{
2456 unsigned long pages_for_compaction;
2457 unsigned long inactive_lru_pages;
a9dd0a83 2458 int z;
3e7d3449
MG
2459
2460 /* If not in reclaim/compaction mode, stop */
9e3b2f8c 2461 if (!in_reclaim_compaction(sc))
3e7d3449
MG
2462 return false;
2463
2876592f
MG
2464 /* Consider stopping depending on scan and reclaim activity */
2465 if (sc->gfp_mask & __GFP_REPEAT) {
2466 /*
2467 * For __GFP_REPEAT allocations, stop reclaiming if the
2468 * full LRU list has been scanned and we are still failing
2469 * to reclaim pages. This full LRU scan is potentially
2470 * expensive but a __GFP_REPEAT caller really wants to succeed
2471 */
2472 if (!nr_reclaimed && !nr_scanned)
2473 return false;
2474 } else {
2475 /*
2476 * For non-__GFP_REPEAT allocations which can presumably
2477 * fail without consequence, stop if we failed to reclaim
2478 * any pages from the last SWAP_CLUSTER_MAX number of
2479 * pages that were scanned. This will return to the
2480 * caller faster at the risk that reclaim/compaction and
2481 * the resulting allocation attempt fail.
2482 */
2483 if (!nr_reclaimed)
2484 return false;
2485 }
3e7d3449
MG
2486
2487 /*
2488 * If we have not reclaimed enough pages for compaction and the
2489 * inactive lists are large enough, continue reclaiming
2490 */
9861a62c 2491 pages_for_compaction = compact_gap(sc->order);
a9dd0a83 2492 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
ec8acf20 2493 if (get_nr_swap_pages() > 0)
a9dd0a83 2494 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
3e7d3449
MG
2495 if (sc->nr_reclaimed < pages_for_compaction &&
2496 inactive_lru_pages > pages_for_compaction)
2497 return true;
2498
2499 /* If compaction would go ahead or the allocation would succeed, stop */
a9dd0a83
MG
2500 for (z = 0; z <= sc->reclaim_idx; z++) {
2501 struct zone *zone = &pgdat->node_zones[z];
6aa303de 2502 if (!managed_zone(zone))
a9dd0a83
MG
2503 continue;
2504
2505 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
cf378319 2506 case COMPACT_SUCCESS:
a9dd0a83
MG
2507 case COMPACT_CONTINUE:
2508 return false;
2509 default:
2510 /* check next zone */
2511 ;
2512 }
3e7d3449 2513 }
a9dd0a83 2514 return true;
3e7d3449
MG
2515}
2516
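/*
 * Illustrative sketch added for exposition (not part of vmscan.c): the core
 * size test in should_continue_reclaim() above. compact_gap() is modelled
 * here as twice the request size, following the "twice the allocation size"
 * convention described near kswapd_shrink_node() below; treat that value as
 * an assumption. The compaction_suitable() walk over the zones is omitted.
 */
#include <stdbool.h>

static bool model_keep_reclaiming(unsigned long nr_reclaimed,
				  unsigned long inactive_lru_pages,
				  int order)
{
	unsigned long pages_for_compaction = 2UL << order;

	return nr_reclaimed < pages_for_compaction &&
	       inactive_lru_pages > pages_for_compaction;
}
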
970a39a3 2517static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
1da177e4 2518{
cb731d6c 2519 struct reclaim_state *reclaim_state = current->reclaim_state;
f0fdc5e8 2520 unsigned long nr_reclaimed, nr_scanned;
2344d7e4 2521 bool reclaimable = false;
1da177e4 2522
9b4f98cd
JW
2523 do {
2524 struct mem_cgroup *root = sc->target_mem_cgroup;
2525 struct mem_cgroup_reclaim_cookie reclaim = {
ef8f2327 2526 .pgdat = pgdat,
9b4f98cd
JW
2527 .priority = sc->priority,
2528 };
a9dd0a83 2529 unsigned long node_lru_pages = 0;
694fbc0f 2530 struct mem_cgroup *memcg;
3e7d3449 2531
9b4f98cd
JW
2532 nr_reclaimed = sc->nr_reclaimed;
2533 nr_scanned = sc->nr_scanned;
1da177e4 2534
694fbc0f
AM
2535 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2536 do {
6b4f7799 2537 unsigned long lru_pages;
8e8ae645 2538 unsigned long reclaimed;
cb731d6c 2539 unsigned long scanned;
5660048c 2540
241994ed
JW
2541 if (mem_cgroup_low(root, memcg)) {
2542 if (!sc->may_thrash)
2543 continue;
2544 mem_cgroup_events(memcg, MEMCG_LOW, 1);
2545 }
2546
8e8ae645 2547 reclaimed = sc->nr_reclaimed;
cb731d6c 2548 scanned = sc->nr_scanned;
f9be23d6 2549
a9dd0a83
MG
2550 shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2551 node_lru_pages += lru_pages;
f16015fb 2552
b5afba29 2553 if (memcg)
a9dd0a83 2554 shrink_slab(sc->gfp_mask, pgdat->node_id,
cb731d6c
VD
2555 memcg, sc->nr_scanned - scanned,
2556 lru_pages);
2557
8e8ae645
JW
2558 /* Record the group's reclaim efficiency */
2559 vmpressure(sc->gfp_mask, memcg, false,
2560 sc->nr_scanned - scanned,
2561 sc->nr_reclaimed - reclaimed);
2562
9b4f98cd 2563 /*
a394cb8e
MH
2564 * Direct reclaim and kswapd have to scan all memory
2565 * cgroups to fulfill the overall scan target for the
a9dd0a83 2566 * node.
a394cb8e
MH
2567 *
2568 * Limit reclaim, on the other hand, only cares about
2569 * nr_to_reclaim pages to be reclaimed and it will
2570 * retry with decreasing priority if one round over the
2571 * whole hierarchy is not sufficient.
9b4f98cd 2572 */
a394cb8e
MH
2573 if (!global_reclaim(sc) &&
2574 sc->nr_reclaimed >= sc->nr_to_reclaim) {
9b4f98cd
JW
2575 mem_cgroup_iter_break(root, memcg);
2576 break;
2577 }
241994ed 2578 } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
70ddf637 2579
6b4f7799
JW
2580 /*
2581 * Shrink the slab caches in the same proportion that
2582 * the eligible LRU pages were scanned.
2583 */
b2e18757 2584 if (global_reclaim(sc))
a9dd0a83 2585 shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
cb731d6c 2586 sc->nr_scanned - nr_scanned,
a9dd0a83 2587 node_lru_pages);
cb731d6c
VD
2588
2589 if (reclaim_state) {
2590 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2591 reclaim_state->reclaimed_slab = 0;
6b4f7799
JW
2592 }
2593
8e8ae645
JW
2594 /* Record the subtree's reclaim efficiency */
2595 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
70ddf637
AV
2596 sc->nr_scanned - nr_scanned,
2597 sc->nr_reclaimed - nr_reclaimed);
2598
2344d7e4
JW
2599 if (sc->nr_reclaimed - nr_reclaimed)
2600 reclaimable = true;
2601
a9dd0a83 2602 } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
9b4f98cd 2603 sc->nr_scanned - nr_scanned, sc));
2344d7e4
JW
2604
2605 return reclaimable;
f16015fb
JW
2606}
2607
53853e2d 2608/*
fdd4c614
VB
2609 * Returns true if compaction should go ahead for a costly-order request, or
2610 * the allocation would already succeed without compaction. Return false if we
2611 * should reclaim first.
53853e2d 2612 */
4f588331 2613static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
fe4b1b24 2614{
31483b6a 2615 unsigned long watermark;
fdd4c614 2616 enum compact_result suitable;
fe4b1b24 2617
fdd4c614
VB
2618 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
2619 if (suitable == COMPACT_SUCCESS)
2620 /* Allocation should succeed already. Don't reclaim. */
2621 return true;
2622 if (suitable == COMPACT_SKIPPED)
2623 /* Compaction cannot yet proceed. Do reclaim. */
2624 return false;
fe4b1b24 2625
53853e2d 2626 /*
fdd4c614
VB
2627 * Compaction is already possible, but it takes time to run and there
2628 * are potentially other callers using the pages just freed. So proceed
2629 * with reclaim to make a buffer of free pages available to give
2630 * compaction a reasonable chance of completing and allocating the page.
2631 * Note that we won't actually reclaim the whole buffer in one attempt
2632 * as the target watermark in should_continue_reclaim() is lower. But if
2633 * we are already above the high+gap watermark, don't reclaim at all.
53853e2d 2634 */
fdd4c614 2635 watermark = high_wmark_pages(zone) + compact_gap(sc->order);
fe4b1b24 2636
fdd4c614 2637 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
fe4b1b24
MG
2638}
2639
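/*
 * Illustrative sketch added for exposition (not part of vmscan.c): the
 * decision made by compaction_ready() above, with compaction_suitable()
 * folded into a single input, the watermark check reduced to a plain
 * comparison, and compact_gap() again modelled as twice the request size
 * (an assumption).
 */
#include <stdbool.h>

enum model_suitable { MODEL_SUCCESS, MODEL_SKIPPED, MODEL_CONTINUE };

static bool model_compaction_ready(enum model_suitable suitable,
				   unsigned long free_pages,
				   unsigned long high_wmark,
				   int order)
{
	if (suitable == MODEL_SUCCESS)
		return true;	/* allocation should already succeed, skip reclaim */
	if (suitable == MODEL_SKIPPED)
		return false;	/* compaction cannot run yet, reclaim first */

	/* Compaction is possible: only skip reclaim above the high+gap mark. */
	return free_pages >= high_wmark + (2UL << order);
}
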
1da177e4
LT
2640/*
2641 * This is the direct reclaim path, for page-allocating processes. We only
2642 * try to reclaim pages from zones which will satisfy the caller's allocation
2643 * request.
2644 *
1da177e4
LT
2645 * If a zone is deemed to be full of pinned pages then just give it a light
2646 * scan then give up on it.
2647 */
0a0337e0 2648static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
1da177e4 2649{
dd1a239f 2650 struct zoneref *z;
54a6eb5c 2651 struct zone *zone;
0608f43d
AM
2652 unsigned long nr_soft_reclaimed;
2653 unsigned long nr_soft_scanned;
619d0d76 2654 gfp_t orig_mask;
79dafcdc 2655 pg_data_t *last_pgdat = NULL;
1cfb419b 2656
cc715d99
MG
2657 /*
2658 * If the number of buffer_heads in the machine exceeds the maximum
2659 * allowed level, force direct reclaim to scan the highmem zone as
2660 * highmem pages could be pinning lowmem pages storing buffer_heads
2661 */
619d0d76 2662 orig_mask = sc->gfp_mask;
b2e18757 2663 if (buffer_heads_over_limit) {
cc715d99 2664 sc->gfp_mask |= __GFP_HIGHMEM;
4f588331 2665 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
b2e18757 2666 }
cc715d99 2667
d4debc66 2668 for_each_zone_zonelist_nodemask(zone, z, zonelist,
b2e18757 2669 sc->reclaim_idx, sc->nodemask) {
1cfb419b
KH
2670 /*
2671 * Take care that memory controller reclaiming has only a small
2672 * influence on the global LRU.
2673 */
89b5fae5 2674 if (global_reclaim(sc)) {
344736f2
VD
2675 if (!cpuset_zone_allowed(zone,
2676 GFP_KERNEL | __GFP_HARDWALL))
1cfb419b 2677 continue;
65ec02cb 2678
6e543d57 2679 if (sc->priority != DEF_PRIORITY &&
599d0c95 2680 !pgdat_reclaimable(zone->zone_pgdat))
1cfb419b 2681 continue; /* Let kswapd poll it */
0b06496a
JW
2682
2683 /*
2684 * If we already have plenty of memory free for
2685 * compaction in this zone, don't free any more.
2686 * Even though compaction is invoked for any
2687 * non-zero order, only frequent costly order
2688 * reclamation is disruptive enough to become a
2689 * noticeable problem, like transparent huge
2690 * page allocations.
2691 */
2692 if (IS_ENABLED(CONFIG_COMPACTION) &&
2693 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
4f588331 2694 compaction_ready(zone, sc)) {
0b06496a
JW
2695 sc->compaction_ready = true;
2696 continue;
e0887c19 2697 }
0b06496a 2698
79dafcdc
MG
2699 /*
2700 * Shrink each node in the zonelist once. If the
2701 * zonelist is ordered by zone (not the default) then a
2702 * node may be shrunk multiple times but in that case
2703 * the user prefers lower zones being preserved.
2704 */
2705 if (zone->zone_pgdat == last_pgdat)
2706 continue;
2707
0608f43d
AM
2708 /*
2709 * This steals pages from memory cgroups over softlimit
2710 * and returns the number of reclaimed pages and
2711 * scanned pages. This works for global memory pressure
2712 * and balancing, not for a memcg's limit.
2713 */
2714 nr_soft_scanned = 0;
ef8f2327 2715 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
0608f43d
AM
2716 sc->order, sc->gfp_mask,
2717 &nr_soft_scanned);
2718 sc->nr_reclaimed += nr_soft_reclaimed;
2719 sc->nr_scanned += nr_soft_scanned;
ac34a1a3 2720 /* need some check to avoid more shrink_zone() calls */
1cfb419b 2721 }
408d8544 2722
79dafcdc
MG
2723 /* See comment about same check for global reclaim above */
2724 if (zone->zone_pgdat == last_pgdat)
2725 continue;
2726 last_pgdat = zone->zone_pgdat;
970a39a3 2727 shrink_node(zone->zone_pgdat, sc);
1da177e4 2728 }
e0c23279 2729
619d0d76
WY
2730 /*
2731 * Restore to original mask to avoid the impact on the caller if we
2732 * promoted it to __GFP_HIGHMEM.
2733 */
2734 sc->gfp_mask = orig_mask;
1da177e4 2735}
4f98a2fe 2736
1da177e4
LT
2737/*
2738 * This is the main entry point to direct page reclaim.
2739 *
2740 * If a full scan of the inactive list fails to free enough memory then we
2741 * are "out of memory" and something needs to be killed.
2742 *
2743 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2744 * high - the zone may be full of dirty or under-writeback pages, which this
5b0830cb
JA
2745 * caller can't do much about. We kick the writeback threads and take explicit
2746 * naps in the hope that some of these pages can be written. But if the
2747 * allocating task holds filesystem locks which prevent writeout this might not
2748 * work, and the allocation attempt will fail.
a41f24ea
NA
2749 *
2750 * returns: 0, if no pages reclaimed
2751 * else, the number of pages reclaimed
1da177e4 2752 */
dac1d27b 2753static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
3115cd91 2754 struct scan_control *sc)
1da177e4 2755{
241994ed 2756 int initial_priority = sc->priority;
69e05944 2757 unsigned long total_scanned = 0;
22fba335 2758 unsigned long writeback_threshold;
241994ed 2759retry:
873b4771
KK
2760 delayacct_freepages_start();
2761
89b5fae5 2762 if (global_reclaim(sc))
7cc30fcf 2763 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
1da177e4 2764
9e3b2f8c 2765 do {
70ddf637
AV
2766 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2767 sc->priority);
66e1707b 2768 sc->nr_scanned = 0;
0a0337e0 2769 shrink_zones(zonelist, sc);
c6a8a8c5 2770
66e1707b 2771 total_scanned += sc->nr_scanned;
bb21c7ce 2772 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
0b06496a
JW
2773 break;
2774
2775 if (sc->compaction_ready)
2776 break;
1da177e4 2777
0e50ce3b
MK
2778 /*
2779 * If we're getting trouble reclaiming, start doing
2780 * writepage even in laptop mode.
2781 */
2782 if (sc->priority < DEF_PRIORITY - 2)
2783 sc->may_writepage = 1;
2784
1da177e4
LT
2785 /*
2786 * Try to write back as many pages as we just scanned. This
2787 * tends to cause slow streaming writers to write data to the
2788 * disk smoothly, at the dirtying rate, which is nice. But
2789 * that's undesirable in laptop mode, where we *want* lumpy
2790 * writeout. So in laptop mode, write out the whole world.
2791 */
22fba335
KM
2792 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2793 if (total_scanned > writeback_threshold) {
0e175a18
CW
2794 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2795 WB_REASON_TRY_TO_FREE_PAGES);
66e1707b 2796 sc->may_writepage = 1;
1da177e4 2797 }
0b06496a 2798 } while (--sc->priority >= 0);
bb21c7ce 2799
873b4771
KK
2800 delayacct_freepages_end();
2801
bb21c7ce
KM
2802 if (sc->nr_reclaimed)
2803 return sc->nr_reclaimed;
2804
0cee34fd 2805 /* Aborted reclaim to try compaction? don't OOM, then */
0b06496a 2806 if (sc->compaction_ready)
7335084d
MG
2807 return 1;
2808
241994ed
JW
2809 /* Untapped cgroup reserves? Don't OOM, retry. */
2810 if (!sc->may_thrash) {
2811 sc->priority = initial_priority;
2812 sc->may_thrash = 1;
2813 goto retry;
2814 }
2815
bb21c7ce 2816 return 0;
1da177e4
LT
2817}
2818
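/*
 * Illustrative sketch added for exposition (not part of vmscan.c): the
 * flusher wakeup heuristic in do_try_to_free_pages() above. Once total
 * scanning exceeds 1.5x the reclaim target, the flusher threads are woken
 * and writepage is enabled for subsequent passes.
 */
#include <stdbool.h>

static bool model_should_wake_flushers(unsigned long total_scanned,
				       unsigned long nr_to_reclaim)
{
	unsigned long writeback_threshold = nr_to_reclaim + nr_to_reclaim / 2;

	return total_scanned > writeback_threshold;
}
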
5515061d
MG
2819static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2820{
2821 struct zone *zone;
2822 unsigned long pfmemalloc_reserve = 0;
2823 unsigned long free_pages = 0;
2824 int i;
2825 bool wmark_ok;
2826
2827 for (i = 0; i <= ZONE_NORMAL; i++) {
2828 zone = &pgdat->node_zones[i];
6aa303de 2829 if (!managed_zone(zone) ||
599d0c95 2830 pgdat_reclaimable_pages(pgdat) == 0)
675becce
MG
2831 continue;
2832
5515061d
MG
2833 pfmemalloc_reserve += min_wmark_pages(zone);
2834 free_pages += zone_page_state(zone, NR_FREE_PAGES);
2835 }
2836
675becce
MG
2837 /* If there are no reserves (unexpected config) then do not throttle */
2838 if (!pfmemalloc_reserve)
2839 return true;
2840
5515061d
MG
2841 wmark_ok = free_pages > pfmemalloc_reserve / 2;
2842
2843 /* kswapd must be awake if processes are being throttled */
2844 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
38087d9b 2845 pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
5515061d
MG
2846 (enum zone_type)ZONE_NORMAL);
2847 wake_up_interruptible(&pgdat->kswapd_wait);
2848 }
2849
2850 return wmark_ok;
2851}
2852
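/*
 * Illustrative sketch added for exposition (not part of vmscan.c): the
 * reserve test in pfmemalloc_watermark_ok() above, with the walk over the
 * zones up to ZONE_NORMAL reduced to the two sums it produces.
 */
#include <stdbool.h>

static bool model_pfmemalloc_watermark_ok(unsigned long free_pages,
					  unsigned long pfmemalloc_reserve)
{
	/* No reserves (unexpected config): do not throttle. */
	if (!pfmemalloc_reserve)
		return true;

	/* Throttle direct reclaimers until free pages exceed half the reserve. */
	return free_pages > pfmemalloc_reserve / 2;
}
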
2853/*
2854 * Throttle direct reclaimers if backing storage is backed by the network
2855 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2856 * depleted. kswapd will continue to make progress and wake the processes
50694c28
MG
2857 * when the low watermark is reached.
2858 *
2859 * Returns true if a fatal signal was delivered during throttling. If this
2860 * happens, the page allocator should not consider triggering the OOM killer.
5515061d 2861 */
50694c28 2862static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
5515061d
MG
2863 nodemask_t *nodemask)
2864{
675becce 2865 struct zoneref *z;
5515061d 2866 struct zone *zone;
675becce 2867 pg_data_t *pgdat = NULL;
5515061d
MG
2868
2869 /*
2870 * Kernel threads should not be throttled as they may be indirectly
2871 * responsible for cleaning pages necessary for reclaim to make forward
2872 * progress. kjournald for example may enter direct reclaim while
2873 * committing a transaction where throttling it could force other
2874 * processes to block on log_wait_commit().
2875 */
2876 if (current->flags & PF_KTHREAD)
50694c28
MG
2877 goto out;
2878
2879 /*
2880 * If a fatal signal is pending, this process should not throttle.
2881 * It should return quickly so it can exit and free its memory
2882 */
2883 if (fatal_signal_pending(current))
2884 goto out;
5515061d 2885
675becce
MG
2886 /*
2887 * Check if the pfmemalloc reserves are ok by finding the first node
2888 * with a usable ZONE_NORMAL or lower zone. The expectation is that
2889 * GFP_KERNEL will be required for allocating network buffers when
2890 * swapping over the network so ZONE_HIGHMEM is unusable.
2891 *
2892 * Throttling is based on the first usable node and throttled processes
2893 * wait on a queue until kswapd makes progress and wakes them. There
2894 * is an affinity then between processes waking up and where reclaim
2895 * progress has been made assuming the process wakes on the same node.
2896 * More importantly, processes running on remote nodes will not compete
2897 * for remote pfmemalloc reserves and processes on different nodes
2898 * should make reasonable progress.
2899 */
2900 for_each_zone_zonelist_nodemask(zone, z, zonelist,
17636faa 2901 gfp_zone(gfp_mask), nodemask) {
675becce
MG
2902 if (zone_idx(zone) > ZONE_NORMAL)
2903 continue;
2904
2905 /* Throttle based on the first usable node */
2906 pgdat = zone->zone_pgdat;
2907 if (pfmemalloc_watermark_ok(pgdat))
2908 goto out;
2909 break;
2910 }
2911
2912 /* If no zone was usable by the allocation flags then do not throttle */
2913 if (!pgdat)
50694c28 2914 goto out;
5515061d 2915
68243e76
MG
2916 /* Account for the throttling */
2917 count_vm_event(PGSCAN_DIRECT_THROTTLE);
2918
5515061d
MG
2919 /*
2920 * If the caller cannot enter the filesystem, it's possible that it
2921 * is due to the caller holding an FS lock or performing a journal
2922 * transaction in the case of a filesystem like ext[3|4]. In this case,
2923 * it is not safe to block on pfmemalloc_wait as kswapd could be
2924 * blocked waiting on the same lock. Instead, throttle for up to a
2925 * second before continuing.
2926 */
2927 if (!(gfp_mask & __GFP_FS)) {
2928 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2929 pfmemalloc_watermark_ok(pgdat), HZ);
50694c28
MG
2930
2931 goto check_pending;
5515061d
MG
2932 }
2933
2934 /* Throttle until kswapd wakes the process */
2935 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2936 pfmemalloc_watermark_ok(pgdat));
50694c28
MG
2937
2938check_pending:
2939 if (fatal_signal_pending(current))
2940 return true;
2941
2942out:
2943 return false;
5515061d
MG
2944}
2945
dac1d27b 2946unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
327c0e96 2947 gfp_t gfp_mask, nodemask_t *nodemask)
66e1707b 2948{
33906bc5 2949 unsigned long nr_reclaimed;
66e1707b 2950 struct scan_control sc = {
ee814fe2 2951 .nr_to_reclaim = SWAP_CLUSTER_MAX,
21caf2fc 2952 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
b2e18757 2953 .reclaim_idx = gfp_zone(gfp_mask),
ee814fe2
JW
2954 .order = order,
2955 .nodemask = nodemask,
2956 .priority = DEF_PRIORITY,
66e1707b 2957 .may_writepage = !laptop_mode,
a6dc60f8 2958 .may_unmap = 1,
2e2e4259 2959 .may_swap = 1,
66e1707b
BS
2960 };
2961
5515061d 2962 /*
50694c28
MG
2963 * Do not enter reclaim if fatal signal was delivered while throttled.
2964 * 1 is returned so that the page allocator does not OOM kill at this
2965 * point.
5515061d 2966 */
50694c28 2967 if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
5515061d
MG
2968 return 1;
2969
33906bc5
MG
2970 trace_mm_vmscan_direct_reclaim_begin(order,
2971 sc.may_writepage,
e5146b12
MG
2972 gfp_mask,
2973 sc.reclaim_idx);
33906bc5 2974
3115cd91 2975 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
33906bc5
MG
2976
2977 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2978
2979 return nr_reclaimed;
66e1707b
BS
2980}
2981
c255a458 2982#ifdef CONFIG_MEMCG
66e1707b 2983
a9dd0a83 2984unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
4e416953 2985 gfp_t gfp_mask, bool noswap,
ef8f2327 2986 pg_data_t *pgdat,
0ae5e89c 2987 unsigned long *nr_scanned)
4e416953
BS
2988{
2989 struct scan_control sc = {
b8f5c566 2990 .nr_to_reclaim = SWAP_CLUSTER_MAX,
ee814fe2 2991 .target_mem_cgroup = memcg,
4e416953
BS
2992 .may_writepage = !laptop_mode,
2993 .may_unmap = 1,
b2e18757 2994 .reclaim_idx = MAX_NR_ZONES - 1,
4e416953 2995 .may_swap = !noswap,
4e416953 2996 };
6b4f7799 2997 unsigned long lru_pages;
0ae5e89c 2998
4e416953
BS
2999 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3000 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
bdce6d9e 3001
9e3b2f8c 3002 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
bdce6d9e 3003 sc.may_writepage,
e5146b12
MG
3004 sc.gfp_mask,
3005 sc.reclaim_idx);
bdce6d9e 3006
4e416953
BS
3007 /*
3008 * NOTE: Although we can get the priority field, using it
3009 * here is not a good idea, since it limits the pages we can scan.
a9dd0a83 3010 * If we don't reclaim here, the shrink_node from balance_pgdat
4e416953
BS
3011 * will pick up pages from other mem cgroups as well. We hack
3012 * the priority and make it zero.
3013 */
ef8f2327 3014 shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
bdce6d9e
KM
3015
3016 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3017
0ae5e89c 3018 *nr_scanned = sc.nr_scanned;
4e416953
BS
3019 return sc.nr_reclaimed;
3020}
3021
72835c86 3022unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
b70a2a21 3023 unsigned long nr_pages,
a7885eb8 3024 gfp_t gfp_mask,
b70a2a21 3025 bool may_swap)
66e1707b 3026{
4e416953 3027 struct zonelist *zonelist;
bdce6d9e 3028 unsigned long nr_reclaimed;
889976db 3029 int nid;
66e1707b 3030 struct scan_control sc = {
b70a2a21 3031 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
a09ed5e0
YH
3032 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3033 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
b2e18757 3034 .reclaim_idx = MAX_NR_ZONES - 1,
ee814fe2
JW
3035 .target_mem_cgroup = memcg,
3036 .priority = DEF_PRIORITY,
3037 .may_writepage = !laptop_mode,
3038 .may_unmap = 1,
b70a2a21 3039 .may_swap = may_swap,
a09ed5e0 3040 };
66e1707b 3041
889976db
YH
3042 /*
3043 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
3044 * take care of which node we get pages from. So the node where we start the
3045 * scan does not need to be the current node.
3046 */
72835c86 3047 nid = mem_cgroup_select_victim_node(memcg);
889976db 3048
c9634cf0 3049 zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
bdce6d9e
KM
3050
3051 trace_mm_vmscan_memcg_reclaim_begin(0,
3052 sc.may_writepage,
e5146b12
MG
3053 sc.gfp_mask,
3054 sc.reclaim_idx);
bdce6d9e 3055
89a28483 3056 current->flags |= PF_MEMALLOC;
3115cd91 3057 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
89a28483 3058 current->flags &= ~PF_MEMALLOC;
bdce6d9e
KM
3059
3060 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
3061
3062 return nr_reclaimed;
66e1707b
BS
3063}
3064#endif
3065
1d82de61 3066static void age_active_anon(struct pglist_data *pgdat,
ef8f2327 3067 struct scan_control *sc)
f16015fb 3068{
b95a2f2d 3069 struct mem_cgroup *memcg;
f16015fb 3070
b95a2f2d
JW
3071 if (!total_swap_pages)
3072 return;
3073
3074 memcg = mem_cgroup_iter(NULL, NULL, NULL);
3075 do {
ef8f2327 3076 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
b95a2f2d 3077
f8d1a311 3078 if (inactive_list_is_low(lruvec, false, sc))
1a93be0e 3079 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
9e3b2f8c 3080 sc, LRU_ACTIVE_ANON);
b95a2f2d
JW
3081
3082 memcg = mem_cgroup_iter(NULL, memcg, NULL);
3083 } while (memcg);
f16015fb
JW
3084}
3085
31483b6a 3086static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
60cefed4 3087{
31483b6a 3088 unsigned long mark = high_wmark_pages(zone);
60cefed4 3089
6256c6b4
MG
3090 if (!zone_watermark_ok_safe(zone, order, mark, classzone_idx))
3091 return false;
3092
3093 /*
3094 * If any eligible zone is balanced then the node is not considered
3095 * to be congested or dirty
3096 */
3097 clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
3098 clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
3099
3100 return true;
60cefed4
JW
3101}
3102
5515061d
MG
3103/*
3104 * Prepare kswapd for sleeping. This verifies that there are no processes
3105 * waiting in throttle_direct_reclaim() and that watermarks have been met.
3106 *
3107 * Returns true if kswapd is ready to sleep
3108 */
d9f21d42 3109static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
f50de2d3 3110{
1d82de61
MG
3111 int i;
3112
5515061d 3113 /*
9e5e3661
VB
3114 * The throttled processes are normally woken up in balance_pgdat() as
3115 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
3116 * race between when kswapd checks the watermarks and a process gets
3117 * throttled. There is also a potential race if processes get
3118 * throttled, kswapd wakes, a large process exits thereby balancing the
3119 * zones, which causes kswapd to exit balance_pgdat() before reaching
3120 * the wake up checks. If kswapd is going to sleep, no process should
3121 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
3122 * the wake up is premature, processes will wake kswapd and get
3123 * throttled again. The difference from wake ups in balance_pgdat() is
3124 * that here we are under prepare_to_wait().
5515061d 3125 */
9e5e3661
VB
3126 if (waitqueue_active(&pgdat->pfmemalloc_wait))
3127 wake_up_all(&pgdat->pfmemalloc_wait);
f50de2d3 3128
1d82de61
MG
3129 for (i = 0; i <= classzone_idx; i++) {
3130 struct zone *zone = pgdat->node_zones + i;
3131
6aa303de 3132 if (!managed_zone(zone))
1d82de61
MG
3133 continue;
3134
38087d9b
MG
3135 if (!zone_balanced(zone, order, classzone_idx))
3136 return false;
1d82de61
MG
3137 }
3138
38087d9b 3139 return true;
f50de2d3
MG
3140}
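/*
 * Editorial aside: the stand-alone sketch below models the zone walk in
 * prepare_kswapd_sleep() with made-up zone sizes and watermarks. It is an
 * illustration only, not kernel code; toy_zone_balanced() stands in for
 * zone_watermark_ok_safe(), drops the order argument and ignores lowmem
 * reserves.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_zone {
	const char *name;
	unsigned long managed_pages;	/* 0 means the zone is not managed */
	unsigned long free_pages;
	unsigned long high_wmark;
};

static bool toy_zone_balanced(const struct toy_zone *z)
{
	return z->free_pages > z->high_wmark;
}

static bool toy_prepare_kswapd_sleep(const struct toy_zone *zones,
				     int classzone_idx)
{
	for (int i = 0; i <= classzone_idx; i++) {
		if (!zones[i].managed_pages)
			continue;		/* !managed_zone() */
		if (!toy_zone_balanced(&zones[i]))
			return false;		/* keep reclaiming */
	}
	return true;				/* safe to sleep */
}

int main(void)
{
	struct toy_zone zones[] = {
		{ "DMA",    4096,   900,  512 },
		{ "Normal", 262144, 3000, 4096 },	/* below its high wmark */
	};

	printf("kswapd may sleep: %s\n",
	       toy_prepare_kswapd_sleep(zones, 1) ? "yes" : "no");
	return 0;
}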
3141
75485363 3142/*
1d82de61
MG
3143 * kswapd shrinks the node by reclaiming pages at or below the highest
3144 * usable zone that is currently unbalanced.
b8e83b94
MG
3145 *
3146 * Returns true if kswapd scanned at least the requested number of pages to
283aba9f
MG
3147 * reclaim or if the lack of progress was due to pages under writeback.
3148 * This is used to determine if the scanning priority needs to be raised.
75485363 3149 */
1d82de61 3150static bool kswapd_shrink_node(pg_data_t *pgdat,
accf6242 3151 struct scan_control *sc)
75485363 3152{
1d82de61
MG
3153 struct zone *zone;
3154 int z;
75485363 3155
1d82de61
MG
3156 /* Reclaim a number of pages proportional to the number of zones */
3157 sc->nr_to_reclaim = 0;
970a39a3 3158 for (z = 0; z <= sc->reclaim_idx; z++) {
1d82de61 3159 zone = pgdat->node_zones + z;
6aa303de 3160 if (!managed_zone(zone))
1d82de61 3161 continue;
7c954f6d 3162
1d82de61
MG
3163 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
3164 }
7c954f6d
MG
3165
3166 /*
1d82de61
MG
3167 * Historically care was taken to put equal pressure on all zones but
3168 * now pressure is applied based on node LRU order.
7c954f6d 3169 */
970a39a3 3170 shrink_node(pgdat, sc);
283aba9f 3171
7c954f6d 3172 /*
1d82de61
MG
3173 * Fragmentation may mean that the system cannot be rebalanced for
3174 * high-order allocations. If twice the allocation size has been
3175 * reclaimed then recheck watermarks only at order-0 to prevent
3176 * excessive reclaim. Assume that a process that requested a high-order
3177 * allocation can direct reclaim/compact.
7c954f6d 3178 */
9861a62c 3179 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
1d82de61 3180 sc->order = 0;
7c954f6d 3181
b8e83b94 3182 return sc->nr_scanned >= sc->nr_to_reclaim;
75485363
MG
3183}
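/*
 * Editorial aside: a rough, user-space model of how kswapd_shrink_node()
 * sizes its reclaim target and falls back to order-0. The watermarks are
 * invented, and toy_compact_gap() assumes compact_gap(order) amounts to
 * twice the allocation size.
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL
#define MAX_TOY_ZONES 4

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long toy_compact_gap(int order)
{
	return 2UL << order;
}

int main(void)
{
	unsigned long high_wmark[MAX_TOY_ZONES] = { 128, 4096, 0, 0 };
	int reclaim_idx = 1, order = 3;
	unsigned long nr_to_reclaim = 0, nr_reclaimed;

	/* Reclaim a number of pages proportional to the number of zones. */
	for (int z = 0; z <= reclaim_idx; z++) {
		if (!high_wmark[z])
			continue;		/* treat as unmanaged */
		nr_to_reclaim += max_ul(high_wmark[z], SWAP_CLUSTER_MAX);
	}
	printf("target: %lu pages\n", nr_to_reclaim);

	/* Pretend shrink_node() reclaimed this many pages... */
	nr_reclaimed = 40;

	/* ...more than twice the order-3 request (16 pages), so further
	 * balancing is rechecked at order-0 only. */
	if (order && nr_reclaimed >= toy_compact_gap(order))
		order = 0;
	printf("continue at order %d\n", order);
	return 0;
}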
3184
1da177e4 3185/*
1d82de61
MG
3186 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
3187 * that are eligible for use by the caller until at least one zone is
3188 * balanced.
1da177e4 3189 *
1d82de61 3190 * Returns the order kswapd finished reclaiming at.
1da177e4
LT
3191 *
3192 * kswapd scans the zones in the highmem->normal->dma direction. It skips
41858966 3193 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
1d82de61
MG
3194 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
3195 * or lower is eligible for reclaim until at least one usable zone is
3196 * balanced.
1da177e4 3197 */
accf6242 3198static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
1da177e4 3199{
1da177e4 3200 int i;
0608f43d
AM
3201 unsigned long nr_soft_reclaimed;
3202 unsigned long nr_soft_scanned;
1d82de61 3203 struct zone *zone;
179e9639
AM
3204 struct scan_control sc = {
3205 .gfp_mask = GFP_KERNEL,
ee814fe2 3206 .order = order,
b8e83b94 3207 .priority = DEF_PRIORITY,
ee814fe2 3208 .may_writepage = !laptop_mode,
a6dc60f8 3209 .may_unmap = 1,
2e2e4259 3210 .may_swap = 1,
179e9639 3211 };
f8891e5e 3212 count_vm_event(PAGEOUTRUN);
1da177e4 3213
9e3b2f8c 3214 do {
b8e83b94
MG
3215 bool raise_priority = true;
3216
3217 sc.nr_reclaimed = 0;
84c7a777 3218 sc.reclaim_idx = classzone_idx;
1da177e4 3219
86c79f6b 3220 /*
84c7a777
MG
3221 * If the number of buffer_heads exceeds the maximum allowed
3222 * then consider reclaiming from all zones. This has a dual
3223 * purpose -- on 64-bit systems it is expected that
3224 * buffer_heads are stripped during active rotation. On 32-bit
3225 * systems, highmem pages can pin lowmem memory and shrinking
3226 * buffers can relieve lowmem pressure. Reclaim may still not
3227 * go ahead if all eligible zones for the original allocation
3228 * request are balanced to avoid excessive reclaim from kswapd.
86c79f6b
MG
3229 */
3230 if (buffer_heads_over_limit) {
3231 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
3232 zone = pgdat->node_zones + i;
6aa303de 3233 if (!managed_zone(zone))
86c79f6b 3234 continue;
cc715d99 3235
970a39a3 3236 sc.reclaim_idx = i;
e1dbeda6 3237 break;
1da177e4 3238 }
1da177e4 3239 }
dafcb73e 3240
86c79f6b
MG
3241 /*
3242 * Only reclaim if there are no eligible zones. Check from
3243 * high to low zone as allocations prefer higher zones.
3244 * Scanning from low to high zone would allow congestion to be
3245 * cleared during a very small window when a small low
3246 * zone was balanced even under extreme pressure when the
84c7a777
MG
3247 * overall node may be congested. Note that sc.reclaim_idx
3248 * is not used as buffer_heads_over_limit may have adjusted
3249 * it.
86c79f6b 3250 */
84c7a777 3251 for (i = classzone_idx; i >= 0; i--) {
86c79f6b 3252 zone = pgdat->node_zones + i;
6aa303de 3253 if (!managed_zone(zone))
86c79f6b
MG
3254 continue;
3255
84c7a777 3256 if (zone_balanced(zone, sc.order, classzone_idx))
86c79f6b
MG
3257 goto out;
3258 }
e1dbeda6 3259
1d82de61
MG
3260 /*
3261 * Do some background aging of the anon list, to give
3262 * pages a chance to be referenced before reclaiming. All
3263 * pages are rotated regardless of classzone as this is
3264 * about consistent aging.
3265 */
ef8f2327 3266 age_active_anon(pgdat, &sc);
1d82de61 3267
b7ea3c41
MG
3268 /*
3269 * If we're having trouble reclaiming, start doing writepage
3270 * even in laptop mode.
3271 */
1d82de61 3272 if (sc.priority < DEF_PRIORITY - 2 || !pgdat_reclaimable(pgdat))
b7ea3c41
MG
3273 sc.may_writepage = 1;
3274
1d82de61
MG
3275 /* Call soft limit reclaim before calling shrink_node. */
3276 sc.nr_scanned = 0;
3277 nr_soft_scanned = 0;
ef8f2327 3278 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
1d82de61
MG
3279 sc.gfp_mask, &nr_soft_scanned);
3280 sc.nr_reclaimed += nr_soft_reclaimed;
3281
1da177e4 3282 /*
1d82de61
MG
3283 * There should be no need to raise the scanning priority if
3284 * enough pages are already being scanned such that the high
3285 * watermark would be met at 100% efficiency.
1da177e4 3286 */
970a39a3 3287 if (kswapd_shrink_node(pgdat, &sc))
1d82de61 3288 raise_priority = false;
5515061d
MG
3289
3290 /*
3291 * If the low watermark is met there is no need for processes
3292 * to be throttled on pfmemalloc_wait as they should now be
3293 * able to safely make forward progress. Wake them.
3294 */
3295 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3296 pfmemalloc_watermark_ok(pgdat))
cfc51155 3297 wake_up_all(&pgdat->pfmemalloc_wait);
5515061d 3298
b8e83b94
MG
3299 /* Check if kswapd should be suspending */
3300 if (try_to_freeze() || kthread_should_stop())
3301 break;
8357376d 3302
73ce02e9 3303 /*
b8e83b94
MG
3304 * Raise priority if scanning rate is too low or there was no
3305 * progress in reclaiming pages
73ce02e9 3306 */
b8e83b94
MG
3307 if (raise_priority || !sc.nr_reclaimed)
3308 sc.priority--;
1d82de61 3309 } while (sc.priority >= 1);
1da177e4 3310
b8e83b94 3311out:
0abdee2b 3312 /*
1d82de61
MG
3313 * Return the order kswapd stopped reclaiming at as
3314 * prepare_kswapd_sleep() takes it into account. If another caller
3315 * entered the allocator slow path while kswapd was awake, order will
3316 * remain at the higher level.
0abdee2b 3317 */
1d82de61 3318 return sc.order;
1da177e4
LT
3319}
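/*
 * Editorial aside: a simplified, hypothetical stand-in for the priority
 * loop in balance_pgdat(). It collapses the node to a single free/high
 * watermark pair, invents the reclaim numbers, and lowers the priority
 * only when a pass makes little progress; it is not the kernel algorithm
 * verbatim.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12

static unsigned long free_pages = 100, high_wmark = 400;

static bool any_eligible_zone_balanced(void)
{
	return free_pages > high_wmark;
}

/* Stand-in for shrink_node(): frees more the lower the priority. */
static unsigned long toy_shrink_node(int priority)
{
	unsigned long reclaimed = 1024UL >> priority;

	free_pages += reclaimed;
	return reclaimed;
}

int main(void)
{
	int priority = DEF_PRIORITY;

	do {
		if (any_eligible_zone_balanced())
			break;

		unsigned long reclaimed = toy_shrink_node(priority);

		printf("priority %2d: reclaimed %lu, free %lu\n",
		       priority, reclaimed, free_pages);

		/* Raise scanning pressure only if progress was poor. */
		if (reclaimed < 32)
			priority--;
	} while (priority >= 1);

	return 0;
}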
3320
38087d9b
MG
3321static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
3322 unsigned int classzone_idx)
f0bc0a60
KM
3323{
3324 long remaining = 0;
3325 DEFINE_WAIT(wait);
3326
3327 if (freezing(current) || kthread_should_stop())
3328 return;
3329
3330 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3331
3332 /* Try to sleep for a short interval */
d9f21d42 3333 if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
fd901c95
VB
3334 /*
3335 * Compaction records what page blocks it recently failed to
3336 * isolate pages from and skips them in the future scanning.
3337 * When kswapd is going to sleep, it is reasonable to assume
3338 * that page isolation and compaction may now succeed, so reset the cache.
3339 */
3340 reset_isolation_suitable(pgdat);
3341
3342 /*
3343 * We have freed the memory, now we should compact it to make
3344 * allocation of the requested order possible.
3345 */
38087d9b 3346 wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
fd901c95 3347
f0bc0a60 3348 remaining = schedule_timeout(HZ/10);
38087d9b
MG
3349
3350 /*
3351 * If woken prematurely then reset kswapd_classzone_idx and
3352 * order. The values will either be from a wakeup request or
3353 * the previous request that slept prematurely.
3354 */
3355 if (remaining) {
3356 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);
3357 pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
3358 }
3359
f0bc0a60
KM
3360 finish_wait(&pgdat->kswapd_wait, &wait);
3361 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3362 }
3363
3364 /*
3365 * After a short sleep, check if it was a premature sleep. If not, then
3366 * go fully to sleep until explicitly woken up.
3367 */
d9f21d42
MG
3368 if (!remaining &&
3369 prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
f0bc0a60
KM
3370 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3371
3372 /*
3373 * vmstat counters are not perfectly accurate and the estimated
3374 * value for counters such as NR_FREE_PAGES can deviate from the
3375 * true value by nr_online_cpus * threshold. To avoid the zone
3376 * watermarks being breached while under pressure, we reduce the
3377 * per-cpu vmstat threshold while kswapd is awake and restore
3378 * them before going back to sleep.
3379 */
3380 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
1c7e7f6c
AK
3381
3382 if (!kthread_should_stop())
3383 schedule();
3384
f0bc0a60
KM
3385 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3386 } else {
3387 if (remaining)
3388 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3389 else
3390 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3391 }
3392 finish_wait(&pgdat->kswapd_wait, &wait);
3393}
3394
1da177e4
LT
3395/*
3396 * The background pageout daemon, started as a kernel thread
4f98a2fe 3397 * from the init process.
1da177e4
LT
3398 *
3399 * This basically trickles out pages so that we have _some_
3400 * free memory available even if there is no other activity
3401 * that frees anything up. This is needed for things like routing
3402 * etc, where we otherwise might have all activity going on in
3403 * asynchronous contexts that cannot page things out.
3404 *
3405 * If there are applications that are active memory-allocators
3406 * (most normal use), this basically shouldn't matter.
3407 */
3408static int kswapd(void *p)
3409{
38087d9b 3410 unsigned int alloc_order, reclaim_order, classzone_idx;
1da177e4
LT
3411 pg_data_t *pgdat = (pg_data_t*)p;
3412 struct task_struct *tsk = current;
f0bc0a60 3413
1da177e4
LT
3414 struct reclaim_state reclaim_state = {
3415 .reclaimed_slab = 0,
3416 };
a70f7302 3417 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1da177e4 3418
cf40bd16
NP
3419 lockdep_set_current_reclaim_state(GFP_KERNEL);
3420
174596a0 3421 if (!cpumask_empty(cpumask))
c5f59f08 3422 set_cpus_allowed_ptr(tsk, cpumask);
1da177e4
LT
3423 current->reclaim_state = &reclaim_state;
3424
3425 /*
3426 * Tell the memory management that we're a "memory allocator",
3427 * and that if we need more memory we should get access to it
3428 * regardless (see "__alloc_pages()"). "kswapd" should
3429 * never get caught in the normal page freeing logic.
3430 *
3431 * (Kswapd normally doesn't need memory anyway, but sometimes
3432 * you need a small amount of memory in order to be able to
3433 * page out something else, and this flag essentially protects
3434 * us from recursively trying to free more memory as we're
3435 * trying to free the first piece of memory in the first place).
3436 */
930d9152 3437 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
83144186 3438 set_freezable();
1da177e4 3439
38087d9b
MG
3440 pgdat->kswapd_order = alloc_order = reclaim_order = 0;
3441 pgdat->kswapd_classzone_idx = classzone_idx = 0;
1da177e4 3442 for ( ; ; ) {
6f6313d4 3443 bool ret;
3e1d1d28 3444
38087d9b
MG
3445kswapd_try_sleep:
3446 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
3447 classzone_idx);
215ddd66 3448
38087d9b
MG
3449 /* Read the new order and classzone_idx */
3450 alloc_order = reclaim_order = pgdat->kswapd_order;
3451 classzone_idx = pgdat->kswapd_classzone_idx;
3452 pgdat->kswapd_order = 0;
3453 pgdat->kswapd_classzone_idx = 0;
1da177e4 3454
8fe23e05
DR
3455 ret = try_to_freeze();
3456 if (kthread_should_stop())
3457 break;
3458
3459 /*
3460 * We can speed up thawing tasks if we don't call balance_pgdat
3461 * after returning from the refrigerator
3462 */
38087d9b
MG
3463 if (ret)
3464 continue;
3465
3466 /*
3467 * Reclaim begins at the requested order but if a high-order
3468 * reclaim fails then kswapd falls back to reclaiming for
3469 * order-0. If that happens, kswapd will consider sleeping
3470 * for the order it finished reclaiming at (reclaim_order)
3471 * but kcompactd is woken to compact for the original
3472 * request (alloc_order).
3473 */
e5146b12
MG
3474 trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
3475 alloc_order);
38087d9b
MG
3476 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
3477 if (reclaim_order < alloc_order)
3478 goto kswapd_try_sleep;
1d82de61 3479
38087d9b
MG
3480 alloc_order = reclaim_order = pgdat->kswapd_order;
3481 classzone_idx = pgdat->kswapd_classzone_idx;
1da177e4 3482 }
b0a8cc58 3483
71abdc15 3484 tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
b0a8cc58 3485 current->reclaim_state = NULL;
71abdc15
JW
3486 lockdep_clear_current_reclaim_state();
3487
1da177e4
LT
3488 return 0;
3489}
3490
3491/*
3492 * A zone is low on free memory, so wake its kswapd task to service it.
3493 */
99504748 3494void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
1da177e4
LT
3495{
3496 pg_data_t *pgdat;
e1a55637 3497 int z;
1da177e4 3498
6aa303de 3499 if (!managed_zone(zone))
1da177e4
LT
3500 return;
3501
344736f2 3502 if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
1da177e4 3503 return;
88f5acf8 3504 pgdat = zone->zone_pgdat;
38087d9b
MG
3505 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);
3506 pgdat->kswapd_order = max(pgdat->kswapd_order, order);
8d0986e2 3507 if (!waitqueue_active(&pgdat->kswapd_wait))
1da177e4 3508 return;
e1a55637
MG
3509
3510 /* Only wake kswapd if all zones are unbalanced */
3511 for (z = 0; z <= classzone_idx; z++) {
3512 zone = pgdat->node_zones + z;
6aa303de 3513 if (!managed_zone(zone))
e1a55637
MG
3514 continue;
3515
3516 if (zone_balanced(zone, order, classzone_idx))
3517 return;
3518 }
88f5acf8
MG
3519
3520 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
8d0986e2 3521 wake_up_interruptible(&pgdat->kswapd_wait);
1da177e4
LT
3522}
3523
c6f37f12 3524#ifdef CONFIG_HIBERNATION
1da177e4 3525/*
7b51755c 3526 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
d6277db4
RW
3527 * freed pages.
3528 *
3529 * Rather than trying to age LRUs the aim is to preserve the overall
3530 * LRU order by reclaiming preferentially
3531 * inactive > active > active referenced > active mapped
1da177e4 3532 */
7b51755c 3533unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
1da177e4 3534{
d6277db4 3535 struct reclaim_state reclaim_state;
d6277db4 3536 struct scan_control sc = {
ee814fe2 3537 .nr_to_reclaim = nr_to_reclaim,
7b51755c 3538 .gfp_mask = GFP_HIGHUSER_MOVABLE,
b2e18757 3539 .reclaim_idx = MAX_NR_ZONES - 1,
ee814fe2 3540 .priority = DEF_PRIORITY,
d6277db4 3541 .may_writepage = 1,
ee814fe2
JW
3542 .may_unmap = 1,
3543 .may_swap = 1,
7b51755c 3544 .hibernation_mode = 1,
1da177e4 3545 };
a09ed5e0 3546 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
7b51755c
KM
3547 struct task_struct *p = current;
3548 unsigned long nr_reclaimed;
1da177e4 3549
7b51755c
KM
3550 p->flags |= PF_MEMALLOC;
3551 lockdep_set_current_reclaim_state(sc.gfp_mask);
3552 reclaim_state.reclaimed_slab = 0;
3553 p->reclaim_state = &reclaim_state;
d6277db4 3554
3115cd91 3555 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
d979677c 3556
7b51755c
KM
3557 p->reclaim_state = NULL;
3558 lockdep_clear_current_reclaim_state();
3559 p->flags &= ~PF_MEMALLOC;
d6277db4 3560
7b51755c 3561 return nr_reclaimed;
1da177e4 3562}
c6f37f12 3563#endif /* CONFIG_HIBERNATION */
1da177e4 3564
1da177e4
LT
3565/* It's optimal to keep kswapds on the same CPUs as their memory, but
3566 not required for correctness. So if the last cpu in a node goes
3567 away, we get changed to run anywhere: as the first one comes back,
3568 restore their cpu bindings. */
517bbed9 3569static int kswapd_cpu_online(unsigned int cpu)
1da177e4 3570{
58c0a4a7 3571 int nid;
1da177e4 3572
517bbed9
SAS
3573 for_each_node_state(nid, N_MEMORY) {
3574 pg_data_t *pgdat = NODE_DATA(nid);
3575 const struct cpumask *mask;
a70f7302 3576
517bbed9 3577 mask = cpumask_of_node(pgdat->node_id);
c5f59f08 3578
517bbed9
SAS
3579 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3580 /* One of our CPUs online: restore mask */
3581 set_cpus_allowed_ptr(pgdat->kswapd, mask);
1da177e4 3582 }
517bbed9 3583 return 0;
1da177e4 3584}
1da177e4 3585
3218ae14
YG
3586/*
3587 * This kswapd start function will be called by init and node-hot-add.
3588 * On node-hot-add, kswapd will be moved to proper cpus if cpus are hot-added.
3589 */
3590int kswapd_run(int nid)
3591{
3592 pg_data_t *pgdat = NODE_DATA(nid);
3593 int ret = 0;
3594
3595 if (pgdat->kswapd)
3596 return 0;
3597
3598 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3599 if (IS_ERR(pgdat->kswapd)) {
3600 /* failure at boot is fatal */
3601 BUG_ON(system_state == SYSTEM_BOOTING);
d5dc0ad9
GS
3602 pr_err("Failed to start kswapd on node %d\n", nid);
3603 ret = PTR_ERR(pgdat->kswapd);
d72515b8 3604 pgdat->kswapd = NULL;
3218ae14
YG
3605 }
3606 return ret;
3607}
3608
8fe23e05 3609/*
d8adde17 3610 * Called by memory hotplug when all memory in a node is offlined. Caller must
bfc8c901 3611 * hold mem_hotplug_begin/end().
8fe23e05
DR
3612 */
3613void kswapd_stop(int nid)
3614{
3615 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3616
d8adde17 3617 if (kswapd) {
8fe23e05 3618 kthread_stop(kswapd);
d8adde17
JL
3619 NODE_DATA(nid)->kswapd = NULL;
3620 }
8fe23e05
DR
3621}
3622
1da177e4
LT
3623static int __init kswapd_init(void)
3624{
517bbed9 3625 int nid, ret;
69e05944 3626
1da177e4 3627 swap_setup();
48fb2e24 3628 for_each_node_state(nid, N_MEMORY)
3218ae14 3629 kswapd_run(nid);
517bbed9
SAS
3630 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
3631 "mm/vmscan:online", kswapd_cpu_online,
3632 NULL);
3633 WARN_ON(ret < 0);
1da177e4
LT
3634 return 0;
3635}
3636
3637module_init(kswapd_init)
9eeff239
CL
3638
3639#ifdef CONFIG_NUMA
3640/*
a5f5f91d 3641 * Node reclaim mode
9eeff239 3642 *
a5f5f91d 3643 * If non-zero call node_reclaim when the number of free pages falls below
9eeff239 3644 * the watermarks.
9eeff239 3645 */
a5f5f91d 3646int node_reclaim_mode __read_mostly;
9eeff239 3647
1b2ffb78 3648#define RECLAIM_OFF 0
7d03431c 3649#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
1b2ffb78 3650#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
95bbc0c7 3651#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
1b2ffb78 3652
a92f7126 3653/*
a5f5f91d 3654 * Priority for NODE_RECLAIM. This determines the fraction of pages
a92f7126
CL
3655 * of a node considered for each node_reclaim pass. 4 scans 1/16th of
3656 * a node.
3657 */
a5f5f91d 3658#define NODE_RECLAIM_PRIORITY 4
a92f7126 3659
9614634f 3660/*
a5f5f91d 3661 * Percentage of pages in a node that must be unmapped for node_reclaim to
9614634f
CL
3662 * occur.
3663 */
3664int sysctl_min_unmapped_ratio = 1;
3665
0ff38490
CL
3666/*
3667 * If the number of slab pages in a node grows beyond this percentage then
3668 * slab reclaim needs to occur.
3669 */
3670int sysctl_min_slab_ratio = 5;
3671
11fb9989 3672static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
90afa5de 3673{
11fb9989
MG
3674 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
3675 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
3676 node_page_state(pgdat, NR_ACTIVE_FILE);
90afa5de
MG
3677
3678 /*
3679 * It's possible for there to be more file mapped pages than
3680 * accounted for by the pages on the file LRU lists because
3681 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
3682 */
3683 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3684}
3685
3686/* Work out how many page cache pages we can reclaim in this reclaim_mode */
a5f5f91d 3687static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
90afa5de 3688{
d031a157
AM
3689 unsigned long nr_pagecache_reclaimable;
3690 unsigned long delta = 0;
90afa5de
MG
3691
3692 /*
95bbc0c7 3693 * If RECLAIM_UNMAP is set, then all file pages are considered
90afa5de 3694 * potentially reclaimable. Otherwise, we have to worry about
11fb9989 3695 * pages like swapcache and node_unmapped_file_pages() provides
90afa5de
MG
3696 * a better estimate
3697 */
a5f5f91d
MG
3698 if (node_reclaim_mode & RECLAIM_UNMAP)
3699 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
90afa5de 3700 else
a5f5f91d 3701 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
90afa5de
MG
3702
3703 /* If we can't clean pages, remove dirty pages from consideration */
a5f5f91d
MG
3704 if (!(node_reclaim_mode & RECLAIM_WRITE))
3705 delta += node_page_state(pgdat, NR_FILE_DIRTY);
90afa5de
MG
3706
3707 /* Watch for any possible underflows due to delta */
3708 if (unlikely(delta > nr_pagecache_reclaimable))
3709 delta = nr_pagecache_reclaimable;
3710
3711 return nr_pagecache_reclaimable - delta;
3712}
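/*
 * Editorial aside: the arithmetic in node_unmapped_file_pages() and
 * node_pagecache_reclaimable() reproduced as a stand-alone program. The
 * counter values are made up and the toy_* names are hypothetical; only
 * the mode bits mirror the RECLAIM_WRITE/RECLAIM_UNMAP definitions above.
 */
#include <stdio.h>

#define TOY_RECLAIM_WRITE (1 << 1)
#define TOY_RECLAIM_UNMAP (1 << 2)

struct toy_node_stats {
	unsigned long file_pages;	/* NR_FILE_PAGES */
	unsigned long file_mapped;	/* NR_FILE_MAPPED */
	unsigned long file_lru;		/* inactive + active file */
	unsigned long file_dirty;	/* NR_FILE_DIRTY */
};

static unsigned long toy_unmapped_file_pages(const struct toy_node_stats *s)
{
	/* Mapped pages can exceed the file LRU (e.g. tmpfs), so clamp. */
	return s->file_lru > s->file_mapped ? s->file_lru - s->file_mapped : 0;
}

static unsigned long toy_pagecache_reclaimable(const struct toy_node_stats *s,
					       int reclaim_mode)
{
	unsigned long reclaimable, delta = 0;

	reclaimable = (reclaim_mode & TOY_RECLAIM_UNMAP) ?
		s->file_pages : toy_unmapped_file_pages(s);

	/* If we cannot write pages back, dirty page cache is off the table. */
	if (!(reclaim_mode & TOY_RECLAIM_WRITE))
		delta += s->file_dirty;

	if (delta > reclaimable)	/* watch for underflow */
		delta = reclaimable;
	return reclaimable - delta;
}

int main(void)
{
	struct toy_node_stats stats = {
		.file_pages = 50000, .file_mapped = 12000,
		.file_lru = 45000, .file_dirty = 3000,
	};

	printf("default mode: %lu\n", toy_pagecache_reclaimable(&stats, 0));
	printf("UNMAP|WRITE:  %lu\n",
	       toy_pagecache_reclaimable(&stats,
					 TOY_RECLAIM_UNMAP | TOY_RECLAIM_WRITE));
	return 0;
}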
3713
9eeff239 3714/*
a5f5f91d 3715 * Try to free up some pages from this node through reclaim.
9eeff239 3716 */
a5f5f91d 3717static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
9eeff239 3718{
7fb2d46d 3719 /* Minimum pages needed in order to stay on node */
69e05944 3720 const unsigned long nr_pages = 1 << order;
9eeff239
CL
3721 struct task_struct *p = current;
3722 struct reclaim_state reclaim_state;
a5f5f91d 3723 int classzone_idx = gfp_zone(gfp_mask);
179e9639 3724 struct scan_control sc = {
62b726c1 3725 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
21caf2fc 3726 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
bd2f6199 3727 .order = order,
a5f5f91d
MG
3728 .priority = NODE_RECLAIM_PRIORITY,
3729 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
3730 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
ee814fe2 3731 .may_swap = 1,
a5f5f91d 3732 .reclaim_idx = classzone_idx,
179e9639 3733 };
9eeff239 3734
9eeff239 3735 cond_resched();
d4f7796e 3736 /*
95bbc0c7 3737 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
d4f7796e 3738 * and we also need to be able to write out pages for RECLAIM_WRITE
95bbc0c7 3739 * and RECLAIM_UNMAP.
d4f7796e
CL
3740 */
3741 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
76ca542d 3742 lockdep_set_current_reclaim_state(gfp_mask);
9eeff239
CL
3743 reclaim_state.reclaimed_slab = 0;
3744 p->reclaim_state = &reclaim_state;
c84db23c 3745
a5f5f91d 3746 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
0ff38490
CL
3747 /*
3748 * Free memory by calling shrink zone with increasing
3749 * priorities until we have enough memory freed.
3750 */
0ff38490 3751 do {
970a39a3 3752 shrink_node(pgdat, &sc);
9e3b2f8c 3753 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
0ff38490 3754 }
c84db23c 3755
9eeff239 3756 p->reclaim_state = NULL;
d4f7796e 3757 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
76ca542d 3758 lockdep_clear_current_reclaim_state();
a79311c1 3759 return sc.nr_reclaimed >= nr_pages;
9eeff239 3760}
179e9639 3761
a5f5f91d 3762int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
179e9639 3763{
d773ed6b 3764 int ret;
179e9639
AM
3765
3766 /*
a5f5f91d 3767 * Node reclaim reclaims unmapped file backed pages and
0ff38490 3768 * slab pages if we are over the defined limits.
34aa1330 3769 *
9614634f
CL
3770 * A small portion of unmapped file backed pages is needed for
3771 * file I/O otherwise pages read by file I/O will be immediately
a5f5f91d
MG
3772 * thrown out if the node is overallocated. So we do not reclaim
3773 * if less than a specified percentage of the node is used by
9614634f 3774 * unmapped file backed pages.
179e9639 3775 */
a5f5f91d
MG
3776 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
3777 sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
3778 return NODE_RECLAIM_FULL;
179e9639 3779
a5f5f91d
MG
3780 if (!pgdat_reclaimable(pgdat))
3781 return NODE_RECLAIM_FULL;
d773ed6b 3782
179e9639 3783 /*
d773ed6b 3784 * Do not scan if the allocation should not be delayed.
179e9639 3785 */
d0164adc 3786 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
a5f5f91d 3787 return NODE_RECLAIM_NOSCAN;
179e9639
AM
3788
3789 /*
a5f5f91d 3790 * Only run node reclaim on the local node or on nodes that do not
179e9639
AM
3791 * have associated processors. This will favor the local processor
3792 * over remote processors and spread off node memory allocations
3793 * as wide as possible.
3794 */
a5f5f91d
MG
3795 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
3796 return NODE_RECLAIM_NOSCAN;
d773ed6b 3797
a5f5f91d
MG
3798 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
3799 return NODE_RECLAIM_NOSCAN;
fa5e084e 3800
a5f5f91d
MG
3801 ret = __node_reclaim(pgdat, gfp_mask, order);
3802 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
d773ed6b 3803
24cf7251
MG
3804 if (!ret)
3805 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3806
d773ed6b 3807 return ret;
179e9639 3808}
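/*
 * Editorial aside: a hypothetical summary of the gating that node_reclaim()
 * performs before calling __node_reclaim(). Every field in toy_gate is a
 * made-up stand-in for the corresponding kernel check, and the return
 * values only loosely mirror NODE_RECLAIM_NOSCAN/NODE_RECLAIM_FULL.
 */
#include <stdbool.h>
#include <stdio.h>

enum { TOY_RECLAIM_NOSCAN, TOY_RECLAIM_FULL, TOY_RECLAIM_RUN };

struct toy_gate {
	bool over_unmapped_limit;	/* pagecache > min_unmapped_pages */
	bool over_slab_limit;		/* slab > min_slab_pages */
	bool gfp_may_block;		/* gfpflags_allow_blocking() */
	bool caller_is_memalloc;	/* current->flags & PF_MEMALLOC */
	bool node_is_local_or_cpuless;	/* local node, or node without CPUs */
	bool reclaim_already_running;	/* PGDAT_RECLAIM_LOCKED already set */
};

static int toy_node_reclaim(const struct toy_gate *g)
{
	if (!g->over_unmapped_limit && !g->over_slab_limit)
		return TOY_RECLAIM_FULL;	/* nothing worth reclaiming */
	if (!g->gfp_may_block || g->caller_is_memalloc)
		return TOY_RECLAIM_NOSCAN;	/* must not be delayed */
	if (!g->node_is_local_or_cpuless)
		return TOY_RECLAIM_NOSCAN;	/* leave remote nodes alone */
	if (g->reclaim_already_running)
		return TOY_RECLAIM_NOSCAN;	/* one reclaimer per node */
	return TOY_RECLAIM_RUN;			/* would call __node_reclaim() */
}

int main(void)
{
	struct toy_gate g = {
		.over_unmapped_limit = true,
		.gfp_may_block = true,
		.node_is_local_or_cpuless = true,
	};

	printf("decision: %d\n", toy_node_reclaim(&g));
	return 0;
}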
9eeff239 3809#endif
894bc310 3810
894bc310
LS
3811/*
3812 * page_evictable - test whether a page is evictable
3813 * @page: the page to test
894bc310
LS
3814 *
3815 * Test whether page is evictable--i.e., should be placed on active/inactive
39b5f29a 3816 * lists vs unevictable list.
894bc310
LS
3817 *
3818 * Reasons page might not be evictable:
ba9ddf49 3819 * (1) page's mapping marked unevictable
b291f000 3820 * (2) page is part of an mlocked VMA
ba9ddf49 3821 *
894bc310 3822 */
39b5f29a 3823int page_evictable(struct page *page)
894bc310 3824{
39b5f29a 3825 return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
894bc310 3826}
89e004ea 3827
85046579 3828#ifdef CONFIG_SHMEM
89e004ea 3829/**
24513264
HD
3830 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
3831 * @pages: array of pages to check
3832 * @nr_pages: number of pages to check
89e004ea 3833 *
24513264 3834 * Checks pages for evictability and moves them to the appropriate lru list.
85046579
HD
3835 *
3836 * This function is only used for SysV IPC SHM_UNLOCK.
89e004ea 3837 */
24513264 3838void check_move_unevictable_pages(struct page **pages, int nr_pages)
89e004ea 3839{
925b7673 3840 struct lruvec *lruvec;
785b99fe 3841 struct pglist_data *pgdat = NULL;
24513264
HD
3842 int pgscanned = 0;
3843 int pgrescued = 0;
3844 int i;
89e004ea 3845
24513264
HD
3846 for (i = 0; i < nr_pages; i++) {
3847 struct page *page = pages[i];
785b99fe 3848 struct pglist_data *pagepgdat = page_pgdat(page);
89e004ea 3849
24513264 3850 pgscanned++;
785b99fe
MG
3851 if (pagepgdat != pgdat) {
3852 if (pgdat)
3853 spin_unlock_irq(&pgdat->lru_lock);
3854 pgdat = pagepgdat;
3855 spin_lock_irq(&pgdat->lru_lock);
24513264 3856 }
785b99fe 3857 lruvec = mem_cgroup_page_lruvec(page, pgdat);
89e004ea 3858
24513264
HD
3859 if (!PageLRU(page) || !PageUnevictable(page))
3860 continue;
89e004ea 3861
39b5f29a 3862 if (page_evictable(page)) {
24513264
HD
3863 enum lru_list lru = page_lru_base_type(page);
3864
309381fe 3865 VM_BUG_ON_PAGE(PageActive(page), page);
24513264 3866 ClearPageUnevictable(page);
fa9add64
HD
3867 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3868 add_page_to_lru_list(page, lruvec, lru);
24513264 3869 pgrescued++;
89e004ea 3870 }
24513264 3871 }
89e004ea 3872
785b99fe 3873 if (pgdat) {
24513264
HD
3874 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3875 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
785b99fe 3876 spin_unlock_irq(&pgdat->lru_lock);
89e004ea 3877 }
89e004ea 3878}
85046579 3879#endif /* CONFIG_SHMEM */