1 /*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/mm.h>
17 #include <linux/module.h>
18 #include <linux/gfp.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/swap.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/highmem.h>
24 #include <linux/vmpressure.h>
25 #include <linux/vmstat.h>
26 #include <linux/file.h>
27 #include <linux/writeback.h>
28 #include <linux/blkdev.h>
29 #include <linux/buffer_head.h> /* for try_to_release_page(),
30 buffer_heads_over_limit */
31 #include <linux/mm_inline.h>
32 #include <linux/backing-dev.h>
33 #include <linux/rmap.h>
34 #include <linux/topology.h>
35 #include <linux/cpu.h>
36 #include <linux/cpuset.h>
37 #include <linux/compaction.h>
38 #include <linux/notifier.h>
39 #include <linux/rwsem.h>
40 #include <linux/delay.h>
41 #include <linux/kthread.h>
42 #include <linux/freezer.h>
43 #include <linux/memcontrol.h>
44 #include <linux/delayacct.h>
45 #include <linux/sysctl.h>
46 #include <linux/oom.h>
47 #include <linux/prefetch.h>
48 #include <linux/printk.h>
49 #include <linux/dax.h>
50
51 #include <asm/tlbflush.h>
52 #include <asm/div64.h>
53
54 #include <linux/swapops.h>
55 #include <linux/balloon_compaction.h>
56
57 #include "internal.h"
58
59 #define CREATE_TRACE_POINTS
60 #include <trace/events/vmscan.h>
61
62 struct scan_control {
63 /* How many pages shrink_list() should reclaim */
64 unsigned long nr_to_reclaim;
65
66 /* This context's GFP mask */
67 gfp_t gfp_mask;
68
69 /* Allocation order */
70 int order;
71
72 /*
73 * Nodemask of nodes allowed by the caller. If NULL, all nodes
74 * are scanned.
75 */
76 nodemask_t *nodemask;
77
78 /*
79 * The memory cgroup that hit its limit and as a result is the
80 * primary target of this reclaim invocation.
81 */
82 struct mem_cgroup *target_mem_cgroup;
83
84 /* Scan (total_size >> priority) pages at once */
85 int priority;
86
87 /* The highest zone to isolate pages for reclaim from */
88 enum zone_type reclaim_idx;
89
90 unsigned int may_writepage:1;
91
92 /* Can mapped pages be reclaimed? */
93 unsigned int may_unmap:1;
94
95 /* Can pages be swapped as part of reclaim? */
96 unsigned int may_swap:1;
97
98 /* Can cgroups be reclaimed below their normal consumption range? */
99 unsigned int may_thrash:1;
100
101 unsigned int hibernation_mode:1;
102
103 /* One of the zones is ready for compaction */
104 unsigned int compaction_ready:1;
105
106 /* Incremented by the number of inactive pages that were scanned */
107 unsigned long nr_scanned;
108
109 /* Number of pages freed so far during a call to shrink_zones() */
110 unsigned long nr_reclaimed;
111 };
112
113 #ifdef ARCH_HAS_PREFETCH
114 #define prefetch_prev_lru_page(_page, _base, _field) \
115 do { \
116 if ((_page)->lru.prev != _base) { \
117 struct page *prev; \
118 \
119 prev = lru_to_page(&(_page->lru)); \
120 prefetch(&prev->_field); \
121 } \
122 } while (0)
123 #else
124 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
125 #endif
126
127 #ifdef ARCH_HAS_PREFETCHW
128 #define prefetchw_prev_lru_page(_page, _base, _field) \
129 do { \
130 if ((_page)->lru.prev != _base) { \
131 struct page *prev; \
132 \
133 prev = lru_to_page(&(_page->lru)); \
134 prefetchw(&prev->_field); \
135 } \
136 } while (0)
137 #else
138 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
139 #endif
140
141 /*
142 * From 0 .. 100. Higher means more swappy.
143 */
144 int vm_swappiness = 60;
145 /*
146 * The total number of pages which are beyond the high watermark within all
147 * zones.
148 */
149 unsigned long vm_total_pages;
150
151 static LIST_HEAD(shrinker_list);
152 static DECLARE_RWSEM(shrinker_rwsem);
153
154 #ifdef CONFIG_MEMCG
155 static bool global_reclaim(struct scan_control *sc)
156 {
157 return !sc->target_mem_cgroup;
158 }
159
160 /**
161 * sane_reclaim - is the usual dirty throttling mechanism operational?
162 * @sc: scan_control in question
163 *
164 * The normal page dirty throttling mechanism in balance_dirty_pages() is
165 * completely broken with the legacy memcg and direct stalling in
166 * shrink_page_list() is used for throttling instead, which lacks all the
167 * niceties such as fairness, adaptive pausing, bandwidth proportional
168 * allocation and configurability.
169 *
170 * This function tests whether the vmscan currently in progress can assume
171 * that the normal dirty throttling mechanism is operational.
172 */
173 static bool sane_reclaim(struct scan_control *sc)
174 {
175 struct mem_cgroup *memcg = sc->target_mem_cgroup;
176
177 if (!memcg)
178 return true;
179 #ifdef CONFIG_CGROUP_WRITEBACK
180 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
181 return true;
182 #endif
183 return false;
184 }
185 #else
186 static bool global_reclaim(struct scan_control *sc)
187 {
188 return true;
189 }
190
191 static bool sane_reclaim(struct scan_control *sc)
192 {
193 return true;
194 }
195 #endif
196
197 unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
198 {
199 unsigned long nr;
200
201 nr = node_page_state_snapshot(pgdat, NR_ACTIVE_FILE) +
202 node_page_state_snapshot(pgdat, NR_INACTIVE_FILE) +
203 node_page_state_snapshot(pgdat, NR_ISOLATED_FILE);
204
205 if (get_nr_swap_pages() > 0)
206 nr += node_page_state_snapshot(pgdat, NR_ACTIVE_ANON) +
207 node_page_state_snapshot(pgdat, NR_INACTIVE_ANON) +
208 node_page_state_snapshot(pgdat, NR_ISOLATED_ANON);
209
210 return nr;
211 }
212
213 bool pgdat_reclaimable(struct pglist_data *pgdat)
214 {
215 return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
216 pgdat_reclaimable_pages(pgdat) * 6;
217 }
218
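/*
 * Editorial note (not part of the original file): the "* 6" above means a
 * node is still considered reclaimable while fewer than six times its
 * reclaimable pages have been scanned. For example, a node with 100,000
 * reclaimable pages stops being treated as reclaimable once
 * NR_PAGES_SCANNED reaches 600,000.
 */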
219 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
220 {
221 if (!mem_cgroup_disabled())
222 return mem_cgroup_get_lru_size(lruvec, lru);
223
224 return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
225 }
226
227 /*
228 * Add a shrinker callback to be called from the vm.
229 */
230 int register_shrinker(struct shrinker *shrinker)
231 {
232 size_t size = sizeof(*shrinker->nr_deferred);
233
234 if (shrinker->flags & SHRINKER_NUMA_AWARE)
235 size *= nr_node_ids;
236
237 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
238 if (!shrinker->nr_deferred)
239 return -ENOMEM;
240
241 down_write(&shrinker_rwsem);
242 list_add_tail(&shrinker->list, &shrinker_list);
243 up_write(&shrinker_rwsem);
244 return 0;
245 }
246 EXPORT_SYMBOL(register_shrinker);
247
248 /*
249 * Remove one
250 */
251 void unregister_shrinker(struct shrinker *shrinker)
252 {
253 down_write(&shrinker_rwsem);
254 list_del(&shrinker->list);
255 up_write(&shrinker_rwsem);
256 kfree(shrinker->nr_deferred);
257 }
258 EXPORT_SYMBOL(unregister_shrinker);
259
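/*
 * Editorial sketch (not part of the original file): how a cache owner might
 * use the register_shrinker()/unregister_shrinker() API above. The names
 * example_* are hypothetical; count_objects, scan_objects, seeks, flags and
 * the register/unregister calls are the real interface. The block is
 * guarded out so the file is unchanged for the compiler.
 */
#if 0	/* illustrative only */
static unsigned long example_count(struct shrinker *s,
				   struct shrink_control *sc)
{
	/* report how many objects could be freed on node sc->nid */
	return example_cache_count(sc->nid);
}

static unsigned long example_scan(struct shrinker *s,
				  struct shrink_control *sc)
{
	/* free up to sc->nr_to_scan objects, return the number freed */
	return example_cache_trim(sc->nid, sc->nr_to_scan);
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE,
};

static int __init example_cache_init(void)
{
	return register_shrinker(&example_shrinker);	/* may fail with -ENOMEM */
}

static void example_cache_exit(void)
{
	unregister_shrinker(&example_shrinker);
}
#endif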
260 #define SHRINK_BATCH 128
261
262 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
263 struct shrinker *shrinker,
264 unsigned long nr_scanned,
265 unsigned long nr_eligible)
266 {
267 unsigned long freed = 0;
268 unsigned long long delta;
269 long total_scan;
270 long freeable;
271 long nr;
272 long new_nr;
273 int nid = shrinkctl->nid;
274 long batch_size = shrinker->batch ? shrinker->batch
275 : SHRINK_BATCH;
276
277 freeable = shrinker->count_objects(shrinker, shrinkctl);
278 if (freeable == 0)
279 return 0;
280
281 /*
282 * copy the current shrinker scan count into a local variable
283 * and zero it so that other concurrent shrinker invocations
284 * don't also do this scanning work.
285 */
286 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
287
288 total_scan = nr;
289 delta = (4 * nr_scanned) / shrinker->seeks;
290 delta *= freeable;
291 do_div(delta, nr_eligible + 1);
292 total_scan += delta;
293 if (total_scan < 0) {
294 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
295 shrinker->scan_objects, total_scan);
296 total_scan = freeable;
297 }
298
299 /*
300 * We need to avoid excessive windup on filesystem shrinkers
301 * due to large numbers of GFP_NOFS allocations causing the
302 * shrinkers to return -1 all the time. This results in a large
303 * nr being built up so when a shrink that can do some work
304 * comes along it empties the entire cache due to nr >>>
305 * freeable. This is bad for sustaining a working set in
306 * memory.
307 *
308 * Hence only allow the shrinker to scan the entire cache when
309 * a large delta change is calculated directly.
310 */
311 if (delta < freeable / 4)
312 total_scan = min(total_scan, freeable / 2);
313
314 /*
315 * Avoid risking looping forever due to too large nr value:
316 * never try to free more than twice the estimated number of
317 * freeable entries.
318 */
319 if (total_scan > freeable * 2)
320 total_scan = freeable * 2;
321
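/*
 * Editorial worked example (not part of the original file) of the delta
 * calculation above: with nr_scanned = 192, nr_eligible = 768,
 * shrinker->seeks = DEFAULT_SEEKS = 2 and freeable = 10000,
 *
 *	delta = (4 * 192) / 2           = 384
 *	delta = 384 * 10000 / (768 + 1) ~= 4993
 *
 * i.e. roughly half the cache is asked to be scanned when a quarter of the
 * eligible LRU pages were scanned, because DEFAULT_SEEKS makes the slab
 * pressure about twice the LRU scan fraction, before the deferred count
 * (nr) and the clamps above are applied.
 */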
322 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
323 nr_scanned, nr_eligible,
324 freeable, delta, total_scan);
325
326 /*
327 * Normally, we should not scan less than batch_size objects in one
328 * pass to avoid too frequent shrinker calls, but if the slab has less
329 * than batch_size objects in total and we are really tight on memory,
330 * we will try to reclaim all available objects, otherwise we can end
331 * up failing allocations although there are plenty of reclaimable
332 * objects spread over several slabs with usage less than the
333 * batch_size.
334 *
335 * We detect the "tight on memory" situations by looking at the total
336 * number of objects we want to scan (total_scan). If it is greater
337 * than the total number of objects on slab (freeable), we must be
338 * scanning at high prio and therefore should try to reclaim as much as
339 * possible.
340 */
341 while (total_scan >= batch_size ||
342 total_scan >= freeable) {
343 unsigned long ret;
344 unsigned long nr_to_scan = min(batch_size, total_scan);
345
346 shrinkctl->nr_to_scan = nr_to_scan;
347 ret = shrinker->scan_objects(shrinker, shrinkctl);
348 if (ret == SHRINK_STOP)
349 break;
350 freed += ret;
351
352 count_vm_events(SLABS_SCANNED, nr_to_scan);
353 total_scan -= nr_to_scan;
354
355 cond_resched();
356 }
357
358 /*
359 * move the unused scan count back into the shrinker in a
360 * manner that handles concurrent updates. If we exhausted the
361 * scan, there is no need to do an update.
362 */
363 if (total_scan > 0)
364 new_nr = atomic_long_add_return(total_scan,
365 &shrinker->nr_deferred[nid]);
366 else
367 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
368
369 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
370 return freed;
371 }
372
373 /**
374 * shrink_slab - shrink slab caches
375 * @gfp_mask: allocation context
376 * @nid: node whose slab caches to target
377 * @memcg: memory cgroup whose slab caches to target
378 * @nr_scanned: pressure numerator
379 * @nr_eligible: pressure denominator
380 *
381 * Call the shrink functions to age shrinkable caches.
382 *
383 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
384 * unaware shrinkers will receive a node id of 0 instead.
385 *
386 * @memcg specifies the memory cgroup to target. If it is not NULL,
387 * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
388 * objects from the memory cgroup specified. Otherwise, only unaware
389 * shrinkers are called.
390 *
391 * @nr_scanned and @nr_eligible form a ratio that indicates how much of
392 * the available objects should be scanned. Page reclaim for example
393 * passes the number of pages scanned and the number of pages on the
394 * LRU lists that it considered on @nid, plus a bias in @nr_scanned
395 * when it encountered mapped pages. The ratio is further biased by
396 * the ->seeks setting of the shrink function, which indicates the
397 * cost to recreate an object relative to that of an LRU page.
398 *
399 * Returns the number of reclaimed slab objects.
400 */
401 static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
402 struct mem_cgroup *memcg,
403 unsigned long nr_scanned,
404 unsigned long nr_eligible)
405 {
406 struct shrinker *shrinker;
407 unsigned long freed = 0;
408
409 if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
410 return 0;
411
412 if (nr_scanned == 0)
413 nr_scanned = SWAP_CLUSTER_MAX;
414
415 if (!down_read_trylock(&shrinker_rwsem)) {
416 /*
417 * If we would return 0, our callers would understand that we
418 * have nothing else to shrink and give up trying. By returning
419 * 1 we keep it going and assume we'll be able to shrink next
420 * time.
421 */
422 freed = 1;
423 goto out;
424 }
425
426 list_for_each_entry(shrinker, &shrinker_list, list) {
427 struct shrink_control sc = {
428 .gfp_mask = gfp_mask,
429 .nid = nid,
430 .memcg = memcg,
431 };
432
433 /*
434 * If kernel memory accounting is disabled, we ignore
435 * SHRINKER_MEMCG_AWARE flag and call all shrinkers
436 * passing NULL for memcg.
437 */
438 if (memcg_kmem_enabled() &&
439 !!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
440 continue;
441
442 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
443 sc.nid = 0;
444
445 freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
446 }
447
448 up_read(&shrinker_rwsem);
449 out:
450 cond_resched();
451 return freed;
452 }
453
454 void drop_slab_node(int nid)
455 {
456 unsigned long freed;
457
458 do {
459 struct mem_cgroup *memcg = NULL;
460
461 freed = 0;
462 do {
463 freed += shrink_slab(GFP_KERNEL, nid, memcg,
464 1000, 1000);
465 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
466 } while (freed > 10);
467 }
468
469 void drop_slab(void)
470 {
471 int nid;
472
473 for_each_online_node(nid)
474 drop_slab_node(nid);
475 }
476
477 static inline int is_page_cache_freeable(struct page *page)
478 {
479 /*
480 * A freeable page cache page is referenced only by the caller
481 * that isolated the page, the page cache radix tree and
482 * optional buffer heads at page->private.
483 */
484 return page_count(page) - page_has_private(page) == 2;
485 }
486
487 static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
488 {
489 if (current->flags & PF_SWAPWRITE)
490 return 1;
491 if (!inode_write_congested(inode))
492 return 1;
493 if (inode_to_bdi(inode) == current->backing_dev_info)
494 return 1;
495 return 0;
496 }
497
498 /*
499 * We detected a synchronous write error writing a page out. Probably
500 * -ENOSPC. We need to propagate that into the address_space for a subsequent
501 * fsync(), msync() or close().
502 *
503 * The tricky part is that after writepage we cannot touch the mapping: nothing
504 * prevents it from being freed up. But we have a ref on the page and once
505 * that page is locked, the mapping is pinned.
506 *
507 * We're allowed to run sleeping lock_page() here because we know the caller has
508 * __GFP_FS.
509 */
510 static void handle_write_error(struct address_space *mapping,
511 struct page *page, int error)
512 {
513 lock_page(page);
514 if (page_mapping(page) == mapping)
515 mapping_set_error(mapping, error);
516 unlock_page(page);
517 }
518
519 /* possible outcome of pageout() */
520 typedef enum {
521 /* failed to write page out, page is locked */
522 PAGE_KEEP,
523 /* move page to the active list, page is locked */
524 PAGE_ACTIVATE,
525 /* page has been sent to the disk successfully, page is unlocked */
526 PAGE_SUCCESS,
527 /* page is clean and locked */
528 PAGE_CLEAN,
529 } pageout_t;
530
531 /*
532 * pageout is called by shrink_page_list() for each dirty page.
533 * Calls ->writepage().
534 */
535 static pageout_t pageout(struct page *page, struct address_space *mapping,
536 struct scan_control *sc)
537 {
538 /*
539 * If the page is dirty, only perform writeback if that write
540 * will be non-blocking, to prevent this allocation from being
541 * stalled by pagecache activity. But note that there may be
542 * stalls if we need to run get_block(). We could test
543 * PagePrivate for that.
544 *
545 * If this process is currently in __generic_file_write_iter() against
546 * this page's queue, we can perform writeback even if that
547 * will block.
548 *
549 * If the page is swapcache, write it back even if that would
550 * block, for some throttling. This happens by accident, because
551 * swap_backing_dev_info is bust: it doesn't reflect the
552 * congestion state of the swapdevs. Easy to fix, if needed.
553 */
554 if (!is_page_cache_freeable(page))
555 return PAGE_KEEP;
556 if (!mapping) {
557 /*
558 * Some data journaling orphaned pages can have
559 * page->mapping == NULL while being dirty with clean buffers.
560 */
561 if (page_has_private(page)) {
562 if (try_to_free_buffers(page)) {
563 ClearPageDirty(page);
564 pr_info("%s: orphaned page\n", __func__);
565 return PAGE_CLEAN;
566 }
567 }
568 return PAGE_KEEP;
569 }
570 if (mapping->a_ops->writepage == NULL)
571 return PAGE_ACTIVATE;
572 if (!may_write_to_inode(mapping->host, sc))
573 return PAGE_KEEP;
574
575 if (clear_page_dirty_for_io(page)) {
576 int res;
577 struct writeback_control wbc = {
578 .sync_mode = WB_SYNC_NONE,
579 .nr_to_write = SWAP_CLUSTER_MAX,
580 .range_start = 0,
581 .range_end = LLONG_MAX,
582 .for_reclaim = 1,
583 };
584
585 SetPageReclaim(page);
586 res = mapping->a_ops->writepage(page, &wbc);
587 if (res < 0)
588 handle_write_error(mapping, page, res);
589 if (res == AOP_WRITEPAGE_ACTIVATE) {
590 ClearPageReclaim(page);
591 return PAGE_ACTIVATE;
592 }
593
594 if (!PageWriteback(page)) {
595 /* synchronous write or broken a_ops? */
596 ClearPageReclaim(page);
597 }
598 trace_mm_vmscan_writepage(page);
599 inc_node_page_state(page, NR_VMSCAN_WRITE);
600 return PAGE_SUCCESS;
601 }
602
603 return PAGE_CLEAN;
604 }
605
606 /*
607 * Same as remove_mapping, but if the page is removed from the mapping, it
608 * gets returned with a refcount of 0.
609 */
610 static int __remove_mapping(struct address_space *mapping, struct page *page,
611 bool reclaimed)
612 {
613 unsigned long flags;
614
615 BUG_ON(!PageLocked(page));
616 BUG_ON(mapping != page_mapping(page));
617
618 spin_lock_irqsave(&mapping->tree_lock, flags);
619 /*
620 * The non-racy check for a busy page.
621 *
622 * Must be careful with the order of the tests. When someone has
623 * a ref to the page, it may be possible that they dirty it then
624 * drop the reference. So if PageDirty is tested before page_count
625 * here, then the following race may occur:
626 *
627 * get_user_pages(&page);
628 * [user mapping goes away]
629 * write_to(page);
630 * !PageDirty(page) [good]
631 * SetPageDirty(page);
632 * put_page(page);
633 * !page_count(page) [good, discard it]
634 *
635 * [oops, our write_to data is lost]
636 *
637 * Reversing the order of the tests ensures such a situation cannot
638 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
639 * load is not satisfied before that of page->_refcount.
640 *
641 * Note that if SetPageDirty is always performed via set_page_dirty,
642 * and thus under tree_lock, then this ordering is not required.
643 */
644 if (!page_ref_freeze(page, 2))
645 goto cannot_free;
646 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
647 if (unlikely(PageDirty(page))) {
648 page_ref_unfreeze(page, 2);
649 goto cannot_free;
650 }
651
652 if (PageSwapCache(page)) {
653 swp_entry_t swap = { .val = page_private(page) };
654 mem_cgroup_swapout(page, swap);
655 __delete_from_swap_cache(page);
656 spin_unlock_irqrestore(&mapping->tree_lock, flags);
657 swapcache_free(swap);
658 } else {
659 void (*freepage)(struct page *);
660 void *shadow = NULL;
661
662 freepage = mapping->a_ops->freepage;
663 /*
664 * Remember a shadow entry for reclaimed file cache in
665 * order to detect refaults, thus thrashing, later on.
666 *
667 * But don't store shadows in an address space that is
668 * already exiting. This is not just an optimization,
669 * inode reclaim needs to empty out the radix tree or
670 * the nodes are lost. Don't plant shadows behind its
671 * back.
672 *
673 * We also don't store shadows for DAX mappings because the
674 * only page cache pages found in these are zero pages
675 * covering holes, and because we don't want to mix DAX
676 * exceptional entries and shadow exceptional entries in the
677 * same page_tree.
678 */
679 if (reclaimed && page_is_file_cache(page) &&
680 !mapping_exiting(mapping) && !dax_mapping(mapping))
681 shadow = workingset_eviction(mapping, page);
682 __delete_from_page_cache(page, shadow);
683 spin_unlock_irqrestore(&mapping->tree_lock, flags);
684
685 if (freepage != NULL)
686 freepage(page);
687 }
688
689 return 1;
690
691 cannot_free:
692 spin_unlock_irqrestore(&mapping->tree_lock, flags);
693 return 0;
694 }
695
696 /*
697 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
698 * someone else has a ref on the page, abort and return 0. If it was
699 * successfully detached, return 1. Assumes the caller has a single ref on
700 * this page.
701 */
702 int remove_mapping(struct address_space *mapping, struct page *page)
703 {
704 if (__remove_mapping(mapping, page, false)) {
705 /*
706 * Unfreezing the refcount with 1 rather than 2 effectively
707 * drops the pagecache ref for us without requiring another
708 * atomic operation.
709 */
710 page_ref_unfreeze(page, 1);
711 return 1;
712 }
713 return 0;
714 }
715
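/*
 * Editorial sketch (not part of the original file): a minimal caller of
 * remove_mapping(). The page must be locked and the caller must hold the
 * only extra reference, as described above. "drop_clean_page" is a
 * hypothetical helper, not a real kernel function; the block is guarded
 * out so the file is unchanged for the compiler.
 */
#if 0	/* illustrative only */
static int drop_clean_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int removed = 0;

	lock_page(page);
	if (mapping && !PageDirty(page) && !PageWriteback(page))
		removed = remove_mapping(mapping, page);
	unlock_page(page);
	return removed;
}
#endif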
716 /**
717 * putback_lru_page - put previously isolated page onto appropriate LRU list
718 * @page: page to be put back to appropriate lru list
719 *
720 * Add previously isolated @page to appropriate LRU list.
721 * Page may still be unevictable for other reasons.
722 *
723 * lru_lock must not be held, interrupts must be enabled.
724 */
725 void putback_lru_page(struct page *page)
726 {
727 bool is_unevictable;
728 int was_unevictable = PageUnevictable(page);
729
730 VM_BUG_ON_PAGE(PageLRU(page), page);
731
732 redo:
733 ClearPageUnevictable(page);
734
735 if (page_evictable(page)) {
736 /*
737 * For evictable pages, we can use the cache.
738 * In event of a race, worst case is we end up with an
739 * unevictable page on [in]active list.
740 * We know how to handle that.
741 */
742 is_unevictable = false;
743 lru_cache_add(page);
744 } else {
745 /*
746 * Put unevictable pages directly on zone's unevictable
747 * list.
748 */
749 is_unevictable = true;
750 add_page_to_unevictable_list(page);
751 /*
752 * When racing with an mlock or AS_UNEVICTABLE clearing
753 * (page is unlocked) make sure that if the other thread
754 * does not observe our setting of PG_lru and fails
755 * isolation/check_move_unevictable_pages,
756 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
757 * the page back to the evictable list.
758 *
759 * The other side is TestClearPageMlocked() or shmem_lock().
760 */
761 smp_mb();
762 }
763
764 /*
765 * page's status can change while we move it among lru. If an evictable
766 * page is on the unevictable list, it will never be freed. To avoid that,
767 * check again after we have added it to the list.
768 */
769 if (is_unevictable && page_evictable(page)) {
770 if (!isolate_lru_page(page)) {
771 put_page(page);
772 goto redo;
773 }
774 /* This means someone else dropped this page from the LRU,
775 * so it will be freed or put back on the LRU again. There is
776 * nothing to do here.
777 */
778 }
779
780 if (was_unevictable && !is_unevictable)
781 count_vm_event(UNEVICTABLE_PGRESCUED);
782 else if (!was_unevictable && is_unevictable)
783 count_vm_event(UNEVICTABLE_PGCULLED);
784
785 put_page(page); /* drop ref from isolate */
786 }
787
788 enum page_references {
789 PAGEREF_RECLAIM,
790 PAGEREF_RECLAIM_CLEAN,
791 PAGEREF_KEEP,
792 PAGEREF_ACTIVATE,
793 };
794
795 static enum page_references page_check_references(struct page *page,
796 struct scan_control *sc)
797 {
798 int referenced_ptes, referenced_page;
799 unsigned long vm_flags;
800
801 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
802 &vm_flags);
803 referenced_page = TestClearPageReferenced(page);
804
805 /*
806 * Mlock lost the isolation race with us. Let try_to_unmap()
807 * move the page to the unevictable list.
808 */
809 if (vm_flags & VM_LOCKED)
810 return PAGEREF_RECLAIM;
811
812 if (referenced_ptes) {
813 if (PageSwapBacked(page))
814 return PAGEREF_ACTIVATE;
815 /*
816 * All mapped pages start out with page table
817 * references from the instantiating fault, so we need
818 * to look twice if a mapped file page is used more
819 * than once.
820 *
821 * Mark it and spare it for another trip around the
822 * inactive list. Another page table reference will
823 * lead to its activation.
824 *
825 * Note: the mark is set for activated pages as well
826 * so that recently deactivated but used pages are
827 * quickly recovered.
828 */
829 SetPageReferenced(page);
830
831 if (referenced_page || referenced_ptes > 1)
832 return PAGEREF_ACTIVATE;
833
834 /*
835 * Activate file-backed executable pages after first usage.
836 */
837 if (vm_flags & VM_EXEC)
838 return PAGEREF_ACTIVATE;
839
840 return PAGEREF_KEEP;
841 }
842
843 /* Reclaim if clean, defer dirty pages to writeback */
844 if (referenced_page && !PageSwapBacked(page))
845 return PAGEREF_RECLAIM_CLEAN;
846
847 return PAGEREF_RECLAIM;
848 }
849
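/*
 * Editorial summary (not part of the original file) of the decision made
 * by page_check_references() above:
 *
 *	VM_LOCKED vma found               -> PAGEREF_RECLAIM (try_to_unmap will
 *	                                     move the page to the unevictable list)
 *	pte referenced, swap-backed       -> PAGEREF_ACTIVATE
 *	pte referenced more than once,
 *	or pte referenced + PG_referenced -> PAGEREF_ACTIVATE
 *	pte referenced, VM_EXEC file      -> PAGEREF_ACTIVATE
 *	single pte reference (first use)  -> PAGEREF_KEEP (PG_referenced is set)
 *	PG_referenced only, file page     -> PAGEREF_RECLAIM_CLEAN
 *	otherwise                         -> PAGEREF_RECLAIM
 */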
850 /* Check if a page is dirty or under writeback */
851 static void page_check_dirty_writeback(struct page *page,
852 bool *dirty, bool *writeback)
853 {
854 struct address_space *mapping;
855
856 /*
857 * Anonymous pages are not handled by flushers and must be written
858 * from reclaim context. Do not stall reclaim based on them
859 */
860 if (!page_is_file_cache(page)) {
861 *dirty = false;
862 *writeback = false;
863 return;
864 }
865
866 /* By default assume that the page flags are accurate */
867 *dirty = PageDirty(page);
868 *writeback = PageWriteback(page);
869
870 /* Verify dirty/writeback state if the filesystem supports it */
871 if (!page_has_private(page))
872 return;
873
874 mapping = page_mapping(page);
875 if (mapping && mapping->a_ops->is_dirty_writeback)
876 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
877 }
878
879 /*
880 * shrink_page_list() returns the number of reclaimed pages
881 */
882 static unsigned long shrink_page_list(struct list_head *page_list,
883 struct pglist_data *pgdat,
884 struct scan_control *sc,
885 enum ttu_flags ttu_flags,
886 unsigned long *ret_nr_dirty,
887 unsigned long *ret_nr_unqueued_dirty,
888 unsigned long *ret_nr_congested,
889 unsigned long *ret_nr_writeback,
890 unsigned long *ret_nr_immediate,
891 bool force_reclaim)
892 {
893 LIST_HEAD(ret_pages);
894 LIST_HEAD(free_pages);
895 int pgactivate = 0;
896 unsigned long nr_unqueued_dirty = 0;
897 unsigned long nr_dirty = 0;
898 unsigned long nr_congested = 0;
899 unsigned long nr_reclaimed = 0;
900 unsigned long nr_writeback = 0;
901 unsigned long nr_immediate = 0;
902
903 cond_resched();
904
905 while (!list_empty(page_list)) {
906 struct address_space *mapping;
907 struct page *page;
908 int may_enter_fs;
909 enum page_references references = PAGEREF_RECLAIM_CLEAN;
910 bool dirty, writeback;
911 bool lazyfree = false;
912 int ret = SWAP_SUCCESS;
913
914 cond_resched();
915
916 page = lru_to_page(page_list);
917 list_del(&page->lru);
918
919 if (!trylock_page(page))
920 goto keep;
921
922 VM_BUG_ON_PAGE(PageActive(page), page);
923
924 sc->nr_scanned++;
925
926 if (unlikely(!page_evictable(page)))
927 goto cull_mlocked;
928
929 if (!sc->may_unmap && page_mapped(page))
930 goto keep_locked;
931
932 /* Double the slab pressure for mapped and swapcache pages */
933 if (page_mapped(page) || PageSwapCache(page))
934 sc->nr_scanned++;
935
936 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
937 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
938
939 /*
940 * The number of dirty pages determines if a zone is marked
941 * reclaim_congested which affects wait_iff_congested. kswapd
942 * will stall and start writing pages if the tail of the LRU
943 * is all dirty unqueued pages.
944 */
945 page_check_dirty_writeback(page, &dirty, &writeback);
946 if (dirty || writeback)
947 nr_dirty++;
948
949 if (dirty && !writeback)
950 nr_unqueued_dirty++;
951
952 /*
953 * Treat this page as congested if the underlying BDI is or if
954 * pages are cycling through the LRU so quickly that the
955 * pages marked for immediate reclaim are making it to the
956 * end of the LRU a second time.
957 */
958 mapping = page_mapping(page);
959 if (((dirty || writeback) && mapping &&
960 inode_write_congested(mapping->host)) ||
961 (writeback && PageReclaim(page)))
962 nr_congested++;
963
964 /*
965 * If a page at the tail of the LRU is under writeback, there
966 * are three cases to consider.
967 *
968 * 1) If reclaim is encountering an excessive number of pages
969 * under writeback and this page is both under writeback and
970 * PageReclaim then it indicates that pages are being queued
971 * for IO but are being recycled through the LRU before the
972 * IO can complete. Waiting on the page itself risks an
973 * indefinite stall if it is impossible to writeback the
974 * page due to IO error or disconnected storage so instead
975 * note that the LRU is being scanned too quickly and the
976 * caller can stall after page list has been processed.
977 *
978 * 2) Global or new memcg reclaim encounters a page that is
979 * not marked for immediate reclaim, or the caller does not
980 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
981 * not to fs). In this case mark the page for immediate
982 * reclaim and continue scanning.
983 *
984 * Require may_enter_fs because we would wait on fs, which
985 * may not have submitted IO yet. And the loop driver might
986 * enter reclaim, and deadlock if it waits on a page for
987 * which it is needed to do the write (loop masks off
988 * __GFP_IO|__GFP_FS for this reason); but more thought
989 * would probably show more reasons.
990 *
991 * 3) Legacy memcg encounters a page that is already marked
992 * PageReclaim. memcg does not have any dirty pages
993 * throttling so we could easily OOM just because too many
994 * pages are in writeback and there is nothing else to
995 * reclaim. Wait for the writeback to complete.
996 */
997 if (PageWriteback(page)) {
998 /* Case 1 above */
999 if (current_is_kswapd() &&
1000 PageReclaim(page) &&
1001 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1002 nr_immediate++;
1003 goto keep_locked;
1004
1005 /* Case 2 above */
1006 } else if (sane_reclaim(sc) ||
1007 !PageReclaim(page) || !may_enter_fs) {
1008 /*
1009 * This is slightly racy - end_page_writeback()
1010 * might have just cleared PageReclaim, then
1011 * setting PageReclaim here ends up being interpreted
1012 * as PageReadahead - but that does not matter
1013 * enough to care. What we do want is for this
1014 * page to have PageReclaim set next time memcg
1015 * reclaim reaches the tests above, so it will
1016 * then wait_on_page_writeback() to avoid OOM;
1017 * and it's also appropriate in global reclaim.
1018 */
1019 SetPageReclaim(page);
1020 nr_writeback++;
1021 goto keep_locked;
1022
1023 /* Case 3 above */
1024 } else {
1025 unlock_page(page);
1026 wait_on_page_writeback(page);
1027 /* then go back and try same page again */
1028 list_add_tail(&page->lru, page_list);
1029 continue;
1030 }
1031 }
1032
1033 if (!force_reclaim)
1034 references = page_check_references(page, sc);
1035
1036 switch (references) {
1037 case PAGEREF_ACTIVATE:
1038 goto activate_locked;
1039 case PAGEREF_KEEP:
1040 goto keep_locked;
1041 case PAGEREF_RECLAIM:
1042 case PAGEREF_RECLAIM_CLEAN:
1043 ; /* try to reclaim the page below */
1044 }
1045
1046 /*
1047 * Anonymous process memory has backing store?
1048 * Try to allocate it some swap space here.
1049 */
1050 if (PageAnon(page) && !PageSwapCache(page)) {
1051 if (!(sc->gfp_mask & __GFP_IO))
1052 goto keep_locked;
1053 if (!add_to_swap(page, page_list))
1054 goto activate_locked;
1055 lazyfree = true;
1056 may_enter_fs = 1;
1057
1058 /* Adding to swap updated mapping */
1059 mapping = page_mapping(page);
1060 } else if (unlikely(PageTransHuge(page))) {
1061 /* Split file THP */
1062 if (split_huge_page_to_list(page, page_list))
1063 goto keep_locked;
1064 }
1065
1066 VM_BUG_ON_PAGE(PageTransHuge(page), page);
1067
1068 /*
1069 * The page is mapped into the page tables of one or more
1070 * processes. Try to unmap it here.
1071 */
1072 if (page_mapped(page) && mapping) {
1073 switch (ret = try_to_unmap(page, lazyfree ?
1074 (ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
1075 (ttu_flags | TTU_BATCH_FLUSH))) {
1076 case SWAP_FAIL:
1077 goto activate_locked;
1078 case SWAP_AGAIN:
1079 goto keep_locked;
1080 case SWAP_MLOCK:
1081 goto cull_mlocked;
1082 case SWAP_LZFREE:
1083 goto lazyfree;
1084 case SWAP_SUCCESS:
1085 ; /* try to free the page below */
1086 }
1087 }
1088
1089 if (PageDirty(page)) {
1090 /*
1091 * Only kswapd can writeback filesystem pages to
1092 * avoid risk of stack overflow but only writeback
1093 * if many dirty pages have been encountered.
1094 */
1095 if (page_is_file_cache(page) &&
1096 (!current_is_kswapd() ||
1097 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1098 /*
1099 * Immediately reclaim when written back.
1100 * Similar in principle to deactivate_page()
1101 * except we already have the page isolated
1102 * and know it's dirty
1103 */
1104 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1105 SetPageReclaim(page);
1106
1107 goto keep_locked;
1108 }
1109
1110 if (references == PAGEREF_RECLAIM_CLEAN)
1111 goto keep_locked;
1112 if (!may_enter_fs)
1113 goto keep_locked;
1114 if (!sc->may_writepage)
1115 goto keep_locked;
1116
1117 /*
1118 * Page is dirty. Flush the TLB if a writable entry
1119 * potentially exists to avoid CPU writes after IO
1120 * starts and then write it out here.
1121 */
1122 try_to_unmap_flush_dirty();
1123 switch (pageout(page, mapping, sc)) {
1124 case PAGE_KEEP:
1125 goto keep_locked;
1126 case PAGE_ACTIVATE:
1127 goto activate_locked;
1128 case PAGE_SUCCESS:
1129 if (PageWriteback(page))
1130 goto keep;
1131 if (PageDirty(page))
1132 goto keep;
1133
1134 /*
1135 * A synchronous write - probably a ramdisk. Go
1136 * ahead and try to reclaim the page.
1137 */
1138 if (!trylock_page(page))
1139 goto keep;
1140 if (PageDirty(page) || PageWriteback(page))
1141 goto keep_locked;
1142 mapping = page_mapping(page);
1143 case PAGE_CLEAN:
1144 ; /* try to free the page below */
1145 }
1146 }
1147
1148 /*
1149 * If the page has buffers, try to free the buffer mappings
1150 * associated with this page. If we succeed we try to free
1151 * the page as well.
1152 *
1153 * We do this even if the page is PageDirty().
1154 * try_to_release_page() does not perform I/O, but it is
1155 * possible for a page to have PageDirty set, but it is actually
1156 * clean (all its buffers are clean). This happens if the
1157 * buffers were written out directly, with submit_bh(). ext3
1158 * will do this, as well as the blockdev mapping.
1159 * try_to_release_page() will discover that cleanness and will
1160 * drop the buffers and mark the page clean - it can be freed.
1161 *
1162 * Rarely, pages can have buffers and no ->mapping. These are
1163 * the pages which were not successfully invalidated in
1164 * truncate_complete_page(). We try to drop those buffers here
1165 * and if that worked, and the page is no longer mapped into
1166 * process address space (page_count == 1) it can be freed.
1167 * Otherwise, leave the page on the LRU so it is swappable.
1168 */
1169 if (page_has_private(page)) {
1170 if (!try_to_release_page(page, sc->gfp_mask))
1171 goto activate_locked;
1172 if (!mapping && page_count(page) == 1) {
1173 unlock_page(page);
1174 if (put_page_testzero(page))
1175 goto free_it;
1176 else {
1177 /*
1178 * rare race with speculative reference.
1179 * the speculative reference will free
1180 * this page shortly, so we may
1181 * increment nr_reclaimed here (and
1182 * leave it off the LRU).
1183 */
1184 nr_reclaimed++;
1185 continue;
1186 }
1187 }
1188 }
1189
1190 lazyfree:
1191 if (!mapping || !__remove_mapping(mapping, page, true))
1192 goto keep_locked;
1193
1194 /*
1195 * At this point, we have no other references and there is
1196 * no way to pick any more up (removed from LRU, removed
1197 * from pagecache). Can use non-atomic bitops now (and
1198 * we obviously don't have to worry about waking up a process
1199 * waiting on the page lock, because there are no references.
1200 */
1201 __ClearPageLocked(page);
1202 free_it:
1203 if (ret == SWAP_LZFREE)
1204 count_vm_event(PGLAZYFREED);
1205
1206 nr_reclaimed++;
1207
1208 /*
1209 * Is there a need to periodically free the page list? It would
1210 * appear not, as the counts should be low.
1211 */
1212 list_add(&page->lru, &free_pages);
1213 continue;
1214
1215 cull_mlocked:
1216 if (PageSwapCache(page))
1217 try_to_free_swap(page);
1218 unlock_page(page);
1219 list_add(&page->lru, &ret_pages);
1220 continue;
1221
1222 activate_locked:
1223 /* Not a candidate for swapping, so reclaim swap space. */
1224 if (PageSwapCache(page) && mem_cgroup_swap_full(page))
1225 try_to_free_swap(page);
1226 VM_BUG_ON_PAGE(PageActive(page), page);
1227 SetPageActive(page);
1228 pgactivate++;
1229 keep_locked:
1230 unlock_page(page);
1231 keep:
1232 list_add(&page->lru, &ret_pages);
1233 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1234 }
1235
1236 mem_cgroup_uncharge_list(&free_pages);
1237 try_to_unmap_flush();
1238 free_hot_cold_page_list(&free_pages, true);
1239
1240 list_splice(&ret_pages, page_list);
1241 count_vm_events(PGACTIVATE, pgactivate);
1242
1243 *ret_nr_dirty += nr_dirty;
1244 *ret_nr_congested += nr_congested;
1245 *ret_nr_unqueued_dirty += nr_unqueued_dirty;
1246 *ret_nr_writeback += nr_writeback;
1247 *ret_nr_immediate += nr_immediate;
1248 return nr_reclaimed;
1249 }
1250
1251 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1252 struct list_head *page_list)
1253 {
1254 struct scan_control sc = {
1255 .gfp_mask = GFP_KERNEL,
1256 .priority = DEF_PRIORITY,
1257 .may_unmap = 1,
1258 };
1259 unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
1260 struct page *page, *next;
1261 LIST_HEAD(clean_pages);
1262
1263 list_for_each_entry_safe(page, next, page_list, lru) {
1264 if (page_is_file_cache(page) && !PageDirty(page) &&
1265 !__PageMovable(page)) {
1266 ClearPageActive(page);
1267 list_move(&page->lru, &clean_pages);
1268 }
1269 }
1270
1271 ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1272 TTU_UNMAP|TTU_IGNORE_ACCESS,
1273 &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
1274 list_splice(&clean_pages, page_list);
1275 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
1276 return ret;
1277 }
1278
1279 /*
1280 * Attempt to remove the specified page from its LRU. Only take this page
1281 * if it is of the appropriate PageActive status. Pages which are being
1282 * freed elsewhere are also ignored.
1283 *
1284 * page: page to consider
1285 * mode: one of the LRU isolation modes defined above
1286 *
1287 * returns 0 on success, -ve errno on failure.
1288 */
1289 int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1290 {
1291 int ret = -EINVAL;
1292
1293 /* Only take pages on the LRU. */
1294 if (!PageLRU(page))
1295 return ret;
1296
1297 /* Compaction should not handle unevictable pages but CMA can do so */
1298 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1299 return ret;
1300
1301 ret = -EBUSY;
1302
1303 /*
1304 * To minimise LRU disruption, the caller can indicate that it only
1305 * wants to isolate pages it will be able to operate on without
1306 * blocking - clean pages for the most part.
1307 *
1308 * ISOLATE_CLEAN means that only clean pages should be isolated. This
1309 * is used by reclaim when it cannot write to backing storage
1310 *
1311 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
1312 * that it is possible to migrate without blocking
1313 */
1314 if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1315 /* All the caller can do on PageWriteback is block */
1316 if (PageWriteback(page))
1317 return ret;
1318
1319 if (PageDirty(page)) {
1320 struct address_space *mapping;
1321
1322 /* ISOLATE_CLEAN means only clean pages */
1323 if (mode & ISOLATE_CLEAN)
1324 return ret;
1325
1326 /*
1327 * Only pages without mappings or that have a
1328 * ->migratepage callback are possible to migrate
1329 * without blocking
1330 */
1331 mapping = page_mapping(page);
1332 if (mapping && !mapping->a_ops->migratepage)
1333 return ret;
1334 }
1335 }
1336
1337 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1338 return ret;
1339
1340 if (likely(get_page_unless_zero(page))) {
1341 /*
1342 * Be careful not to clear PageLRU until after we're
1343 * sure the page is not being freed elsewhere -- the
1344 * page release code relies on it.
1345 */
1346 ClearPageLRU(page);
1347 ret = 0;
1348 }
1349
1350 return ret;
1351 }
1352
1353
1354 /*
1355 * Update LRU sizes after isolating pages. The LRU size updates must
1356 * be complete before mem_cgroup_update_lru_size due to a sanity check.
1357 */
1358 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1359 enum lru_list lru, unsigned long *nr_zone_taken,
1360 unsigned long nr_taken)
1361 {
1362 #ifdef CONFIG_HIGHMEM
1363 int zid;
1364
1365 /*
1366 * Highmem has separate accounting for highmem pages so each zone
1367 * is updated separately.
1368 */
1369 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1370 if (!nr_zone_taken[zid])
1371 continue;
1372
1373 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1374 }
1375 #else
1376 /* Zone ID does not matter on !HIGHMEM */
1377 __update_lru_size(lruvec, lru, 0, -nr_taken);
1378 #endif
1379
1380 #ifdef CONFIG_MEMCG
1381 mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
1382 #endif
1383 }
1384
1385 /*
1386 * zone_lru_lock is heavily contended. Some of the functions that
1387 * shrink the lists perform better by taking out a batch of pages
1388 * and working on them outside the LRU lock.
1389 *
1390 * For pagecache intensive workloads, this function is the hottest
1391 * spot in the kernel (apart from copy_*_user functions).
1392 *
1393 * Appropriate locks must be held before calling this function.
1394 *
1395 * @nr_to_scan: The number of pages to look through on the list.
1396 * @lruvec: The LRU vector to pull pages from.
1397 * @dst: The temp list to put pages on to.
1398 * @nr_scanned: The number of pages that were scanned.
1399 * @sc: The scan_control struct for this reclaim session
1400 * @mode: One of the LRU isolation modes
1401 * @lru: LRU list id for isolating
1402 *
1403 * returns how many pages were moved onto *@dst.
1404 */
1405 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1406 struct lruvec *lruvec, struct list_head *dst,
1407 unsigned long *nr_scanned, struct scan_control *sc,
1408 isolate_mode_t mode, enum lru_list lru)
1409 {
1410 struct list_head *src = &lruvec->lists[lru];
1411 unsigned long nr_taken = 0;
1412 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1413 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1414 unsigned long scan, nr_pages;
1415 LIST_HEAD(pages_skipped);
1416
1417 for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
1418 !list_empty(src); scan++) {
1419 struct page *page;
1420
1421 page = lru_to_page(src);
1422 prefetchw_prev_lru_page(page, src, flags);
1423
1424 VM_BUG_ON_PAGE(!PageLRU(page), page);
1425
1426 if (page_zonenum(page) > sc->reclaim_idx) {
1427 list_move(&page->lru, &pages_skipped);
1428 nr_skipped[page_zonenum(page)]++;
1429 continue;
1430 }
1431
1432 switch (__isolate_lru_page(page, mode)) {
1433 case 0:
1434 nr_pages = hpage_nr_pages(page);
1435 nr_taken += nr_pages;
1436 nr_zone_taken[page_zonenum(page)] += nr_pages;
1437 list_move(&page->lru, dst);
1438 break;
1439
1440 case -EBUSY:
1441 /* else it is being freed elsewhere */
1442 list_move(&page->lru, src);
1443 continue;
1444
1445 default:
1446 BUG();
1447 }
1448 }
1449
1450 /*
1451 * Splice any skipped pages to the start of the LRU list. Note that
1452 * this disrupts the LRU order when reclaiming for lower zones but
1453 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1454 * scanning would soon rescan the same pages to skip and put the
1455 * system at risk of premature OOM.
1456 */
1457 if (!list_empty(&pages_skipped)) {
1458 int zid;
1459
1460 list_splice(&pages_skipped, src);
1461 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1462 if (!nr_skipped[zid])
1463 continue;
1464
1465 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1466 }
1467 }
1468 *nr_scanned = scan;
1469 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
1470 nr_taken, mode, is_file_lru(lru));
1471 update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
1472 return nr_taken;
1473 }
1474
1475 /**
1476 * isolate_lru_page - tries to isolate a page from its LRU list
1477 * @page: page to isolate from its LRU list
1478 *
1479 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1480 * vmstat statistic corresponding to whatever LRU list the page was on.
1481 *
1482 * Returns 0 if the page was removed from an LRU list.
1483 * Returns -EBUSY if the page was not on an LRU list.
1484 *
1485 * The returned page will have PageLRU() cleared. If it was found on
1486 * the active list, it will have PageActive set. If it was found on
1487 * the unevictable list, it will have the PageUnevictable bit set. That flag
1488 * may need to be cleared by the caller before letting the page go.
1489 *
1490 * The vmstat statistic corresponding to the list on which the page was
1491 * found will be decremented.
1492 *
1493 * Restrictions:
1494 * (1) Must be called with an elevated refcount on the page. This is a
1495 * fundamental difference from isolate_lru_pages (which is called
1496 * without a stable reference).
1497 * (2) the lru_lock must not be held.
1498 * (3) interrupts must be enabled.
1499 */
1500 int isolate_lru_page(struct page *page)
1501 {
1502 int ret = -EBUSY;
1503
1504 VM_BUG_ON_PAGE(!page_count(page), page);
1505 WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1506
1507 if (PageLRU(page)) {
1508 struct zone *zone = page_zone(page);
1509 struct lruvec *lruvec;
1510
1511 spin_lock_irq(zone_lru_lock(zone));
1512 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
1513 if (PageLRU(page)) {
1514 int lru = page_lru(page);
1515 get_page(page);
1516 ClearPageLRU(page);
1517 del_page_from_lru_list(page, lruvec, lru);
1518 ret = 0;
1519 }
1520 spin_unlock_irq(zone_lru_lock(zone));
1521 }
1522 return ret;
1523 }
1524
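/*
 * Editorial sketch (not part of the original file): typical use of
 * isolate_lru_page()/putback_lru_page() by a caller that already holds a
 * reference on the page (restriction 1 above). "process_one_page" is a
 * hypothetical helper; the block is guarded out so the file is unchanged
 * for the compiler.
 */
#if 0	/* illustrative only */
static void process_one_page(struct page *page)	/* caller holds a ref */
{
	if (isolate_lru_page(page))
		return;		/* -EBUSY: page was not on an LRU list */

	/* ... work on the page while it is off the LRU ... */

	/* drops the isolation reference and requeues the page */
	putback_lru_page(page);
}
#endif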
1525 /*
1526 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1527 * then get rescheduled. When there is a massive number of tasks doing page
1528 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1529 * the LRU list will go small and be scanned faster than necessary, leading to
1530 * unnecessary swapping, thrashing and OOM.
1531 */
1532 static int too_many_isolated(struct pglist_data *pgdat, int file,
1533 struct scan_control *sc)
1534 {
1535 unsigned long inactive, isolated;
1536
1537 if (current_is_kswapd())
1538 return 0;
1539
1540 if (!sane_reclaim(sc))
1541 return 0;
1542
1543 if (file) {
1544 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1545 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1546 } else {
1547 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1548 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1549 }
1550
1551 /*
1552 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1553 * won't get blocked by normal direct-reclaimers, forming a circular
1554 * deadlock.
1555 */
1556 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1557 inactive >>= 3;
1558
1559 return isolated > inactive;
1560 }
1561
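/*
 * Editorial worked example (not part of the original file): with
 * NR_INACTIVE_FILE = 80000 and NR_ISOLATED_FILE = 12000, a GFP_KERNEL
 * direct reclaimer (both __GFP_IO and __GFP_FS set) compares 12000 with
 * 80000 >> 3 = 10000 and is throttled, while a GFP_NOFS reclaimer compares
 * 12000 with the full 80000 and is allowed to continue, so it cannot end
 * up waiting behind the reclaimers that depend on it for progress.
 */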
1562 static noinline_for_stack void
1563 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1564 {
1565 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1566 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1567 LIST_HEAD(pages_to_free);
1568
1569 /*
1570 * Put back any unfreeable pages.
1571 */
1572 while (!list_empty(page_list)) {
1573 struct page *page = lru_to_page(page_list);
1574 int lru;
1575
1576 VM_BUG_ON_PAGE(PageLRU(page), page);
1577 list_del(&page->lru);
1578 if (unlikely(!page_evictable(page))) {
1579 spin_unlock_irq(&pgdat->lru_lock);
1580 putback_lru_page(page);
1581 spin_lock_irq(&pgdat->lru_lock);
1582 continue;
1583 }
1584
1585 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1586
1587 SetPageLRU(page);
1588 lru = page_lru(page);
1589 add_page_to_lru_list(page, lruvec, lru);
1590
1591 if (is_active_lru(lru)) {
1592 int file = is_file_lru(lru);
1593 int numpages = hpage_nr_pages(page);
1594 reclaim_stat->recent_rotated[file] += numpages;
1595 }
1596 if (put_page_testzero(page)) {
1597 __ClearPageLRU(page);
1598 __ClearPageActive(page);
1599 del_page_from_lru_list(page, lruvec, lru);
1600
1601 if (unlikely(PageCompound(page))) {
1602 spin_unlock_irq(&pgdat->lru_lock);
1603 mem_cgroup_uncharge(page);
1604 (*get_compound_page_dtor(page))(page);
1605 spin_lock_irq(&pgdat->lru_lock);
1606 } else
1607 list_add(&page->lru, &pages_to_free);
1608 }
1609 }
1610
1611 /*
1612 * To save our caller's stack, now use input list for pages to free.
1613 */
1614 list_splice(&pages_to_free, page_list);
1615 }
1616
1617 /*
1618 * If a kernel thread (such as nfsd for loop-back mounts) services
1619 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1620 * In that case we should only throttle if the backing device it is
1621 * writing to is congested. In other cases it is safe to throttle.
1622 */
1623 static int current_may_throttle(void)
1624 {
1625 return !(current->flags & PF_LESS_THROTTLE) ||
1626 current->backing_dev_info == NULL ||
1627 bdi_write_congested(current->backing_dev_info);
1628 }
1629
1630 /*
1631 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
1632 * of reclaimed pages
1633 */
1634 static noinline_for_stack unsigned long
1635 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1636 struct scan_control *sc, enum lru_list lru)
1637 {
1638 LIST_HEAD(page_list);
1639 unsigned long nr_scanned;
1640 unsigned long nr_reclaimed = 0;
1641 unsigned long nr_taken;
1642 unsigned long nr_dirty = 0;
1643 unsigned long nr_congested = 0;
1644 unsigned long nr_unqueued_dirty = 0;
1645 unsigned long nr_writeback = 0;
1646 unsigned long nr_immediate = 0;
1647 isolate_mode_t isolate_mode = 0;
1648 int file = is_file_lru(lru);
1649 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1650 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1651
1652 while (unlikely(too_many_isolated(pgdat, file, sc))) {
1653 congestion_wait(BLK_RW_ASYNC, HZ/10);
1654
1655 /* We are about to die and free our memory. Return now. */
1656 if (fatal_signal_pending(current))
1657 return SWAP_CLUSTER_MAX;
1658 }
1659
1660 lru_add_drain();
1661
1662 if (!sc->may_unmap)
1663 isolate_mode |= ISOLATE_UNMAPPED;
1664 if (!sc->may_writepage)
1665 isolate_mode |= ISOLATE_CLEAN;
1666
1667 spin_lock_irq(&pgdat->lru_lock);
1668
1669 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1670 &nr_scanned, sc, isolate_mode, lru);
1671
1672 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1673 reclaim_stat->recent_scanned[file] += nr_taken;
1674
1675 if (global_reclaim(sc)) {
1676 __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
1677 if (current_is_kswapd())
1678 __count_vm_events(PGSCAN_KSWAPD, nr_scanned);
1679 else
1680 __count_vm_events(PGSCAN_DIRECT, nr_scanned);
1681 }
1682 spin_unlock_irq(&pgdat->lru_lock);
1683
1684 if (nr_taken == 0)
1685 return 0;
1686
1687 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
1688 &nr_dirty, &nr_unqueued_dirty, &nr_congested,
1689 &nr_writeback, &nr_immediate,
1690 false);
1691
1692 spin_lock_irq(&pgdat->lru_lock);
1693
1694 if (global_reclaim(sc)) {
1695 if (current_is_kswapd())
1696 __count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed);
1697 else
1698 __count_vm_events(PGSTEAL_DIRECT, nr_reclaimed);
1699 }
1700
1701 putback_inactive_pages(lruvec, &page_list);
1702
1703 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1704
1705 spin_unlock_irq(&pgdat->lru_lock);
1706
1707 mem_cgroup_uncharge_list(&page_list);
1708 free_hot_cold_page_list(&page_list, true);
1709
1710 /*
1711 * If reclaim is isolating dirty pages under writeback, it implies
1712 * that the long-lived page allocation rate is exceeding the page
1713 * laundering rate. Either the global limits are not being effective
1714 * at throttling processes due to the page distribution throughout
1715 * zones or there is heavy usage of a slow backing device. The
1716 * only option is to throttle from reclaim context which is not ideal
1717 * as there is no guarantee the dirtying process is throttled in the
1718 * same way balance_dirty_pages() manages.
1719 *
1720 * Once a pgdat is flagged PGDAT_WRITEBACK, kswapd will count the number
1721 * of pages under pages flagged for immediate reclaim and stall if any
1722 * are encountered in the nr_immediate check below.
1723 */
1724 if (nr_writeback && nr_writeback == nr_taken)
1725 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
1726
1727 /*
1728 * Legacy memcg will stall in page writeback so avoid forcibly
1729 * stalling here.
1730 */
1731 if (sane_reclaim(sc)) {
1732 /*
1733 * Tag a zone as congested if all the dirty pages scanned were
1734 * backed by a congested BDI and wait_iff_congested will stall.
1735 */
1736 if (nr_dirty && nr_dirty == nr_congested)
1737 set_bit(PGDAT_CONGESTED, &pgdat->flags);
1738
1739 /*
1740 * If dirty pages are scanned that are not queued for IO, it
1741 * implies that flushers are not keeping up. In this case, flag
1742 * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
1743 * reclaim context.
1744 */
1745 if (nr_unqueued_dirty == nr_taken)
1746 set_bit(PGDAT_DIRTY, &pgdat->flags);
1747
1748 /*
1749 * If kswapd scans pages marked for immediate
1750 * reclaim and under writeback (nr_immediate), it implies
1751 * that pages are cycling through the LRU faster than
1752 * they are written so also forcibly stall.
1753 */
1754 if (nr_immediate && current_may_throttle())
1755 congestion_wait(BLK_RW_ASYNC, HZ/10);
1756 }
1757
1758 /*
1759 * Stall direct reclaim for IO completions if underlying BDIs or zone
1760 * is congested. Allow kswapd to continue until it starts encountering
1761 * unqueued dirty pages or cycling through the LRU too quickly.
1762 */
1763 if (!sc->hibernation_mode && !current_is_kswapd() &&
1764 current_may_throttle())
1765 wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10);
1766
1767 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
1768 nr_scanned, nr_reclaimed,
1769 sc->priority, file);
1770 return nr_reclaimed;
1771 }
1772
1773 /*
1774 * This moves pages from the active list to the inactive list.
1775 *
1776 * We move them the other way if the page is referenced by one or more
1777 * processes, from rmap.
1778 *
1779 * If the pages are mostly unmapped, the processing is fast and it is
1780 * appropriate to hold zone_lru_lock across the whole operation. But if
1781 * the pages are mapped, the processing is slow (page_referenced()) so we
1782 * should drop zone_lru_lock around each page. It's impossible to balance
1783 * this, so instead we remove the pages from the LRU while processing them.
1784 * It is safe to rely on PG_active against the non-LRU pages in here because
1785 * nobody will play with that bit on a non-LRU page.
1786 *
1787 * The downside is that we have to touch page->_refcount against each page.
1788 * But we had to alter page->flags anyway.
1789 */
1790
1791 static void move_active_pages_to_lru(struct lruvec *lruvec,
1792 struct list_head *list,
1793 struct list_head *pages_to_free,
1794 enum lru_list lru)
1795 {
1796 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1797 unsigned long pgmoved = 0;
1798 struct page *page;
1799 int nr_pages;
1800
1801 while (!list_empty(list)) {
1802 page = lru_to_page(list);
1803 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1804
1805 VM_BUG_ON_PAGE(PageLRU(page), page);
1806 SetPageLRU(page);
1807
1808 nr_pages = hpage_nr_pages(page);
1809 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1810 list_move(&page->lru, &lruvec->lists[lru]);
1811 pgmoved += nr_pages;
1812
1813 if (put_page_testzero(page)) {
1814 __ClearPageLRU(page);
1815 __ClearPageActive(page);
1816 del_page_from_lru_list(page, lruvec, lru);
1817
1818 if (unlikely(PageCompound(page))) {
1819 spin_unlock_irq(&pgdat->lru_lock);
1820 mem_cgroup_uncharge(page);
1821 (*get_compound_page_dtor(page))(page);
1822 spin_lock_irq(&pgdat->lru_lock);
1823 } else
1824 list_add(&page->lru, pages_to_free);
1825 }
1826 }
1827
1828 if (!is_active_lru(lru))
1829 __count_vm_events(PGDEACTIVATE, pgmoved);
1830 }
1831
1832 static void shrink_active_list(unsigned long nr_to_scan,
1833 struct lruvec *lruvec,
1834 struct scan_control *sc,
1835 enum lru_list lru)
1836 {
1837 unsigned long nr_taken;
1838 unsigned long nr_scanned;
1839 unsigned long vm_flags;
1840 LIST_HEAD(l_hold); /* The pages which were snipped off */
1841 LIST_HEAD(l_active);
1842 LIST_HEAD(l_inactive);
1843 struct page *page;
1844 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1845 unsigned long nr_rotated = 0;
1846 isolate_mode_t isolate_mode = 0;
1847 int file = is_file_lru(lru);
1848 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1849
1850 lru_add_drain();
1851
1852 if (!sc->may_unmap)
1853 isolate_mode |= ISOLATE_UNMAPPED;
1854 if (!sc->may_writepage)
1855 isolate_mode |= ISOLATE_CLEAN;
1856
1857 spin_lock_irq(&pgdat->lru_lock);
1858
1859 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1860 &nr_scanned, sc, isolate_mode, lru);
1861
1862 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1863 reclaim_stat->recent_scanned[file] += nr_taken;
1864
1865 if (global_reclaim(sc))
1866 __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
1867 __count_vm_events(PGREFILL, nr_scanned);
1868
1869 spin_unlock_irq(&pgdat->lru_lock);
1870
1871 while (!list_empty(&l_hold)) {
1872 cond_resched();
1873 page = lru_to_page(&l_hold);
1874 list_del(&page->lru);
1875
1876 if (unlikely(!page_evictable(page))) {
1877 putback_lru_page(page);
1878 continue;
1879 }
1880
1881 if (unlikely(buffer_heads_over_limit)) {
1882 if (page_has_private(page) && trylock_page(page)) {
1883 if (page_has_private(page))
1884 try_to_release_page(page, 0);
1885 unlock_page(page);
1886 }
1887 }
1888
1889 if (page_referenced(page, 0, sc->target_mem_cgroup,
1890 &vm_flags)) {
1891 nr_rotated += hpage_nr_pages(page);
1892 /*
1893 * Identify referenced, file-backed active pages and
1894 * give them one more trip around the active list, so
1895 * that executable code gets a better chance to stay in
1896 * memory under moderate memory pressure. Anon pages
1897 * are not likely to be evicted by use-once streaming
1898 * IO, plus JVM can create lots of anon VM_EXEC pages,
1899 * so we ignore them here.
1900 */
1901 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1902 list_add(&page->lru, &l_active);
1903 continue;
1904 }
1905 }
1906
1907 ClearPageActive(page); /* we are de-activating */
1908 list_add(&page->lru, &l_inactive);
1909 }
1910
1911 /*
1912 * Move pages back to the lru list.
1913 */
1914 spin_lock_irq(&pgdat->lru_lock);
1915 /*
1916 * Count referenced pages from currently used mappings as rotated,
1917 * even though only some of them are actually re-activated. This
1918 * helps balance scan pressure between file and anonymous pages in
1919 * get_scan_count.
1920 */
1921 reclaim_stat->recent_rotated[file] += nr_rotated;
1922
1923 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1924 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
1925 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1926 spin_unlock_irq(&pgdat->lru_lock);
1927
1928 mem_cgroup_uncharge_list(&l_hold);
1929 free_hot_cold_page_list(&l_hold, true);
1930 }
1931
1932 /*
1933 * The inactive anon list should be small enough that the VM never has
1934 * to do too much work.
1935 *
1936 * The inactive file list should be small enough to leave most memory
1937 * to the established workingset on the scan-resistant active list,
1938 * but large enough to avoid thrashing the aggregate readahead window.
1939 *
1940 * Both inactive lists should also be large enough that each inactive
1941 * page has a chance to be referenced again before it is reclaimed.
1942 *
1943 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
1944 * on this LRU, maintained by the pageout code. An inactive_ratio
1945 * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
1946 *
1947 * total target max
1948 * memory ratio inactive
1949 * -------------------------------------
1950 * 10MB 1 5MB
1951 * 100MB 1 50MB
1952 * 1GB 3 250MB
1953 * 10GB 10 0.9GB
1954 * 100GB 31 3GB
1955 * 1TB 101 10GB
1956 * 10TB 320 32GB
1957 */
1958 static bool inactive_list_is_low(struct lruvec *lruvec, bool file)
1959 {
1960 unsigned long inactive_ratio;
1961 unsigned long inactive;
1962 unsigned long active;
1963 unsigned long gb;
1964
1965 /*
1966 * If we don't have swap space, anonymous page deactivation
1967 * is pointless.
1968 */
1969 if (!file && !total_swap_pages)
1970 return false;
1971
1972 inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
1973 active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
1974
1975 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1976 if (gb)
1977 inactive_ratio = int_sqrt(10 * gb);
1978 else
1979 inactive_ratio = 1;
1980
1981 return inactive * inactive_ratio < active;
1982 }
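/*
 * A rough worked example of the check above (illustrative numbers): with
 * about 100GB of pages on this LRU pair, gb = 100 and
 * inactive_ratio = int_sqrt(10 * 100) = 31, so the list is reported as
 * low once inactive * 31 < active, i.e. once the inactive list drops
 * below roughly 1/32nd of the total -- the ~3GB row in the table above.
 */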
1983
1984 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1985 struct lruvec *lruvec, struct scan_control *sc)
1986 {
1987 if (is_active_lru(lru)) {
1988 if (inactive_list_is_low(lruvec, is_file_lru(lru)))
1989 shrink_active_list(nr_to_scan, lruvec, sc, lru);
1990 return 0;
1991 }
1992
1993 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
1994 }
1995
1996 enum scan_balance {
1997 SCAN_EQUAL,
1998 SCAN_FRACT,
1999 SCAN_ANON,
2000 SCAN_FILE,
2001 };
2002
2003 /*
2004 * Determine how aggressively the anon and file LRU lists should be
2005 * scanned. The relative value of each set of LRU lists is determined
2006 * by looking at the fraction of the pages scanned we did rotate back
2007 * onto the active list instead of evicting.
2008 *
2009 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2010 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2011 */
2012 static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
2013 struct scan_control *sc, unsigned long *nr,
2014 unsigned long *lru_pages)
2015 {
2016 int swappiness = mem_cgroup_swappiness(memcg);
2017 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2018 u64 fraction[2];
2019 u64 denominator = 0; /* gcc */
2020 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2021 unsigned long anon_prio, file_prio;
2022 enum scan_balance scan_balance;
2023 unsigned long anon, file;
2024 bool force_scan = false;
2025 unsigned long ap, fp;
2026 enum lru_list lru;
2027 bool some_scanned;
2028 int pass;
2029
2030 /*
2031 * If the zone or memcg is small, nr[l] can be 0. This
2032 * results in no scanning on this priority and a potential
2033 * priority drop. Global direct reclaim can go to the next
2034 * zone and tends to have no problems. Global kswapd is for
2035 * zone balancing and it needs to scan a minimum amount. When
2036 * reclaiming for a memcg, a priority drop can cause high
2037 * latencies, so it's better to scan a minimum amount there as
2038 * well.
2039 */
2040 if (current_is_kswapd()) {
2041 if (!pgdat_reclaimable(pgdat))
2042 force_scan = true;
2043 if (!mem_cgroup_online(memcg))
2044 force_scan = true;
2045 }
2046 if (!global_reclaim(sc))
2047 force_scan = true;
2048
2049 /* If we have no swap space, do not bother scanning anon pages. */
2050 if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2051 scan_balance = SCAN_FILE;
2052 goto out;
2053 }
2054
2055 /*
2056 * Global reclaim will swap to prevent OOM even with no
2057 * swappiness, but memcg users want to use this knob to
2058 * disable swapping for individual groups completely when
2059 * using the memory controller's swap limit feature would be
2060 * too expensive.
2061 */
2062 if (!global_reclaim(sc) && !swappiness) {
2063 scan_balance = SCAN_FILE;
2064 goto out;
2065 }
2066
2067 /*
2068 * Do not apply any pressure balancing cleverness when the
2069 * system is close to OOM, scan both anon and file equally
2070 * (unless the swappiness setting disagrees with swapping).
2071 */
2072 if (!sc->priority && swappiness) {
2073 scan_balance = SCAN_EQUAL;
2074 goto out;
2075 }
2076
2077 /*
2078 * Prevent the reclaimer from falling into the cache trap: as
2079 * cache pages start out inactive, every cache fault will tip
2080 * the scan balance towards the file LRU. And as the file LRU
2081 * shrinks, so does the window for rotation from references.
2082 * This means we have a runaway feedback loop where a tiny
2083 * thrashing file LRU becomes infinitely more attractive than
2084 * anon pages. Try to detect this based on file LRU size.
2085 */
2086 if (global_reclaim(sc)) {
2087 unsigned long pgdatfile;
2088 unsigned long pgdatfree;
2089 int z;
2090 unsigned long total_high_wmark = 0;
2091
2092 pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2093 pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
2094 node_page_state(pgdat, NR_INACTIVE_FILE);
2095
2096 for (z = 0; z < MAX_NR_ZONES; z++) {
2097 struct zone *zone = &pgdat->node_zones[z];
2098 if (!populated_zone(zone))
2099 continue;
2100
2101 total_high_wmark += high_wmark_pages(zone);
2102 }
2103
2104 if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
2105 scan_balance = SCAN_ANON;
2106 goto out;
2107 }
2108 }
2109
2110 /*
2111 * If there is enough inactive page cache, i.e. if the size of the
2112 * inactive list is greater than that of the active list *and* the
2113 * inactive list actually has some pages to scan on this priority, we
2114 * do not reclaim anything from the anonymous working set right now.
2115 * Without the second condition we could end up never scanning an
2116 * lruvec even if it has plenty of old anonymous pages unless the
2117 * system is under heavy pressure.
2118 */
2119 if (!inactive_list_is_low(lruvec, true) &&
2120 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
2121 scan_balance = SCAN_FILE;
2122 goto out;
2123 }
2124
2125 scan_balance = SCAN_FRACT;
2126
2127 /*
2128 * With swappiness at 100, anonymous and file have the same priority.
2129 * This scanning priority is essentially the inverse of IO cost.
2130 */
2131 anon_prio = swappiness;
2132 file_prio = 200 - anon_prio;
2133
2134 /*
2135 * OK, so we have swap space and a fair amount of page cache
2136 * pages. We use the recently rotated / recently scanned
2137 * ratios to determine how valuable each cache is.
2138 *
2139 * Because workloads change over time (and to avoid overflow)
2140 * we keep these statistics as a floating average, which ends
2141 * up weighing recent references more than old ones.
2142 *
2143 * anon in [0], file in [1]
2144 */
2145
2146 anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
2147 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
2148 file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
2149 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
2150
2151 spin_lock_irq(&pgdat->lru_lock);
2152 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
2153 reclaim_stat->recent_scanned[0] /= 2;
2154 reclaim_stat->recent_rotated[0] /= 2;
2155 }
2156
2157 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
2158 reclaim_stat->recent_scanned[1] /= 2;
2159 reclaim_stat->recent_rotated[1] /= 2;
2160 }
2161
2162 /*
2163 * The amount of pressure on anon vs file pages is inversely
2164 * proportional to the fraction of recently scanned pages on
2165 * each list that were recently referenced and in active use.
2166 */
2167 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
2168 ap /= reclaim_stat->recent_rotated[0] + 1;
2169
2170 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
2171 fp /= reclaim_stat->recent_rotated[1] + 1;
2172 spin_unlock_irq(&pgdat->lru_lock);
2173
2174 fraction[0] = ap;
2175 fraction[1] = fp;
2176 denominator = ap + fp + 1;
2177 out:
2178 some_scanned = false;
2179 /* Only use force_scan on second pass. */
2180 for (pass = 0; !some_scanned && pass < 2; pass++) {
2181 *lru_pages = 0;
2182 for_each_evictable_lru(lru) {
2183 int file = is_file_lru(lru);
2184 unsigned long size;
2185 unsigned long scan;
2186
2187 size = lruvec_lru_size(lruvec, lru);
2188 scan = size >> sc->priority;
2189
2190 if (!scan && pass && force_scan)
2191 scan = min(size, SWAP_CLUSTER_MAX);
2192
2193 switch (scan_balance) {
2194 case SCAN_EQUAL:
2195 /* Scan lists relative to size */
2196 break;
2197 case SCAN_FRACT:
2198 /*
2199 * Scan types proportional to swappiness and
2200 * their relative recent reclaim efficiency.
2201 */
2202 scan = div64_u64(scan * fraction[file],
2203 denominator);
2204 break;
2205 case SCAN_FILE:
2206 case SCAN_ANON:
2207 /* Scan one type exclusively */
2208 if ((scan_balance == SCAN_FILE) != file) {
2209 size = 0;
2210 scan = 0;
2211 }
2212 break;
2213 default:
2214 /* Look ma, no brain */
2215 BUG();
2216 }
2217
2218 *lru_pages += size;
2219 nr[lru] = scan;
2220
2221 /*
2222 * Skip the second pass and don't force_scan,
2223 * if we found something to scan.
2224 */
2225 some_scanned |= !!scan;
2226 }
2227 }
2228 }
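/*
 * A rough worked example of the SCAN_FRACT arithmetic above, with made-up
 * numbers: swappiness 60 gives anon_prio = 60 and file_prio = 140. If the
 * recent_scanned/recent_rotated averages are 1000/100 for anon and
 * 1000/800 for file, then ap = 60 * 1001 / 101 ~= 594 and
 * fp = 140 * 1001 / 801 ~= 174, so roughly 77% of the pressure goes to
 * the anon lists. Each list is then asked for
 * (lru_size >> priority) * fraction / denominator pages per pass.
 */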
2229
2230 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2231 static void init_tlb_ubc(void)
2232 {
2233 /*
2234 * This deliberately does not clear the cpumask as it's expensive
2235 * and unnecessary. If there happens to be data in there then the
2236 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
2237 * then it will be cleared.
2238 */
2239 current->tlb_ubc.flush_required = false;
2240 }
2241 #else
2242 static inline void init_tlb_ubc(void)
2243 {
2244 }
2245 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
2246
2247 /*
2248 * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
2249 */
2250 static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
2251 struct scan_control *sc, unsigned long *lru_pages)
2252 {
2253 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
2254 unsigned long nr[NR_LRU_LISTS];
2255 unsigned long targets[NR_LRU_LISTS];
2256 unsigned long nr_to_scan;
2257 enum lru_list lru;
2258 unsigned long nr_reclaimed = 0;
2259 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2260 struct blk_plug plug;
2261 bool scan_adjusted;
2262
2263 get_scan_count(lruvec, memcg, sc, nr, lru_pages);
2264
2265 /* Record the original scan target for proportional adjustments later */
2266 memcpy(targets, nr, sizeof(nr));
2267
2268 /*
2269 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2270 * event that can occur when there is little memory pressure e.g.
2271 * multiple streaming readers/writers. Hence, we do not abort scanning
2272 * when the requested number of pages are reclaimed when scanning at
2273 * DEF_PRIORITY on the assumption that the fact we are direct
2274 * reclaiming implies that kswapd is not keeping up and it is best to
2275 * do a batch of work at once. For memcg reclaim one check is made to
2276 * abort proportional reclaim if either the file or anon lru has already
2277 * dropped to zero at the first pass.
2278 */
2279 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2280 sc->priority == DEF_PRIORITY);
2281
2282 init_tlb_ubc();
2283
2284 blk_start_plug(&plug);
2285 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2286 nr[LRU_INACTIVE_FILE]) {
2287 unsigned long nr_anon, nr_file, percentage;
2288 unsigned long nr_scanned;
2289
2290 for_each_evictable_lru(lru) {
2291 if (nr[lru]) {
2292 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2293 nr[lru] -= nr_to_scan;
2294
2295 nr_reclaimed += shrink_list(lru, nr_to_scan,
2296 lruvec, sc);
2297 }
2298 }
2299
2300 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2301 continue;
2302
2303 /*
2304 * For kswapd and memcg, reclaim at least the number of pages
2305 * requested. Ensure that the anon and file LRUs are scanned
2306 * proportionally to what was requested by get_scan_count(). We
2307 * stop reclaiming one LRU and reduce the amount of scanning
2308 * proportionally to the original scan target.
2309 */
2310 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2311 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2312
2313 /*
2314 * It's just vindictive to attack the larger once the smaller
2315 * has gone to zero. And given the way we stop scanning the
2316 * smaller below, this makes sure that we only make one nudge
2317 * towards proportionality once we've got nr_to_reclaim.
2318 */
2319 if (!nr_file || !nr_anon)
2320 break;
2321
2322 if (nr_file > nr_anon) {
2323 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2324 targets[LRU_ACTIVE_ANON] + 1;
2325 lru = LRU_BASE;
2326 percentage = nr_anon * 100 / scan_target;
2327 } else {
2328 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2329 targets[LRU_ACTIVE_FILE] + 1;
2330 lru = LRU_FILE;
2331 percentage = nr_file * 100 / scan_target;
2332 }
2333
2334 /* Stop scanning the smaller of the LRU */
2335 nr[lru] = 0;
2336 nr[lru + LRU_ACTIVE] = 0;
2337
2338 /*
2339 * Recalculate the other LRU scan count based on its original
2340 * scan target and the percentage scanning already complete
2341 */
2342 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2343 nr_scanned = targets[lru] - nr[lru];
2344 nr[lru] = targets[lru] * (100 - percentage) / 100;
2345 nr[lru] -= min(nr[lru], nr_scanned);
2346
2347 lru += LRU_ACTIVE;
2348 nr_scanned = targets[lru] - nr[lru];
2349 nr[lru] = targets[lru] * (100 - percentage) / 100;
2350 nr[lru] -= min(nr[lru], nr_scanned);
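/*
 * Illustrative numbers for the adjustment above: if the anon target was
 * 200 pages of which 50 are still unscanned, percentage ~= 24. Anon
 * scanning stops, and a file list that started with a target of 1000 and
 * has 600 left is cut to 1000 * 76 / 100 - 400 = 360 remaining, keeping
 * the two types roughly proportional to the original request.
 */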
2351
2352 scan_adjusted = true;
2353 }
2354 blk_finish_plug(&plug);
2355 sc->nr_reclaimed += nr_reclaimed;
2356
2357 /*
2358 * Even if we did not try to evict anon pages at all, we want to
2359 * rebalance the anon lru active/inactive ratio.
2360 */
2361 if (inactive_list_is_low(lruvec, false))
2362 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2363 sc, LRU_ACTIVE_ANON);
2364
2365 throttle_vm_writeout(sc->gfp_mask);
2366 }
2367
2368 /* Use reclaim/compaction for costly allocs or under memory pressure */
2369 static bool in_reclaim_compaction(struct scan_control *sc)
2370 {
2371 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2372 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2373 sc->priority < DEF_PRIORITY - 2))
2374 return true;
2375
2376 return false;
2377 }
2378
2379 /*
2380 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2381 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2382 * true if more pages should be reclaimed such that when the page allocator
2383 * calls try_to_compact_zone() that it will have enough free pages to succeed.
2384 * It will give up earlier than that if there is difficulty reclaiming pages.
2385 */
2386 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2387 unsigned long nr_reclaimed,
2388 unsigned long nr_scanned,
2389 struct scan_control *sc)
2390 {
2391 unsigned long pages_for_compaction;
2392 unsigned long inactive_lru_pages;
2393 int z;
2394
2395 /* If not in reclaim/compaction mode, stop */
2396 if (!in_reclaim_compaction(sc))
2397 return false;
2398
2399 /* Consider stopping depending on scan and reclaim activity */
2400 if (sc->gfp_mask & __GFP_REPEAT) {
2401 /*
2402 * For __GFP_REPEAT allocations, stop reclaiming if the
2403 * full LRU list has been scanned and we are still failing
2404 * to reclaim pages. This full LRU scan is potentially
2405 * expensive but a __GFP_REPEAT caller really wants to succeed
2406 */
2407 if (!nr_reclaimed && !nr_scanned)
2408 return false;
2409 } else {
2410 /*
2411 * For non-__GFP_REPEAT allocations which can presumably
2412 * fail without consequence, stop if we failed to reclaim
2413 * any pages from the last SWAP_CLUSTER_MAX number of
2414 * pages that were scanned. This will return to the
2415 * caller faster at the risk that reclaim/compaction and
2416 * the resulting allocation attempt fail
2417 */
2418 if (!nr_reclaimed)
2419 return false;
2420 }
2421
2422 /*
2423 * If we have not reclaimed enough pages for compaction and the
2424 * inactive lists are large enough, continue reclaiming
2425 */
2426 pages_for_compaction = (2UL << sc->order);
2427 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2428 if (get_nr_swap_pages() > 0)
2429 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2430 if (sc->nr_reclaimed < pages_for_compaction &&
2431 inactive_lru_pages > pages_for_compaction)
2432 return true;
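/*
 * For scale (illustrative): an order-9 THP request makes
 * pages_for_compaction = 2 << 9 = 1024 pages, so reclaim continues while
 * fewer than 1024 pages have been reclaimed and the eligible inactive
 * lists still hold more than 1024 pages.
 */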
2433
2434 /* If compaction would go ahead or the allocation would succeed, stop */
2435 for (z = 0; z <= sc->reclaim_idx; z++) {
2436 struct zone *zone = &pgdat->node_zones[z];
2437 if (!populated_zone(zone))
2438 continue;
2439
2440 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2441 case COMPACT_PARTIAL:
2442 case COMPACT_CONTINUE:
2443 return false;
2444 default:
2445 /* check next zone */
2446 ;
2447 }
2448 }
2449 return true;
2450 }
2451
2452 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2453 {
2454 struct reclaim_state *reclaim_state = current->reclaim_state;
2455 unsigned long nr_reclaimed, nr_scanned;
2456 bool reclaimable = false;
2457
2458 do {
2459 struct mem_cgroup *root = sc->target_mem_cgroup;
2460 struct mem_cgroup_reclaim_cookie reclaim = {
2461 .pgdat = pgdat,
2462 .priority = sc->priority,
2463 };
2464 unsigned long node_lru_pages = 0;
2465 struct mem_cgroup *memcg;
2466
2467 nr_reclaimed = sc->nr_reclaimed;
2468 nr_scanned = sc->nr_scanned;
2469
2470 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2471 do {
2472 unsigned long lru_pages;
2473 unsigned long reclaimed;
2474 unsigned long scanned;
2475
2476 if (mem_cgroup_low(root, memcg)) {
2477 if (!sc->may_thrash)
2478 continue;
2479 mem_cgroup_events(memcg, MEMCG_LOW, 1);
2480 }
2481
2482 reclaimed = sc->nr_reclaimed;
2483 scanned = sc->nr_scanned;
2484
2485 shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2486 node_lru_pages += lru_pages;
2487
2488 if (!global_reclaim(sc))
2489 shrink_slab(sc->gfp_mask, pgdat->node_id,
2490 memcg, sc->nr_scanned - scanned,
2491 lru_pages);
2492
2493 /* Record the group's reclaim efficiency */
2494 vmpressure(sc->gfp_mask, memcg, false,
2495 sc->nr_scanned - scanned,
2496 sc->nr_reclaimed - reclaimed);
2497
2498 /*
2499 * Direct reclaim and kswapd have to scan all memory
2500 * cgroups to fulfill the overall scan target for the
2501 * node.
2502 *
2503 * Limit reclaim, on the other hand, only cares about
2504 * nr_to_reclaim pages to be reclaimed and it will
2505 * retry with decreasing priority if one round over the
2506 * whole hierarchy is not sufficient.
2507 */
2508 if (!global_reclaim(sc) &&
2509 sc->nr_reclaimed >= sc->nr_to_reclaim) {
2510 mem_cgroup_iter_break(root, memcg);
2511 break;
2512 }
2513 } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
2514
2515 /*
2516 * Shrink the slab caches in the same proportion that
2517 * the eligible LRU pages were scanned.
2518 */
2519 if (global_reclaim(sc))
2520 shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
2521 sc->nr_scanned - nr_scanned,
2522 node_lru_pages);
2523
2524 if (reclaim_state) {
2525 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2526 reclaim_state->reclaimed_slab = 0;
2527 }
2528
2529 /* Record the subtree's reclaim efficiency */
2530 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
2531 sc->nr_scanned - nr_scanned,
2532 sc->nr_reclaimed - nr_reclaimed);
2533
2534 if (sc->nr_reclaimed - nr_reclaimed)
2535 reclaimable = true;
2536
2537 } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
2538 sc->nr_scanned - nr_scanned, sc));
2539
2540 return reclaimable;
2541 }
2542
2543 /*
2544 * Returns true if compaction should go ahead for a high-order request, or
2545 * the high-order allocation would succeed without compaction.
2546 */
2547 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2548 {
2549 unsigned long watermark;
2550 bool watermark_ok;
2551
2552 /*
2553 * Compaction takes time to run and there are potentially other
2554 * callers using the pages just freed. Continue reclaiming until
2555 * there is a buffer of free pages available to give compaction
2556 * a reasonable chance of completing and allocating the page
2557 */
2558 watermark = high_wmark_pages(zone) + (2UL << sc->order);
2559 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
2560
2561 /*
2562 * If compaction is deferred, reclaim up to a point where
2563 * compaction will have a chance of success when re-enabled
2564 */
2565 if (compaction_deferred(zone, sc->order))
2566 return watermark_ok;
2567
2568 /*
2569 * If compaction is not ready to start and allocation is not likely
2570 * to succeed without it, then keep reclaiming.
2571 */
2572 if (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx) == COMPACT_SKIPPED)
2573 return false;
2574
2575 return watermark_ok;
2576 }
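/*
 * Rough example of the watermark above (assuming compaction is neither
 * deferred nor unsuitable): for an order-9 request against a zone whose
 * high watermark is H pages, direct reclaim keeps working that zone until
 * it holds more than H + 1024 free pages, at which point compaction is
 * trusted to take over.
 */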
2577
2578 /*
2579 * This is the direct reclaim path, for page-allocating processes. We only
2580 * try to reclaim pages from zones which will satisfy the caller's allocation
2581 * request.
2582 *
2583 * If a zone is deemed to be full of pinned pages then just give it a light
2584 * scan then give up on it.
2585 */
2586 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2587 {
2588 struct zoneref *z;
2589 struct zone *zone;
2590 unsigned long nr_soft_reclaimed;
2591 unsigned long nr_soft_scanned;
2592 gfp_t orig_mask;
2593 pg_data_t *last_pgdat = NULL;
2594
2595 /*
2596 * If the number of buffer_heads in the machine exceeds the maximum
2597 * allowed level, force direct reclaim to scan the highmem zone as
2598 * highmem pages could be pinning lowmem pages storing buffer_heads
2599 */
2600 orig_mask = sc->gfp_mask;
2601 if (buffer_heads_over_limit) {
2602 sc->gfp_mask |= __GFP_HIGHMEM;
2603 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
2604 }
2605
2606 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2607 sc->reclaim_idx, sc->nodemask) {
2608 /*
2609 * Take care that memory controller reclaim has only a small
2610 * influence on the global LRU.
2611 */
2612 if (global_reclaim(sc)) {
2613 if (!cpuset_zone_allowed(zone,
2614 GFP_KERNEL | __GFP_HARDWALL))
2615 continue;
2616
2617 if (sc->priority != DEF_PRIORITY &&
2618 !pgdat_reclaimable(zone->zone_pgdat))
2619 continue; /* Let kswapd poll it */
2620
2621 /*
2622 * If we already have plenty of memory free for
2623 * compaction in this zone, don't free any more.
2624 * Even though compaction is invoked for any
2625 * non-zero order, only frequent costly order
2626 * reclamation is disruptive enough to become a
2627 * noticeable problem, like transparent huge
2628 * page allocations.
2629 */
2630 if (IS_ENABLED(CONFIG_COMPACTION) &&
2631 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
2632 compaction_ready(zone, sc)) {
2633 sc->compaction_ready = true;
2634 continue;
2635 }
2636
2637 /*
2638 * Shrink each node in the zonelist once. If the
2639 * zonelist is ordered by zone (not the default) then a
2640 * node may be shrunk multiple times but in that case
2641 * the user prefers lower zones being preserved.
2642 */
2643 if (zone->zone_pgdat == last_pgdat)
2644 continue;
2645
2646 /*
2647 * This steals pages from memory cgroups over softlimit
2648 * and returns the number of reclaimed pages and
2649 * scanned pages. This works for global memory pressure
2650 * and balancing, not for a memcg's limit.
2651 */
2652 nr_soft_scanned = 0;
2653 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
2654 sc->order, sc->gfp_mask,
2655 &nr_soft_scanned);
2656 sc->nr_reclaimed += nr_soft_reclaimed;
2657 sc->nr_scanned += nr_soft_scanned;
2658 /* need some check to avoid more shrink_node() */
2659 }
2660
2661 /* See comment about same check for global reclaim above */
2662 if (zone->zone_pgdat == last_pgdat)
2663 continue;
2664 last_pgdat = zone->zone_pgdat;
2665 shrink_node(zone->zone_pgdat, sc);
2666 }
2667
2668 /*
2669 * Restore to original mask to avoid the impact on the caller if we
2670 * promoted it to __GFP_HIGHMEM.
2671 */
2672 sc->gfp_mask = orig_mask;
2673 }
2674
2675 /*
2676 * This is the main entry point to direct page reclaim.
2677 *
2678 * If a full scan of the inactive list fails to free enough memory then we
2679 * are "out of memory" and something needs to be killed.
2680 *
2681 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2682 * high - the zone may be full of dirty or under-writeback pages, which this
2683 * caller can't do much about. We kick the writeback threads and take explicit
2684 * naps in the hope that some of these pages can be written. But if the
2685 * allocating task holds filesystem locks which prevent writeout this might not
2686 * work, and the allocation attempt will fail.
2687 *
2688 * returns: 0, if no pages reclaimed
2689 * else, the number of pages reclaimed
2690 */
2691 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2692 struct scan_control *sc)
2693 {
2694 int initial_priority = sc->priority;
2695 unsigned long total_scanned = 0;
2696 unsigned long writeback_threshold;
2697 retry:
2698 delayacct_freepages_start();
2699
2700 if (global_reclaim(sc))
2701 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
2702
2703 do {
2704 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2705 sc->priority);
2706 sc->nr_scanned = 0;
2707 shrink_zones(zonelist, sc);
2708
2709 total_scanned += sc->nr_scanned;
2710 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2711 break;
2712
2713 if (sc->compaction_ready)
2714 break;
2715
2716 /*
2717 * If we're having trouble reclaiming, start doing
2718 * writepage even in laptop mode.
2719 */
2720 if (sc->priority < DEF_PRIORITY - 2)
2721 sc->may_writepage = 1;
2722
2723 /*
2724 * Try to write back as many pages as we just scanned. This
2725 * tends to cause slow streaming writers to write data to the
2726 * disk smoothly, at the dirtying rate, which is nice. But
2727 * that's undesirable in laptop mode, where we *want* lumpy
2728 * writeout. So in laptop mode, write out the whole world.
2729 */
2730 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2731 if (total_scanned > writeback_threshold) {
2732 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2733 WB_REASON_TRY_TO_FREE_PAGES);
2734 sc->may_writepage = 1;
2735 }
2736 } while (--sc->priority >= 0);
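/*
 * Illustrative shape of the loop above: starting from DEF_PRIORITY (12 in
 * this kernel), the first pass asks each LRU for lru_size >> 12 pages,
 * i.e. about 1/4096th of the list, and every unsuccessful pass halves the
 * shift until priority 0 scans the whole list.
 */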
2737
2738 delayacct_freepages_end();
2739
2740 if (sc->nr_reclaimed)
2741 return sc->nr_reclaimed;
2742
2743 /* Aborted reclaim to try compaction? don't OOM, then */
2744 if (sc->compaction_ready)
2745 return 1;
2746
2747 /* Untapped cgroup reserves? Don't OOM, retry. */
2748 if (!sc->may_thrash) {
2749 sc->priority = initial_priority;
2750 sc->may_thrash = 1;
2751 goto retry;
2752 }
2753
2754 return 0;
2755 }
2756
2757 static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2758 {
2759 struct zone *zone;
2760 unsigned long pfmemalloc_reserve = 0;
2761 unsigned long free_pages = 0;
2762 int i;
2763 bool wmark_ok;
2764
2765 for (i = 0; i <= ZONE_NORMAL; i++) {
2766 zone = &pgdat->node_zones[i];
2767 if (!populated_zone(zone) ||
2768 pgdat_reclaimable_pages(pgdat) == 0)
2769 continue;
2770
2771 pfmemalloc_reserve += min_wmark_pages(zone);
2772 free_pages += zone_page_state(zone, NR_FREE_PAGES);
2773 }
2774
2775 /* If there are no reserves (unexpected config) then do not throttle */
2776 if (!pfmemalloc_reserve)
2777 return true;
2778
2779 wmark_ok = free_pages > pfmemalloc_reserve / 2;
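/*
 * Example with made-up numbers: if the DMA32 and Normal zones have min
 * watermarks of 4000 and 16000 pages, pfmemalloc_reserve is 20000 and
 * direct reclaimers are throttled once free pages across those zones are
 * no more than 10000, i.e. half the reserve.
 */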
2780
2781 /* kswapd must be awake if processes are being throttled */
2782 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2783 pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
2784 (enum zone_type)ZONE_NORMAL);
2785 wake_up_interruptible(&pgdat->kswapd_wait);
2786 }
2787
2788 return wmark_ok;
2789 }
2790
2791 /*
2792 * Throttle direct reclaimers if backing storage is backed by the network
2793 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2794 * depleted. kswapd will continue to make progress and wake the processes
2795 * when the low watermark is reached.
2796 *
2797 * Returns true if a fatal signal was delivered during throttling. If this
2798 * happens, the page allocator should not consider triggering the OOM killer.
2799 */
2800 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2801 nodemask_t *nodemask)
2802 {
2803 struct zoneref *z;
2804 struct zone *zone;
2805 pg_data_t *pgdat = NULL;
2806
2807 /*
2808 * Kernel threads should not be throttled as they may be indirectly
2809 * responsible for cleaning pages necessary for reclaim to make forward
2810 * progress. kjournald for example may enter direct reclaim while
2811 * committing a transaction where throttling it could force other
2812 * processes to block on log_wait_commit().
2813 */
2814 if (current->flags & PF_KTHREAD)
2815 goto out;
2816
2817 /*
2818 * If a fatal signal is pending, this process should not throttle.
2819 * It should return quickly so it can exit and free its memory
2820 */
2821 if (fatal_signal_pending(current))
2822 goto out;
2823
2824 /*
2825 * Check if the pfmemalloc reserves are ok by finding the first node
2826 * with a usable ZONE_NORMAL or lower zone. The expectation is that
2827 * GFP_KERNEL will be required for allocating network buffers when
2828 * swapping over the network so ZONE_HIGHMEM is unusable.
2829 *
2830 * Throttling is based on the first usable node and throttled processes
2831 * wait on a queue until kswapd makes progress and wakes them. There
2832 * is an affinity then between processes waking up and where reclaim
2833 * progress has been made assuming the process wakes on the same node.
2834 * More importantly, processes running on remote nodes will not compete
2835 * for remote pfmemalloc reserves and processes on different nodes
2836 * should make reasonable progress.
2837 */
2838 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2839 gfp_zone(gfp_mask), nodemask) {
2840 if (zone_idx(zone) > ZONE_NORMAL)
2841 continue;
2842
2843 /* Throttle based on the first usable node */
2844 pgdat = zone->zone_pgdat;
2845 if (pfmemalloc_watermark_ok(pgdat))
2846 goto out;
2847 break;
2848 }
2849
2850 /* If no zone was usable by the allocation flags then do not throttle */
2851 if (!pgdat)
2852 goto out;
2853
2854 /* Account for the throttling */
2855 count_vm_event(PGSCAN_DIRECT_THROTTLE);
2856
2857 /*
2858 * If the caller cannot enter the filesystem, it's possible that it
2859 * is due to the caller holding an FS lock or performing a journal
2860 * transaction in the case of a filesystem like ext[3|4]. In this case,
2861 * it is not safe to block on pfmemalloc_wait as kswapd could be
2862 * blocked waiting on the same lock. Instead, throttle for up to a
2863 * second before continuing.
2864 */
2865 if (!(gfp_mask & __GFP_FS)) {
2866 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2867 pfmemalloc_watermark_ok(pgdat), HZ);
2868
2869 goto check_pending;
2870 }
2871
2872 /* Throttle until kswapd wakes the process */
2873 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2874 pfmemalloc_watermark_ok(pgdat));
2875
2876 check_pending:
2877 if (fatal_signal_pending(current))
2878 return true;
2879
2880 out:
2881 return false;
2882 }
2883
2884 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2885 gfp_t gfp_mask, nodemask_t *nodemask)
2886 {
2887 unsigned long nr_reclaimed;
2888 struct scan_control sc = {
2889 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2890 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
2891 .reclaim_idx = gfp_zone(gfp_mask),
2892 .order = order,
2893 .nodemask = nodemask,
2894 .priority = DEF_PRIORITY,
2895 .may_writepage = !laptop_mode,
2896 .may_unmap = 1,
2897 .may_swap = 1,
2898 };
2899
2900 /*
2901 * Do not enter reclaim if fatal signal was delivered while throttled.
2902 * 1 is returned so that the page allocator does not OOM kill at this
2903 * point.
2904 */
2905 if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
2906 return 1;
2907
2908 trace_mm_vmscan_direct_reclaim_begin(order,
2909 sc.may_writepage,
2910 gfp_mask,
2911 sc.reclaim_idx);
2912
2913 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2914
2915 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2916
2917 return nr_reclaimed;
2918 }
2919
2920 #ifdef CONFIG_MEMCG
2921
2922 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
2923 gfp_t gfp_mask, bool noswap,
2924 pg_data_t *pgdat,
2925 unsigned long *nr_scanned)
2926 {
2927 struct scan_control sc = {
2928 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2929 .target_mem_cgroup = memcg,
2930 .may_writepage = !laptop_mode,
2931 .may_unmap = 1,
2932 .reclaim_idx = MAX_NR_ZONES - 1,
2933 .may_swap = !noswap,
2934 };
2935 unsigned long lru_pages;
2936
2937 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2938 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2939
2940 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
2941 sc.may_writepage,
2942 sc.gfp_mask,
2943 sc.reclaim_idx);
2944
2945 /*
2946 * NOTE: Although we can get the priority field, using it
2947 * here is not a good idea, since it limits the pages we can scan.
2948 * If we don't reclaim here, the shrink_node from balance_pgdat
2949 * will pick up pages from other mem cgroups as well. We hack
2950 * the priority and make it zero.
2951 */
2952 shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
2953
2954 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2955
2956 *nr_scanned = sc.nr_scanned;
2957 return sc.nr_reclaimed;
2958 }
2959
2960 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2961 unsigned long nr_pages,
2962 gfp_t gfp_mask,
2963 bool may_swap)
2964 {
2965 struct zonelist *zonelist;
2966 unsigned long nr_reclaimed;
2967 int nid;
2968 struct scan_control sc = {
2969 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
2970 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2971 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2972 .reclaim_idx = MAX_NR_ZONES - 1,
2973 .target_mem_cgroup = memcg,
2974 .priority = DEF_PRIORITY,
2975 .may_writepage = !laptop_mode,
2976 .may_unmap = 1,
2977 .may_swap = may_swap,
2978 };
2979
2980 /*
2981 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2982 * care which node we get pages from. So the node where we start the
2983 * scan does not need to be the current node.
2984 */
2985 nid = mem_cgroup_select_victim_node(memcg);
2986
2987 zonelist = NODE_DATA(nid)->node_zonelists;
2988
2989 trace_mm_vmscan_memcg_reclaim_begin(0,
2990 sc.may_writepage,
2991 sc.gfp_mask,
2992 sc.reclaim_idx);
2993
2994 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2995
2996 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2997
2998 return nr_reclaimed;
2999 }
3000 #endif
3001
3002 static void age_active_anon(struct pglist_data *pgdat,
3003 struct scan_control *sc)
3004 {
3005 struct mem_cgroup *memcg;
3006
3007 if (!total_swap_pages)
3008 return;
3009
3010 memcg = mem_cgroup_iter(NULL, NULL, NULL);
3011 do {
3012 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
3013
3014 if (inactive_list_is_low(lruvec, false))
3015 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
3016 sc, LRU_ACTIVE_ANON);
3017
3018 memcg = mem_cgroup_iter(NULL, memcg, NULL);
3019 } while (memcg);
3020 }
3021
3022 static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
3023 {
3024 unsigned long mark = high_wmark_pages(zone);
3025
3026 if (!zone_watermark_ok_safe(zone, order, mark, classzone_idx))
3027 return false;
3028
3029 /*
3030 * If any eligible zone is balanced then the node is not considered
3031 * to be congested or dirty
3032 */
3033 clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
3034 clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
3035
3036 return true;
3037 }
3038
3039 /*
3040 * Prepare kswapd for sleeping. This verifies that there are no processes
3041 * waiting in throttle_direct_reclaim() and that watermarks have been met.
3042 *
3043 * Returns true if kswapd is ready to sleep
3044 */
3045 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
3046 {
3047 int i;
3048
3049 /*
3050 * The throttled processes are normally woken up in balance_pgdat() as
3051 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
3052 * race between when kswapd checks the watermarks and a process gets
3053 * throttled. There is also a potential race if processes get
3054 * throttled, kswapd wakes, a large process exits thereby balancing the
3055 * zones, which causes kswapd to exit balance_pgdat() before reaching
3056 * the wake up checks. If kswapd is going to sleep, no process should
3057 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
3058 * the wake up is premature, processes will wake kswapd and get
3059 * throttled again. The difference from wake ups in balance_pgdat() is
3060 * that here we are under prepare_to_wait().
3061 */
3062 if (waitqueue_active(&pgdat->pfmemalloc_wait))
3063 wake_up_all(&pgdat->pfmemalloc_wait);
3064
3065 for (i = 0; i <= classzone_idx; i++) {
3066 struct zone *zone = pgdat->node_zones + i;
3067
3068 if (!populated_zone(zone))
3069 continue;
3070
3071 if (!zone_balanced(zone, order, classzone_idx))
3072 return false;
3073 }
3074
3075 return true;
3076 }
3077
3078 /*
3079 * kswapd shrinks a node of pages that are at or below the highest usable
3080 * zone that is currently unbalanced.
3081 *
3082 * Returns true if kswapd scanned at least the requested number of pages to
3083 * reclaim or if the lack of progress was due to pages under writeback.
3084 * This is used to determine if the scanning priority needs to be raised.
3085 */
3086 static bool kswapd_shrink_node(pg_data_t *pgdat,
3087 struct scan_control *sc)
3088 {
3089 struct zone *zone;
3090 int z;
3091
3092 /* Reclaim a number of pages proportional to the number of zones */
3093 sc->nr_to_reclaim = 0;
3094 for (z = 0; z <= sc->reclaim_idx; z++) {
3095 zone = pgdat->node_zones + z;
3096 if (!populated_zone(zone))
3097 continue;
3098
3099 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
3100 }
3101
3102 /*
3103 * Historically care was taken to put equal pressure on all zones but
3104 * now pressure is applied based on node LRU order.
3105 */
3106 shrink_node(pgdat, sc);
3107
3108 /*
3109 * Fragmentation may mean that the system cannot be rebalanced for
3110 * high-order allocations. If twice the allocation size has been
3111 * reclaimed then recheck watermarks only at order-0 to prevent
3112 * excessive reclaim. Assume that a process requesting a high-order
3113 * allocation can direct reclaim/compact.
3114 */
3115 if (sc->order && sc->nr_reclaimed >= 2UL << sc->order)
3116 sc->order = 0;
3117
3118 return sc->nr_scanned >= sc->nr_to_reclaim;
3119 }
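/*
 * Illustrative target (made-up watermarks): on a node with two populated
 * eligible zones whose high watermarks are 2000 and 30000 pages, a single
 * kswapd_shrink_node() pass aims to reclaim 32000 pages; a tiny zone
 * still contributes at least SWAP_CLUSTER_MAX (32) pages to the target.
 */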
3120
3121 /*
3122 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
3123 * that are eligible for use by the caller until at least one zone is
3124 * balanced.
3125 *
3126 * Returns the order kswapd finished reclaiming at.
3127 *
3128 * kswapd scans the zones in the highmem->normal->dma direction. It skips
3129 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
3130 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
3131 * or lower is eligible for reclaim until at least one usable zone is
3132 * balanced.
3133 */
3134 static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
3135 {
3136 int i;
3137 unsigned long nr_soft_reclaimed;
3138 unsigned long nr_soft_scanned;
3139 struct zone *zone;
3140 struct scan_control sc = {
3141 .gfp_mask = GFP_KERNEL,
3142 .order = order,
3143 .priority = DEF_PRIORITY,
3144 .may_writepage = !laptop_mode,
3145 .may_unmap = 1,
3146 .may_swap = 1,
3147 };
3148 count_vm_event(PAGEOUTRUN);
3149
3150 do {
3151 bool raise_priority = true;
3152
3153 sc.nr_reclaimed = 0;
3154 sc.reclaim_idx = classzone_idx;
3155
3156 /*
3157 * If the number of buffer_heads exceeds the maximum allowed
3158 * then consider reclaiming from all zones. This has a dual
3159 * purpose -- on 64-bit systems it is expected that
3160 * buffer_heads are stripped during active rotation. On 32-bit
3161 * systems, highmem pages can pin lowmem memory and shrinking
3162 * buffers can relieve lowmem pressure. Reclaim may still not
3163 * go ahead if all eligible zones for the original allocation
3164 * request are balanced to avoid excessive reclaim from kswapd.
3165 */
3166 if (buffer_heads_over_limit) {
3167 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
3168 zone = pgdat->node_zones + i;
3169 if (!populated_zone(zone))
3170 continue;
3171
3172 sc.reclaim_idx = i;
3173 break;
3174 }
3175 }
3176
3177 /*
3178 * Only reclaim if there are no eligible zones. Check from
3179 * high to low zone as allocations prefer higher zones.
3180 * Scanning from low to high zone would allow congestion to be
3181 * cleared during a very small window when a small low
3182 * zone was balanced even under extreme pressure when the
3183 * overall node may be congested. Note that sc.reclaim_idx
3184 * is not used as buffer_heads_over_limit may have adjusted
3185 * it.
3186 */
3187 for (i = classzone_idx; i >= 0; i--) {
3188 zone = pgdat->node_zones + i;
3189 if (!populated_zone(zone))
3190 continue;
3191
3192 if (zone_balanced(zone, sc.order, classzone_idx))
3193 goto out;
3194 }
3195
3196 /*
3197 * Do some background aging of the anon list, to give
3198 * pages a chance to be referenced before reclaiming. All
3199 * pages are rotated regardless of classzone as this is
3200 * about consistent aging.
3201 */
3202 age_active_anon(pgdat, &sc);
3203
3204 /*
3205 * If we're having trouble reclaiming, start doing writepage
3206 * even in laptop mode.
3207 */
3208 if (sc.priority < DEF_PRIORITY - 2 || !pgdat_reclaimable(pgdat))
3209 sc.may_writepage = 1;
3210
3211 /* Call soft limit reclaim before calling shrink_node. */
3212 sc.nr_scanned = 0;
3213 nr_soft_scanned = 0;
3214 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
3215 sc.gfp_mask, &nr_soft_scanned);
3216 sc.nr_reclaimed += nr_soft_reclaimed;
3217
3218 /*
3219 * There should be no need to raise the scanning priority if
3220 * enough pages are already being scanned that the high
3221 * watermark would be met at 100% efficiency.
3222 */
3223 if (kswapd_shrink_node(pgdat, &sc))
3224 raise_priority = false;
3225
3226 /*
3227 * If the low watermark is met there is no need for processes
3228 * to be throttled on pfmemalloc_wait as they should now be
3229 * able to safely make forward progress. Wake them.
3230 */
3231 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3232 pfmemalloc_watermark_ok(pgdat))
3233 wake_up_all(&pgdat->pfmemalloc_wait);
3234
3235 /* Check if kswapd should be suspending */
3236 if (try_to_freeze() || kthread_should_stop())
3237 break;
3238
3239 /*
3240 * Raise priority if scanning rate is too low or there was no
3241 * progress in reclaiming pages
3242 */
3243 if (raise_priority || !sc.nr_reclaimed)
3244 sc.priority--;
3245 } while (sc.priority >= 1);
3246
3247 out:
3248 /*
3249 * Return the order kswapd stopped reclaiming at as
3250 * prepare_kswapd_sleep() takes it into account. If another caller
3251 * entered the allocator slow path while kswapd was awake, order will
3252 * remain at the higher level.
3253 */
3254 return sc.order;
3255 }
3256
3257 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
3258 unsigned int classzone_idx)
3259 {
3260 long remaining = 0;
3261 DEFINE_WAIT(wait);
3262
3263 if (freezing(current) || kthread_should_stop())
3264 return;
3265
3266 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3267
3268 /* Try to sleep for a short interval */
3269 if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
3270 /*
3271 * Compaction records what page blocks it recently failed to
3272 * isolate pages from and skips them in the future scanning.
3273 * When kswapd is going to sleep, it is reasonable to assume
3274 * that isolating pages and compaction may succeed so reset the cache.
3275 */
3276 reset_isolation_suitable(pgdat);
3277
3278 /*
3279 * We have freed the memory, now we should compact it to make
3280 * allocation of the requested order possible.
3281 */
3282 wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
3283
3284 remaining = schedule_timeout(HZ/10);
3285
3286 /*
3287 * If woken prematurely then reset kswapd_classzone_idx and
3288 * order. The values will either be from a wakeup request or
3289 * the previous request that slept prematurely.
3290 */
3291 if (remaining) {
3292 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);
3293 pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
3294 }
3295
3296 finish_wait(&pgdat->kswapd_wait, &wait);
3297 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3298 }
3299
3300 /*
3301 * After a short sleep, check if it was a premature sleep. If not, then
3302 * go fully to sleep until explicitly woken up.
3303 */
3304 if (!remaining &&
3305 prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
3306 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3307
3308 /*
3309 * vmstat counters are not perfectly accurate and the estimated
3310 * value for counters such as NR_FREE_PAGES can deviate from the
3311 * true value by nr_online_cpus * threshold. To avoid the zone
3312 * watermarks being breached while under pressure, we reduce the
3313 * per-cpu vmstat threshold while kswapd is awake and restore
3314 * them before going back to sleep.
3315 */
3316 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3317
3318 if (!kthread_should_stop())
3319 schedule();
3320
3321 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3322 } else {
3323 if (remaining)
3324 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3325 else
3326 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3327 }
3328 finish_wait(&pgdat->kswapd_wait, &wait);
3329 }
3330
3331 /*
3332 * The background pageout daemon, started as a kernel thread
3333 * from the init process.
3334 *
3335 * This basically trickles out pages so that we have _some_
3336 * free memory available even if there is no other activity
3337 * that frees anything up. This is needed for things like routing
3338 * etc, where we otherwise might have all activity going on in
3339 * asynchronous contexts that cannot page things out.
3340 *
3341 * If there are applications that are active memory-allocators
3342 * (most normal use), this basically shouldn't matter.
3343 */
3344 static int kswapd(void *p)
3345 {
3346 unsigned int alloc_order, reclaim_order, classzone_idx;
3347 pg_data_t *pgdat = (pg_data_t*)p;
3348 struct task_struct *tsk = current;
3349
3350 struct reclaim_state reclaim_state = {
3351 .reclaimed_slab = 0,
3352 };
3353 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3354
3355 lockdep_set_current_reclaim_state(GFP_KERNEL);
3356
3357 if (!cpumask_empty(cpumask))
3358 set_cpus_allowed_ptr(tsk, cpumask);
3359 current->reclaim_state = &reclaim_state;
3360
3361 /*
3362 * Tell the memory management that we're a "memory allocator",
3363 * and that if we need more memory we should get access to it
3364 * regardless (see "__alloc_pages()"). "kswapd" should
3365 * never get caught in the normal page freeing logic.
3366 *
3367 * (Kswapd normally doesn't need memory anyway, but sometimes
3368 * you need a small amount of memory in order to be able to
3369 * page out something else, and this flag essentially protects
3370 * us from recursively trying to free more memory as we're
3371 * trying to free the first piece of memory in the first place).
3372 */
3373 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
3374 set_freezable();
3375
3376 pgdat->kswapd_order = alloc_order = reclaim_order = 0;
3377 pgdat->kswapd_classzone_idx = classzone_idx = 0;
3378 for ( ; ; ) {
3379 bool ret;
3380
3381 kswapd_try_sleep:
3382 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
3383 classzone_idx);
3384
3385 /* Read the new order and classzone_idx */
3386 alloc_order = reclaim_order = pgdat->kswapd_order;
3387 classzone_idx = pgdat->kswapd_classzone_idx;
3388 pgdat->kswapd_order = 0;
3389 pgdat->kswapd_classzone_idx = 0;
3390
3391 ret = try_to_freeze();
3392 if (kthread_should_stop())
3393 break;
3394
3395 /*
3396 * We can speed up thawing tasks if we don't call balance_pgdat
3397 * after returning from the refrigerator
3398 */
3399 if (ret)
3400 continue;
3401
3402 /*
3403 * Reclaim begins at the requested order but if a high-order
3404 * reclaim fails then kswapd falls back to reclaiming for
3405 * order-0. If that happens, kswapd will consider sleeping
3406 * for the order it finished reclaiming at (reclaim_order)
3407 * but kcompactd is woken to compact for the original
3408 * request (alloc_order).
3409 */
3410 trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
3411 alloc_order);
3412 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
3413 if (reclaim_order < alloc_order)
3414 goto kswapd_try_sleep;
3415
3416 alloc_order = reclaim_order = pgdat->kswapd_order;
3417 classzone_idx = pgdat->kswapd_classzone_idx;
3418 }
3419
3420 tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
3421 current->reclaim_state = NULL;
3422 lockdep_clear_current_reclaim_state();
3423
3424 return 0;
3425 }
3426
3427 /*
3428 * A zone is low on free memory, so wake its kswapd task to service it.
3429 */
3430 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
3431 {
3432 pg_data_t *pgdat;
3433 int z;
3434
3435 if (!populated_zone(zone))
3436 return;
3437
3438 if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
3439 return;
3440 pgdat = zone->zone_pgdat;
3441 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, classzone_idx);
3442 pgdat->kswapd_order = max(pgdat->kswapd_order, order);
3443 if (!waitqueue_active(&pgdat->kswapd_wait))
3444 return;
3445
3446 /* Only wake kswapd if all zones are unbalanced */
3447 for (z = 0; z <= classzone_idx; z++) {
3448 zone = pgdat->node_zones + z;
3449 if (!populated_zone(zone))
3450 continue;
3451
3452 if (zone_balanced(zone, order, classzone_idx))
3453 return;
3454 }
3455
3456 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
3457 wake_up_interruptible(&pgdat->kswapd_wait);
3458 }
3459
3460 #ifdef CONFIG_HIBERNATION
3461 /*
3462 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
3463 * freed pages.
3464 *
3465 * Rather than trying to age LRUs the aim is to preserve the overall
3466 * LRU order by reclaiming preferentially
3467 * inactive > active > active referenced > active mapped
3468 */
3469 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3470 {
3471 struct reclaim_state reclaim_state;
3472 struct scan_control sc = {
3473 .nr_to_reclaim = nr_to_reclaim,
3474 .gfp_mask = GFP_HIGHUSER_MOVABLE,
3475 .reclaim_idx = MAX_NR_ZONES - 1,
3476 .priority = DEF_PRIORITY,
3477 .may_writepage = 1,
3478 .may_unmap = 1,
3479 .may_swap = 1,
3480 .hibernation_mode = 1,
3481 };
3482 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3483 struct task_struct *p = current;
3484 unsigned long nr_reclaimed;
3485
3486 p->flags |= PF_MEMALLOC;
3487 lockdep_set_current_reclaim_state(sc.gfp_mask);
3488 reclaim_state.reclaimed_slab = 0;
3489 p->reclaim_state = &reclaim_state;
3490
3491 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3492
3493 p->reclaim_state = NULL;
3494 lockdep_clear_current_reclaim_state();
3495 p->flags &= ~PF_MEMALLOC;
3496
3497 return nr_reclaimed;
3498 }
3499 #endif /* CONFIG_HIBERNATION */
3500
3501 /* It's optimal to keep kswapds on the same CPUs as their memory, but
3502 not required for correctness. So if the last cpu in a node goes
3503 away, we get changed to run anywhere: as the first one comes back,
3504 restore their cpu bindings. */
3505 static int cpu_callback(struct notifier_block *nfb, unsigned long action,
3506 void *hcpu)
3507 {
3508 int nid;
3509
3510 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
3511 for_each_node_state(nid, N_MEMORY) {
3512 pg_data_t *pgdat = NODE_DATA(nid);
3513 const struct cpumask *mask;
3514
3515 mask = cpumask_of_node(pgdat->node_id);
3516
3517 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3518 /* One of our CPUs online: restore mask */
3519 set_cpus_allowed_ptr(pgdat->kswapd, mask);
3520 }
3521 }
3522 return NOTIFY_OK;
3523 }
3524
3525 /*
3526 * This kswapd start function will be called by init and node-hot-add.
3527 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
3528 */
3529 int kswapd_run(int nid)
3530 {
3531 pg_data_t *pgdat = NODE_DATA(nid);
3532 int ret = 0;
3533
3534 if (pgdat->kswapd)
3535 return 0;
3536
3537 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3538 if (IS_ERR(pgdat->kswapd)) {
3539 /* failure at boot is fatal */
3540 BUG_ON(system_state == SYSTEM_BOOTING);
3541 pr_err("Failed to start kswapd on node %d\n", nid);
3542 ret = PTR_ERR(pgdat->kswapd);
3543 pgdat->kswapd = NULL;
3544 }
3545 return ret;
3546 }
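/*
 * Illustrative call sites (assumption): besides kswapd_init() below,
 * the memory hotplug online path in mm/memory_hotplug.c starts a
 * kswapd once a node gains memory, roughly:
 *
 *	if (onlined_pages)
 *		kswapd_run(nid);
 */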
3547
3548 /*
3549 * Called by memory hotplug when all memory in a node is offlined. Caller must
3550 * hold mem_hotplug_begin/end().
3551 */
3552 void kswapd_stop(int nid)
3553 {
3554 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3555
3556 if (kswapd) {
3557 kthread_stop(kswapd);
3558 NODE_DATA(nid)->kswapd = NULL;
3559 }
3560 }
3561
3562 static int __init kswapd_init(void)
3563 {
3564 int nid;
3565
3566 swap_setup();
3567 for_each_node_state(nid, N_MEMORY)
3568 kswapd_run(nid);
3569 hotcpu_notifier(cpu_callback, 0);
3570 return 0;
3571 }
3572
3573 module_init(kswapd_init)
3574
3575 #ifdef CONFIG_NUMA
3576 /*
3577 * Node reclaim mode
3578 *
3579 * If non-zero call node_reclaim when the number of free pages falls below
3580 * the watermarks.
3581 */
3582 int node_reclaim_mode __read_mostly;
3583
3584 #define RECLAIM_OFF 0
3585 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
3586 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3587 #define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
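/*
 * Note (assumption based on kernel/sysctl.c): node_reclaim_mode is set
 * through the vm.zone_reclaim_mode sysctl, which keeps its historical
 * name.  The bits above combine, e.g.:
 *
 *	echo 1 > /proc/sys/vm/zone_reclaim_mode	  # reclaim clean pagecache
 *	echo 3 > /proc/sys/vm/zone_reclaim_mode	  # also write out dirty pages
 *	echo 5 > /proc/sys/vm/zone_reclaim_mode	  # also unmap/swap pages
 */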
3588
3589 /*
3590 * Priority for NODE_RECLAIM. This determines the fraction of a node's
3591 * pages scanned in one node_reclaim pass. A value of 4 scans 1/16th of
3592 * the node.
3593 */
3594 #define NODE_RECLAIM_PRIORITY 4
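/*
 * Worked example (illustrative numbers): one pass scans roughly
 * lru_size >> sc->priority pages, so on a node with 1,048,576 LRU pages
 * the first node reclaim pass scans
 *
 *	1048576 >> 4 == 65536 pages (1/16th of the node)
 *
 * __node_reclaim() then lowers the priority towards 0, doubling the
 * scan window each pass, until enough pages have been reclaimed.
 */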
3595
3596 /*
3597 * Percentage of a node's pages that must be unmapped file pagecache for
3598 * node_reclaim to occur.
3599 */
3600 int sysctl_min_unmapped_ratio = 1;
3601
3602 /*
3603 * If the number of slab pages in a node grows beyond this percentage then
3604 * slab reclaim needs to occur.
3605 */
3606 int sysctl_min_slab_ratio = 5;
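/*
 * Note (assumption, sketch of mm/page_alloc.c behaviour): these ratios
 * are exposed as vm.min_unmapped_ratio and vm.min_slab_ratio, and the
 * per-node thresholds they imply (pgdat->min_unmapped_pages,
 * pgdat->min_slab_pages) are recomputed when the sysctls change,
 * roughly:
 *
 *	pgdat->min_unmapped_pages +=
 *		(zone->managed_pages * sysctl_min_unmapped_ratio) / 100;
 */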
3607
3608 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
3609 {
3610 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
3611 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
3612 node_page_state(pgdat, NR_ACTIVE_FILE);
3613
3614 /*
3615 * It's possible for there to be more file mapped pages than are
3616 * accounted for by the pages on the file LRU lists because
3617 * tmpfs pages accounted for as ANON can also be FILE_MAPPED.
3618 */
3619 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3620 }
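/*
 * Numerical sketch (illustrative figures): with 50,000 pages on the
 * file LRUs and 10,000 NR_FILE_MAPPED pages, 40,000 pages are treated
 * as unmapped and therefore candidates for node reclaim.  If mapped
 * pages exceed the file LRU total (tmpfs pages on the anon LRUs can be
 * FILE_MAPPED), the helper clamps to 0 instead of underflowing.
 */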
3621
3622 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
3623 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
3624 {
3625 unsigned long nr_pagecache_reclaimable;
3626 unsigned long delta = 0;
3627
3628 /*
3629 * If RECLAIM_UNMAP is set, then all file pages are considered
3630 * potentially reclaimable. Otherwise, we have to worry about
3631 * pages like swapcache, and node_unmapped_file_pages() provides
3632 * a better estimate.
3633 */
3634 if (node_reclaim_mode & RECLAIM_UNMAP)
3635 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
3636 else
3637 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
3638
3639 /* If we can't clean pages, remove dirty pages from consideration */
3640 if (!(node_reclaim_mode & RECLAIM_WRITE))
3641 delta += node_page_state(pgdat, NR_FILE_DIRTY);
3642
3643 /* Watch for any possible underflows due to delta */
3644 if (unlikely(delta > nr_pagecache_reclaimable))
3645 delta = nr_pagecache_reclaimable;
3646
3647 return nr_pagecache_reclaimable - delta;
3648 }
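/*
 * Example (illustrative figures): with RECLAIM_WRITE clear, 40,000
 * unmapped file pages and 15,000 NR_FILE_DIRTY pages leave
 *
 *	40000 - 15000 == 25000
 *
 * reclaimable pages.  The underflow check matters because NR_FILE_DIRTY
 * also counts dirty mapped pages, so delta can exceed the unmapped
 * estimate.
 */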
3649
3650 /*
3651 * Try to free up some pages from this node through reclaim.
3652 */
3653 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
3654 {
3655 /* Minimum pages needed in order to stay on node */
3656 const unsigned long nr_pages = 1 << order;
3657 struct task_struct *p = current;
3658 struct reclaim_state reclaim_state;
3659 int classzone_idx = gfp_zone(gfp_mask);
3660 struct scan_control sc = {
3661 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3662 .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
3663 .order = order,
3664 .priority = NODE_RECLAIM_PRIORITY,
3665 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
3666 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
3667 .may_swap = 1,
3668 .reclaim_idx = classzone_idx,
3669 };
3670
3671 cond_resched();
3672 /*
3673 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
3674 * and we also need to be able to write out pages for RECLAIM_WRITE
3675 * and RECLAIM_UNMAP.
3676 */
3677 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
3678 lockdep_set_current_reclaim_state(gfp_mask);
3679 reclaim_state.reclaimed_slab = 0;
3680 p->reclaim_state = &reclaim_state;
3681
3682 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
3683 /*
3684 * Free memory by calling shrink_node() with increasing
3685 * reclaim pressure (decreasing sc.priority) until enough memory is freed.
3686 */
3687 do {
3688 shrink_node(pgdat, &sc);
3689 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
3690 }
3691
3692 p->reclaim_state = NULL;
3693 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
3694 lockdep_clear_current_reclaim_state();
3695 return sc.nr_reclaimed >= nr_pages;
3696 }
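/*
 * Note on the loop above: starting at NODE_RECLAIM_PRIORITY (4) and
 * decrementing to 0 allows at most five passes, each scanning twice as
 * much of the node as the one before.  Success is reported only if at
 * least 1 << order pages were reclaimed, even though the scan target is
 * never below SWAP_CLUSTER_MAX.
 */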
3697
3698 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
3699 {
3700 int ret;
3701
3702 /*
3703 * Node reclaim reclaims unmapped file backed pages and
3704 * slab pages if we are over the defined limits.
3705 *
3706 * A small portion of unmapped file backed pages is needed for
3707 * file I/O otherwise pages read by file I/O will be immediately
3708 * thrown out if the node is overallocated. So we do not reclaim
3709 * if less than a specified percentage of the node is used by
3710 * unmapped file backed pages.
3711 */
3712 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
3713 sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
3714 return NODE_RECLAIM_FULL;
3715
3716 if (!pgdat_reclaimable(pgdat))
3717 return NODE_RECLAIM_FULL;
3718
3719 /*
3720 * Do not scan if the allocation should not be delayed.
3721 */
3722 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
3723 return NODE_RECLAIM_NOSCAN;
3724
3725 /*
3726 * Only run node reclaim on the local node or on nodes that do not
3727 * have associated processors. This will favor the local processor
3728 * over remote processors and spread off-node memory allocations
3729 * as widely as possible.
3730 */
3731 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
3732 return NODE_RECLAIM_NOSCAN;
3733
3734 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
3735 return NODE_RECLAIM_NOSCAN;
3736
3737 ret = __node_reclaim(pgdat, gfp_mask, order);
3738 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
3739
3740 if (!ret)
3741 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3742
3743 return ret;
3744 }
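/*
 * Illustrative caller (assumption, simplified from mm/page_alloc.c):
 * get_page_from_freelist() tries node reclaim when a zone fails its
 * watermark check, before falling back to remote zones:
 *
 *	if (node_reclaim_mode == 0 ||
 *	    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
 *		continue;
 *	ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
 *	if (ret == NODE_RECLAIM_NOSCAN || ret == NODE_RECLAIM_FULL)
 *		continue;	// skip this zone
 *	// otherwise recheck the watermark and allocate if it now passes
 */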
3745 #endif
3746
3747 /*
3748 * page_evictable - test whether a page is evictable
3749 * @page: the page to test
3750 *
3751 * Test whether page is evictable--i.e., should be placed on active/inactive
3752 * lists vs unevictable list.
3753 *
3754 * Reasons page might not be evictable:
3755 * (1) page's mapping marked unevictable
3756 * (2) page is part of an mlocked VMA
3757 *
3758 */
3759 int page_evictable(struct page *page)
3760 {
3761 return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
3762 }
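/*
 * Illustrative examples (not exhaustive): a page faulted into an
 * mlock()ed VMA carries PG_mlocked and fails this test, as does a page
 * whose mapping was flagged with mapping_set_unevictable() (e.g.
 * SHM_LOCKed shmem).  Such pages stay on the unevictable LRU and are
 * not scanned for reclaim.
 */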
3763
3764 #ifdef CONFIG_SHMEM
3765 /**
3766 * check_move_unevictable_pages - check pages for evictability and move to the appropriate lru list
3767 * @pages: array of pages to check
3768 * @nr_pages: number of pages to check
3769 *
3770 * Checks pages for evictability and moves them to the appropriate lru list.
3771 *
3772 * This function is only used for SysV IPC SHM_UNLOCK.
3773 */
3774 void check_move_unevictable_pages(struct page **pages, int nr_pages)
3775 {
3776 struct lruvec *lruvec;
3777 struct pglist_data *pgdat = NULL;
3778 int pgscanned = 0;
3779 int pgrescued = 0;
3780 int i;
3781
3782 for (i = 0; i < nr_pages; i++) {
3783 struct page *page = pages[i];
3784 struct pglist_data *pagepgdat = page_pgdat(page);
3785
3786 pgscanned++;
3787 if (pagepgdat != pgdat) {
3788 if (pgdat)
3789 spin_unlock_irq(&pgdat->lru_lock);
3790 pgdat = pagepgdat;
3791 spin_lock_irq(&pgdat->lru_lock);
3792 }
3793 lruvec = mem_cgroup_page_lruvec(page, pgdat);
3794
3795 if (!PageLRU(page) || !PageUnevictable(page))
3796 continue;
3797
3798 if (page_evictable(page)) {
3799 enum lru_list lru = page_lru_base_type(page);
3800
3801 VM_BUG_ON_PAGE(PageActive(page), page);
3802 ClearPageUnevictable(page);
3803 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3804 add_page_to_lru_list(page, lruvec, lru);
3805 pgrescued++;
3806 }
3807 }
3808
3809 if (pgdat) {
3810 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3811 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3812 spin_unlock_irq(&pgdat->lru_lock);
3813 }
3814 }
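/*
 * Illustrative caller (assumption, simplified from mm/shmem.c):
 * shmem_unlock_mapping() walks the mapping in pagevec-sized batches
 * after SHM_UNLOCK and hands each batch here so formerly locked pages
 * can move back to the regular LRUs:
 *
 *	pvec.nr = find_get_entries(mapping, index, PAGEVEC_SIZE,
 *				   pvec.pages, indices);
 *	pagevec_remove_exceptionals(&pvec);
 *	check_move_unevictable_pages(pvec.pages, pvec.nr);
 *	pagevec_release(&pvec);
 */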
3815 #endif /* CONFIG_SHMEM */