/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

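/*
 * Return all pages on a private freelist to the buddy allocator and
 * report how many were freed.
 */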
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

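/*
 * split_free_page() does not map the pages it splits, so map each returned
 * order-0 page here. Both calls below are no-ops on most configurations.
 */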
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

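/*
 * MIGRATE_MOVABLE and MIGRATE_CMA are the pageblock types that async
 * compaction treats as suitable to scan and migrate to.
 */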
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() checks for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that not all pages within a zone's range belong to a
 * single zone. We assume that a border between node0 and node1 can occur
 * within a single pageblock, but not a node0 node1 node0 interleaving within
 * a single pageblock. It is therefore sufficient to check the first and last
 * page of a pageblock and avoid checking each individual page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

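/*
 * Typical caller pattern (mirroring the scanners below): resolve the
 * pageblock before walking it, and skip it entirely if the check fails:
 *
 *	page = pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone);
 *	if (!page)
 *		continue;
 */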
#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (cc->finished_update_migrate)
			return;
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (cc->finished_update_free)
			return;
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return false;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
			zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
			zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
			zone_page_state(zone, NR_ISOLATED_ANON);

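	/*
	 * For example, with 3000 inactive and 1000 active LRU pages,
	 * compaction is throttled once more than 2000 pages sit isolated
	 * off the LRU.
	 */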
	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less, equal to, or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. page_order cannot be used without zone->lock
		 * as nothing prevents parallel allocations or buddy merging.
		 */
		if (PageBuddy(page))
			continue;

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				low_pfn = ALIGN(low_pfn + 1,
						pageblock_nr_pages) - 1;
			else
				low_pfn += (1 << compound_order(page)) - 1;

			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageTransHuge under lock */
			if (!PageLRU(page))
				continue;
			if (PageTransHuge(page)) {
				low_pfn += (1 << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

713
edc2ca61
VB
714/**
715 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
716 * @cc: Compaction control structure.
717 * @start_pfn: The first PFN to start isolating.
718 * @end_pfn: The one-past-last PFN.
719 *
720 * Returns zero if isolation fails fatally due to e.g. pending signal.
721 * Otherwise, function returns one-past-the-last PFN of isolated page
722 * (which may be greater than end_pfn if end fell in a middle of a THP page).
723 */
724unsigned long
725isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
726 unsigned long end_pfn)
727{
728 unsigned long pfn, block_end_pfn;
729
730 /* Scan block by block. First and last block may be incomplete */
731 pfn = start_pfn;
732 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
733
734 for (; pfn < end_pfn; pfn = block_end_pfn,
735 block_end_pfn += pageblock_nr_pages) {
736
737 block_end_pfn = min(block_end_pfn, end_pfn);
738
7d49d886 739 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
edc2ca61
VB
740 continue;
741
742 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
743 ISOLATE_UNEVICTABLE);
744
745 /*
746 * In case of fatal failure, release everything that might
747 * have been isolated in the previous iteration, and signal
748 * the failure back to caller.
749 */
750 if (!pfn) {
751 putback_movable_pages(&cc->migratepages);
752 cc->nr_migratepages = 0;
753 break;
754 }
755 }
756 acct_isolated(cc->zone, cc);
757
758 return pfn;
759}
760
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. We need this aligned to
	 * the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		cc->free_pfn = block_start_pfn;
		isolated = isolate_freepages_block(cc, block_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;

		/*
		 * Set a flag that we successfully isolated in this pageblock.
		 * In the next loop iteration, zone->compact_cached_free_pfn
		 * will not be updated and thus it will effectively contain the
		 * highest pageblock we isolated pages from.
		 */
		if (isolated)
			cc->finished_update_free = true;

		/*
		 * isolate_freepages_block() might have aborted due to async
		 * compaction being contended
		 */
		if (cc->contended)
			break;
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;

	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; end_pfn <= cc->free_pfn;
			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
								isolate_mode);

		if (!low_pfn || cc->contended)
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/* Record where migration scanner will be restarted */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

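/*
 * Decide whether a compaction run can stop: COMPACT_COMPLETE when the two
 * scanners have met, COMPACT_PARTIAL when a page of the requested order
 * should now be allocatable (or on contention or a fatal signal), and
 * COMPACT_CONTINUE otherwise.
 */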
static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 * COMPACT_SKIPPED - If there are too few free pages for compaction
 * COMPACT_PARTIAL - If the allocation would succeed without compaction
 * COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

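	/*
	 * For example, an order-3 request is only attempted once the zone
	 * has 2UL << 3 = 16 pages free above its low watermark.
	 */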
	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

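/*
 * Run one compaction pass over a zone: repeatedly isolate pages with the
 * migration scanner and migrate them into free pages taken by the free
 * scanner, until compact_finished() reports that the run is done.
 */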
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	trace_mm_compaction_end(ret);

	return ret;
}

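/*
 * Compact a single zone for a direct compaction request, using a
 * compact_control built on the stack, and report back whether the run
 * was contended.
 */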
static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, int *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.mode = mode,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist:	The zonelist used for the current allocation
 * @order:	The order of the current allocation
 * @gfp_mask:	The GFP mask of the current allocation
 * @nodemask:	The allowed nodes to allocate from
 * @mode:	The migration mode for async, sync light, or sync migration
 * @contended:	Return value that determines if compaction was aborted due to
 *		need_resched() or lock contention
 * @candidate_zone: Return the zone where we think allocation should succeed
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			enum migrate_mode mode, int *contended,
			struct zone **candidate_zone)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int alloc_flags = 0;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;
		int zone_contended;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
							&zone_contended);
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags)) {
			*candidate_zone = zone;
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail so we better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}

		if (mode != MIGRATE_ASYNC) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones and signal need_resched()
		 * contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so be conservative
		 * and assume they are not all lock contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report if all
	 * zones that were tried were lock contended.
	 */
	if (rc > COMPACT_SKIPPED && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

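/*
 * Compact all zones of a node in async mode. An order of zero means there
 * is no allocation to satisfy, so nothing is done.
 */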
void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

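/*
 * Fully compact one node in sync mode, ignoring pageblock skip hints;
 * used by the sysctl and sysfs triggers below.
 */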
static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */