/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

#ifdef CONFIG_COMPACTION

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_limit compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
			round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER)) {
				blockpfn += (1UL << comp_order) - 1;
				cursor += (1UL << comp_order) - 1;
			}

			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			cc->nr_freepages += isolated;
			if (!strict &&
				cc->nr_migratepages <= cc->nr_freepages) {
				blockpfn += isolated;
				break;
			}

			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be both less, equal to or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		bool is_lru;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * Skip any other type of page
		 */
		is_lru = PageLRU(page);
		if (!is_lru) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER))
				low_pfn += (1UL << comp_order) - 1;

			continue;
		}

		if (!is_lru)
			continue;

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				continue;

			/*
			 * Page become compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		/*
		 * In case of fatal failure, release everything that might
		 * have been isolated in the previous iteration, and signal
		 * the failure back to caller.
		 */
		if (!pfn) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			break;
		}

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);

		/*
		 * If we isolated enough freepages, or aborted due to async
		 * compaction being contended, terminate the loop.
		 * Remember where the free scanner should restart next time,
		 * which is where isolate_freepages_block() left off.
		 * But if it scanned the whole pageblock, isolate_start_pfn
		 * now points at block_end_pfn, which is the start of the next
		 * pageblock.
		 * In that case we will however want to restart at the start
		 * of the previous pageblock.
		 */
		if ((cc->nr_freepages >= cc->nr_migratepages)
							|| cc->contended) {
			if (isolate_start_pfn >= block_end_pfn)
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			break;
		} else {
			/*
			 * isolate_freepages_block() should not terminate
			 * prematurely unless contended, or isolated enough
			 */
			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = 1;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	unsigned long isolate_start_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; end_pfn <= cc->free_pfn;
			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		isolate_start_pfn = low_pfn;
		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
								isolate_mode);

		if (!low_pfn || cc->contended) {
			acct_isolated(zone, cc);
			return ISOLATE_ABORT;
		}

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to buddy allocator.
		 * - this is the lowest page that could have been isolated and
		 * then freed by migration.
		 */
		if (cc->nr_migratepages && !cc->last_migrated_pfn)
			cc->last_migrated_pfn = isolate_start_pfn;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/* Record where migration scanner will be restarted. */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

static int __compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_CONTENDED;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);

	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
							cc->alloc_flags))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_PARTIAL;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!list_empty(&area->free_list[MIGRATE_CMA]))
			return COMPACT_PARTIAL;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1)
			return COMPACT_PARTIAL;
	}

	return COMPACT_NO_SUITABLE_PAGE;
}

static int compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	int ret;

	ret = __compact_finished(zone, cc, migratetype);
	trace_mm_compaction_finished(zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static unsigned long __compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	int fragindex;
	unsigned long watermark;

	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = low_wmark_pages(zone);
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_PARTIAL;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark += (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_NOT_SUITABLE_ZONE;

	return COMPACT_CONTINUE;
}

unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	unsigned long ret;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
		cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}
	cc->last_migrated_pfn = 0;

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && cc->last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				cc->migrate_pfn & ~((1UL << cc->order) - 1);

			if (cc->last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				cc->last_migrated_pfn = 0;
			}
		}

	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn &= ~(pageblock_nr_pages-1);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	if (ret == COMPACT_CONTENDED)
		ret = COMPACT_PARTIAL;

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
		int alloc_flags, int classzone_idx)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = mode,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that determines if compaction was aborted due to
 *	       need_resched() or lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		int status;
		int zone_contended;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
				&zone_contended, alloc_flags,
				ac->classzone_idx);
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac->classzone_idx, alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail so we better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}

		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones and signal need_resched()
		 * contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so be conservative
		 * and assume they are not all lock contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report if all
	 * zones that were tried were lock contended.
	 */
	if (rc > COMPACT_SKIPPED && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}


/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		/*
		 * When called via /proc/sys/vm/compact_memory
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (is_via_compact_memory(cc->order))
			__reset_isolation_suitable(zone);

		if (is_via_compact_memory(cc->order) ||
				!compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));

		if (is_via_compact_memory(cc->order))
			continue;

		if (zone_watermark_ok(zone, cc->order,
				low_wmark_pages(zone), 0, 0))
			compaction_defer_reset(zone, cc->order, false);
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */