/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

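/*
 * split_free_page() does not run the arch and debug page-mapping hooks
 * that the normal allocation path does, so run them here for each page
 * before it is handed out from a private freelist.
 */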
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

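/*
 * Async compaction is optimistic: it restricts itself to pageblocks
 * where migration is most likely to succeed cheaply. MIGRATE_MOVABLE
 * and MIGRATE_CMA blocks qualify; everything else is left to sync
 * compaction.
 */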
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, abort and return 0 on any invalid PFN or non-free
 * page inside the pageblock (even though some pages may already have
 * been isolated).
 */
static unsigned long isolate_freepages_block(unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn)) {
			if (strict)
				return 0;
			continue;
		}
		nr_scanned++;

		if (!PageBuddy(page)) {
			if (strict)
				return 0;
			continue;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			return 0;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn: The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the
 * function to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the
 * isolated pages (which may be greater than end_pfn if the end fell
 * in the middle of a free page).
 */
unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn, flags;
	struct zone *zone = NULL;
	LIST_HEAD(freelist);

	if (pfn_valid(start_pfn))
		zone = page_zone(pfn_to_page(start_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		spin_lock_irqsave(&zone->lock, flags);
		isolated = isolate_freepages_block(pfn, block_end_pfn,
						   &freelist, true);
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (i.e. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

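/*
 * The strict isolation path above exists for callers outside compaction
 * proper -- notably the CMA allocator's alloc_contig_range() -- which
 * need a fully free, physically contiguous range or a clean failure.
 * Compaction itself uses the non-strict isolate_freepages_block() path
 * via isolate_freepages() below.
 */
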
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
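
/*
 * Illustrative numbers: with 6000 pages on the inactive lists and 2000
 * on the active lists, too_many_isolated() starts throttling once more
 * than (6000 + 2000) / 2 = 4000 pages sit isolated off the LRU,
 * whether isolated by compaction or by parallel reclaimers.
 */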

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone: Zone pages are in.
 * @cc: Compaction control structure.
 * @low_pfn: The first PFN of the range.
 * @end_pfn: The one-past-the-last PFN of the range.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields; in particular it does not modify
 * (or read, for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		bool locked = true;

		/* give a chance to irqs before checking need_resched() */
		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irq(&zone->lru_lock);
			locked = false;
		}
		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
			if (locked)
				spin_unlock_irq(&zone->lru_lock);
			cond_resched();
			spin_lock_irq(&zone->lru_lock);
			if (fatal_signal_pending(current))
				break;
		} else if (!locked)
			spin_lock_irq(&zone->lru_lock);

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks.
		 * Async migration is optimistic: it checks whether the
		 * minimum amount of work satisfies the allocation.
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		/* Try to isolate the page */
		if (__isolate_lru_page(page, mode, 0) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range
		 * of pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/*
		 * Found a block suitable for isolating free pages from.
		 * With interrupts now disabled, double check the block is
		 * still suitable and isolate the pages. This is to
		 * minimise the time IRQs are disabled.
		 */
		isolated = 0;
		spin_lock_irqsave(&zone->lock, flags);
		if (suitable_migration_target(page)) {
			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
			isolated = isolate_freepages_block(pfn, end_pfn,
							   freelist, false);
			nr_freepages += isolated;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}
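
/*
 * compaction_alloc() is wired into migrate_pages() as its get-new-page
 * callback (see compact_zone() below); @data carries the
 * compact_control pointer so the callback can refill cc->freepages on
 * demand by running the free scanner.
 */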

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
	if (!low_pfn)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}
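
/*
 * Worked example of the checks above: for an order-3 request against a
 * zone whose low watermark is 1000 pages, compaction needs
 * 1000 + (2UL << 3) = 1016 order-0 pages free, the extra 16 covering
 * the transient page copies made during migration. If the watermark is
 * met but fragmentation_index() reports, say, 400 with the default
 * sysctl_extfrag_threshold of 500, the failure is judged to be lack of
 * memory rather than fragmentation and compaction is skipped.
 */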

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						 nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}

	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
					int order, gfp_t gfp_mask,
					bool sync)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

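/*
 * The fragmentation-index cutoff used by compaction_suitable() is
 * tunable at runtime via /proc/sys/vm/extfrag_threshold (see
 * sysctl_extfrag_handler() below). Compaction only runs when the index
 * exceeds this value, so raising it toward 1000 restricts compaction
 * to heavily fragmented zones, while lowering it toward 0 lets
 * compaction run even when a failure looks more like simple lack of
 * memory.
 */
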
/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/*
	 * Check whether it is worth even starting compaction. The order
	 * check is made because we assume the page allocator can satisfy
	 * the "cheaper" orders without taking special steps.
	 */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;
	}

	return rc;
}
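
/*
 * For orientation: the page allocator's slow path calls
 * try_to_compact_pages() from __alloc_pages_direct_compact() in
 * mm/page_alloc.c when a high-order allocation fails, first
 * asynchronously and then, if the allocation still fails after
 * reclaim, in sync mode.
 */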

/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order > zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
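
/*
 * From userspace, a whole-system compaction pass can be requested
 * with, for example:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * The value written is ignored; any write triggers compaction of every
 * online node.
 */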

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
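
/*
 * The device attribute above gives each NUMA node a per-node trigger,
 * e.g.:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 *
 * which compacts all zones of node 0 only.
 */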

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */