/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page
 * scanners meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

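/*
 * Worked example of the stride above: with 4KiB pages and
 * pageblock_order == 9 (a common x86-64 configuration),
 * pageblock_nr_pages == 512, so each iteration of the walk covers 2MiB of
 * zone span and touches a single struct page per pageblock.
 */
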
void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);
		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * of severe contention. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock was released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

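/*
 * Typical caller pattern (as used by the isolation loops below): the scanner
 * passes its current "locked" state on every iteration and bails out of the
 * loop when false is returned, e.g.
 *
 *	locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
 *						locked, cc);
 *	if (!locked)
 *		break;
 *
 * so async compaction degrades gracefully instead of spinning on a
 * contended lock.
 */
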
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return false;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

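/*
 * The decision above, summarised:
 *
 *	free page of order >= pageblock_order	-> not suitable (the block
 *						   is already maximally free)
 *	MIGRATE_MOVABLE or MIGRATE_CMA block	-> suitable
 *	any other block type (e.g. unmovable)	-> not suitable
 */
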
/*
 * Isolate free pages onto a private freelist. If @strict is true, it will
 * abort and return 0 on any invalid PFNs or non-free pages inside the
 * pageblock (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;
	bool checked_pageblock = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction we do
		 * not spin on the lock, and we acquire it as late as
		 * possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !checked_pageblock) {
			/*
			 * We need to check the suitability of the pageblock
			 * only once, and isolate_freepages_block() is called
			 * with a single pageblock range, so checking it once
			 * is sufficient.
			 */
			checked_pageblock = true;
			if (!suitable_migration_target(page))
				break;
		}

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

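/*
 * Worked example of the stepping above, assuming pageblock_nr_pages == 512:
 * starting at pfn 1000, ALIGN(1001, 512) yields block_end_pfn 1024, so the
 * first iteration covers the partial block [1000, 1024). If the whole block
 * is isolated, the next iteration starts at pfn 1024, already aligned, and
 * proceeds one full pageblock at a time.
 */
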
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
			zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
			zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
			zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

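/*
 * Worked example of the threshold above: with 6000 active and 4000 inactive
 * LRU pages in a zone, isolation is considered excessive once more than
 * (6000 + 4000) / 2 == 5000 pages sit on the isolated lists, at which point
 * callers throttle rather than isolating more.
 */
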
/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 * @unevictable: true if it is allowed to isolate unevictable pages
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any of cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	bool skipped_async_unsuitable = false;
	const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
				    (unevictable ? ISOLATE_UNEVICTABLE : 0);

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (last_pageblock_nr != pageblock_nr) {
			int mt;

			last_pageblock_nr = pageblock_nr;
			if (!isolation_suitable(cc, page))
				goto next_pageblock;

			/*
			 * For async migration, also only scan in MOVABLE
			 * blocks. Async migration is optimistic to see if
			 * the minimum amount of work satisfies the allocation
			 */
			mt = get_pageblock_migratetype(page);
			if (!cc->sync && !migrate_async_suitable(mt)) {
				cc->finished_update_migrate = true;
				skipped_async_unsuitable = true;
				goto next_pageblock;
			}
		}

		/*
		 * Skip if free. page_order cannot be used without zone->lock
		 * as nothing prevents parallel allocations or buddy merging.
		 */
		if (PageBuddy(page))
			continue;

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages;
		 * skip any other type of page.
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try to isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 * This is not done when pageblock was skipped due to being unsuitable
	 * for async compaction, so that eventual sync compaction can try.
	 */
	if (low_pfn == end_pfn && !skipped_async_unsuitable)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

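/*
 * A note on the next_pageblock step above, assuming pageblock_nr_pages ==
 * 512: from low_pfn 1000, ALIGN(1001, 512) - 1 == 1023, and the loop's
 * low_pfn++ then lands on 1024, the first PFN of the next pageblock, so a
 * skipped block costs one iteration rather than hundreds.
 */
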
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. We need this aligned to
	 * the pageblock boundary, because we do pfn -= pageblock_nr_pages
	 * in the for loop.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	z_end_pfn = zone_end_pfn(zone);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;
		unsigned long end_pfn;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule.
		 */
		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 *	node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */

		/*
		 * Take care when isolating in the last pageblock of a zone
		 * which ends in the middle of a pageblock.
		 */
		end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			cc->finished_update_free = true;
			high_pfn = max(high_pfn, pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (pfn < low_pfn)
		cc->free_pfn = max(pfn, zone->zone_start_pfn);
	else
		cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

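/*
 * Big picture for the two scanners (summarising the logic above and in
 * isolate_migratepages_range): the migrate scanner walks forward from the
 * start of the zone collecting in-use movable pages, while the free scanner
 * above walks backward from the end of the zone collecting free pages as
 * migration targets. Compaction finishes when the two meet, which is what
 * compact_finished() checks via cc->free_pfn <= cc->migrate_pfn.
 */
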
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanners meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

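/*
 * Worked example of the exit check above: for an order-3 request against a
 * zone whose low watermark is 1000 pages, compaction keeps running until at
 * least 1000 + (1 << 3) == 1008 pages are free *and* an order-3 (or larger)
 * page of the right migratetype sits on a free list; only then does it
 * report COMPACT_PARTIAL.
 */
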
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

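/*
 * Worked example, assuming the default sysctl_extfrag_threshold of 500: for
 * an order-4 request against a zone with a low watermark of 1000 pages, the
 * order-0 check needs 1000 + (2UL << 4) == 1032 free pages (the doubled
 * bias covers the transient copies made during migration). A fragmentation
 * index of 300 (<= 500) then means the failure is mostly lack of memory and
 * compaction is skipped; an index of 800 means fragmentation is the culprit
 * and compaction continues.
 */
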
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start, but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
				MR_COMPACTION);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						 nr_remaining);

		/* Release isolated pages not migrated */
		if (err) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	trace_mm_compaction_end(ret);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
					int order, gfp_t gfp_mask,
					bool sync, bool *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_compact_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
					    contended);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}

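/*
 * Caller context, for orientation: try_to_compact_pages() is invoked from
 * the page allocator's slow path when a high-order allocation fails, before
 * or between reclaim attempts; the *contended result lets the allocator
 * skip further compaction when an async run backed off a contended lock.
 */
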
/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

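/*
 * Usage, from userspace: writing any value to the sysctl triggers a full
 * compaction of all online nodes (the value itself is ignored, as noted
 * above), e.g.
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * The compact_node() path uses order == -1 to mean "compact the whole zone
 * regardless of watermarks".
 */
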
int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */