/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
        count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
        count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

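/* Return isolated free pages to the buddy allocator; returns the number freed. */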
static unsigned long release_freepages(struct list_head *freelist)
{
        struct page *page, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(page, next, freelist, lru) {
                list_del(&page->lru);
                __free_page(page);
                count++;
        }

        return count;
}

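/*
 * Pages isolated via split_free_page() come back without the allocation
 * hooks applied; run the arch hook and map them (the mapping matters when
 * CONFIG_DEBUG_PAGEALLOC unmaps free pages) before handing them out.
 */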
static void map_pages(struct list_head *list)
{
        struct page *page;

        list_for_each_entry(page, list, lru) {
                arch_alloc_page(page, 0);
                kernel_map_pages(page, 1, 1);
        }
}

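/*
 * Async compaction only bothers with pageblocks of these migratetypes,
 * where pages are most likely to be movable without much work.
 */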
static inline bool migrate_async_suitable(int migratetype)
{
        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
{
        if (cc->ignore_skip_hint)
                return true;

        return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long pfn;

        zone->compact_cached_migrate_pfn[0] = start_pfn;
        zone->compact_cached_migrate_pfn[1] = start_pfn;
        zone->compact_cached_free_pfn = end_pfn;
        zone->compact_blockskip_flush = false;

        /* Walk the zone and mark every pageblock as suitable for isolation */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                cond_resched();

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (zone != page_zone(page))
                        continue;

                clear_pageblock_skip(page);
        }
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
        int zoneid;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                struct zone *zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                /* Only flush if a full compaction finished recently */
                if (zone->compact_blockskip_flush)
                        __reset_isolation_suitable(zone);
        }
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool set_unsuitable, bool migrate_scanner)
{
        struct zone *zone = cc->zone;
        unsigned long pfn;

        if (cc->ignore_skip_hint)
                return;

        if (!page)
                return;

        if (nr_isolated)
                return;

        /*
         * Only skip pageblocks when all forms of compaction are known to
         * fail in the near future.
         */
        if (set_unsuitable)
                set_pageblock_skip(page);

        pfn = page_to_pfn(page);

        /* Update where async and sync compaction should restart */
        if (migrate_scanner) {
                if (cc->finished_update_migrate)
                        return;
                if (pfn > zone->compact_cached_migrate_pfn[0])
                        zone->compact_cached_migrate_pfn[0] = pfn;
                if (cc->mode != MIGRATE_ASYNC &&
                    pfn > zone->compact_cached_migrate_pfn[1])
                        zone->compact_cached_migrate_pfn[1] = pfn;
        } else {
                if (cc->finished_update_free)
                        return;
                if (pfn < zone->compact_cached_free_pfn)
                        zone->compact_cached_free_pfn = pfn;
        }
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
{
        return true;
}

static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool set_unsuitable, bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

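/* A coarse lock should be dropped when a reschedule is due or the lock is contended. */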
static inline bool should_release_lock(spinlock_t *lock)
{
        return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
                                      bool locked, struct compact_control *cc)
{
        if (should_release_lock(lock)) {
                if (locked) {
                        spin_unlock_irqrestore(lock, *flags);
                        locked = false;
                }

                /* async aborts if taking too long or contended */
                if (cc->mode == MIGRATE_ASYNC) {
                        cc->contended = true;
                        return false;
                }

                cond_resched();
        }

        if (!locked)
                spin_lock_irqsave(lock, *flags);
        return true;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
        /* If the page is a large free page, then disallow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return false;

        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
        if (migrate_async_suitable(get_pageblock_migratetype(page)))
                return true;

        /* Otherwise skip the block */
        return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
                                unsigned long blockpfn,
                                unsigned long end_pfn,
                                struct list_head *freelist,
                                bool strict)
{
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor, *valid_page = NULL;
        unsigned long flags;
        bool locked = false;
        bool checked_pageblock = false;

        cursor = pfn_to_page(blockpfn);

        /* Isolate free pages. */
        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
                int isolated, i;
                struct page *page = cursor;

                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
                        goto isolate_fail;

                if (!valid_page)
                        valid_page = page;
                if (!PageBuddy(page))
                        goto isolate_fail;

                /*
                 * The zone lock must be held to isolate freepages.
                 * Unfortunately this is a very coarse lock and can be
                 * heavily contended if there are parallel allocations
                 * or parallel compactions. For async compaction do not
                 * spin on the lock and we acquire the lock as late as
                 * possible.
                 */
                locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
                                                                locked, cc);
                if (!locked)
                        break;

                /* Recheck this is a suitable migration target under lock */
                if (!strict && !checked_pageblock) {
                        /*
                         * We need to check suitability of pageblock only once
                         * and this isolate_freepages_block() is called with
                         * pageblock range, so just check once is sufficient.
                         */
                        checked_pageblock = true;
                        if (!suitable_migration_target(page))
                                break;
                }

                /* Recheck this is a buddy page under lock */
                if (!PageBuddy(page))
                        goto isolate_fail;

                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }

                /* If a page was split, advance to the end of it */
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
                        continue;
                }

isolate_fail:
                if (strict)
                        break;
                else
                        continue;

        }

        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

        /*
         * If strict isolation is requested by CMA then check that all the
         * pages requested were isolated. If there were any failures, 0 is
         * returned and CMA will fail.
         */
        if (strict && blockpfn < end_pfn)
                total_isolated = 0;

        if (locked)
                spin_unlock_irqrestore(&cc->zone->lock, flags);

        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, true,
                                      false);

        count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
        if (total_isolated)
                count_compact_events(COMPACTISOLATED, total_isolated);
        return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long isolated, pfn, block_end_pfn;
        LIST_HEAD(freelist);

        for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
                if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
                        break;

                /*
                 * On subsequent iterations ALIGN() is actually not needed,
                 * but we keep it so as not to complicate the code.
                 */
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
                                                   &freelist, true);

                /*
                 * In strict mode, isolate_freepages_block() returns 0 if
                 * there are any holes in the block (ie. invalid PFNs or
                 * non-free pages).
                 */
                if (!isolated)
                        break;

                /*
                 * If we managed to isolate pages, it is always (1 << n) *
                 * pageblock_nr_pages for some non-negative n. (Max order
                 * page may span two pageblocks).
                 */
        }

        /* split_free_page does not map the pages */
        map_pages(&freelist);

        if (pfn < end_pfn) {
                /* Loop terminated early, cleanup. */
                release_freepages(&freelist);
                return 0;
        }

        /* We don't use freelists for anything. */
        return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
        struct page *page;
        unsigned int count[2] = { 0, };

        list_for_each_entry(page, &cc->migratepages, lru)
                count[!!page_is_file_cache(page)]++;

        /* If locked we can use the interrupt unsafe versions */
        if (locked) {
                __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
                __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
        } else {
                mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
                mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
        }
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
        unsigned long active, inactive, isolated;

        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                zone_page_state(zone, NR_INACTIVE_ANON);
        active = zone_page_state(zone, NR_ACTIVE_FILE) +
                                zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                zone_page_state(zone, NR_ISOLATED_ANON);

        return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:        Zone pages are in.
 * @cc:          Compaction control structure.
 * @low_pfn:     The first PFN of the range.
 * @end_pfn:     The one-past-the-last PFN of the range.
 * @unevictable: true if unevictable pages may be isolated as well
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise PFN of the first page that was not scanned
 * (which may be both less, equal to or more than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
        unsigned long last_pageblock_nr = 0, pageblock_nr;
        unsigned long nr_scanned = 0, nr_isolated = 0;
        struct list_head *migratelist = &cc->migratepages;
        struct lruvec *lruvec;
        unsigned long flags;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;
        bool set_unsuitable = true;
        const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
                                        ISOLATE_ASYNC_MIGRATE : 0) |
                                    (unevictable ? ISOLATE_UNEVICTABLE : 0);

        /*
         * Ensure that there are not too many pages isolated from the LRU
         * list by either parallel reclaimers or compaction. If there are,
         * delay for some time until fewer pages are isolated
         */
        while (unlikely(too_many_isolated(zone))) {
                /* async migration should just abort */
                if (cc->mode == MIGRATE_ASYNC)
                        return 0;

                congestion_wait(BLK_RW_ASYNC, HZ/10);

                if (fatal_signal_pending(current))
                        return 0;
        }

        if (cond_resched()) {
                /* Async terminates prematurely on need_resched() */
                if (cc->mode == MIGRATE_ASYNC)
                        return 0;
        }

        /* Time to isolate some pages for migration */
        for (; low_pfn < end_pfn; low_pfn++) {
                /* give a chance to irqs before checking need_resched() */
                if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
                        if (should_release_lock(&zone->lru_lock)) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                locked = false;
                        }
                }

                /*
                 * migrate_pfn does not necessarily start aligned to a
                 * pageblock. Ensure that pfn_valid is called when moving
                 * into a new MAX_ORDER_NR_PAGES range in case of large
                 * memory holes within the zone
                 */
                if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
                        if (!pfn_valid(low_pfn)) {
                                low_pfn += MAX_ORDER_NR_PAGES - 1;
                                continue;
                        }
                }

                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;

                /*
                 * Get the page and ensure the page is within the same zone.
                 * See the comment in isolate_freepages about overlapping
                 * nodes. It is deliberate that the new zone lock is not taken
                 * as memory compaction should not move pages between nodes.
                 */
                page = pfn_to_page(low_pfn);
                if (page_zone(page) != zone)
                        continue;

                if (!valid_page)
                        valid_page = page;

                /* If isolation recently failed, do not retry */
                pageblock_nr = low_pfn >> pageblock_order;
                if (last_pageblock_nr != pageblock_nr) {
                        int mt;

                        last_pageblock_nr = pageblock_nr;
                        if (!isolation_suitable(cc, page))
                                goto next_pageblock;

                        /*
                         * For async migration, also only scan in MOVABLE
                         * blocks. Async migration is optimistic to see if
                         * the minimum amount of work satisfies the allocation
                         */
                        mt = get_pageblock_migratetype(page);
                        if (cc->mode == MIGRATE_ASYNC &&
                            !migrate_async_suitable(mt)) {
                                set_unsuitable = false;
                                goto next_pageblock;
                        }
                }

                /*
                 * Skip if free. page_order cannot be used without zone->lock
                 * as nothing prevents parallel allocations or buddy merging.
                 */
                if (PageBuddy(page))
                        continue;

                /*
                 * Check may be lockless but that's ok as we recheck later.
                 * It's possible to migrate LRU pages and balloon pages.
                 * Skip any other type of page.
                 */
                if (!PageLRU(page)) {
                        if (unlikely(balloon_page_movable(page))) {
                                if (locked && balloon_page_isolate(page)) {
                                        /* Successfully isolated */
                                        goto isolate_success;
                                }
                        }
                        continue;
                }

                /*
                 * PageLRU is set. lru_lock normally excludes isolation
                 * splitting and collapsing (collapsing has already happened
                 * if PageLRU is set) but the lock is not necessarily taken
                 * here and it is wasteful to take it just to check transhuge.
                 * Check TransHuge without lock and skip the whole pageblock if
                 * it's either a transhuge or hugetlbfs page, as calling
                 * compound_order() without preventing THP from splitting the
                 * page underneath us may return surprising results.
                 */
                if (PageTransHuge(page)) {
                        if (!locked)
                                goto next_pageblock;
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                /*
                 * Migration will fail if an anonymous page is pinned in memory,
                 * so avoid taking lru_lock and isolating it unnecessarily in an
                 * admittedly racy check.
                 */
                if (!page_mapping(page) &&
                    page_count(page) > page_mapcount(page))
                        continue;

                /* Check if it is ok to still hold the lock */
                locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
                                                                locked, cc);
                if (!locked || fatal_signal_pending(current))
                        break;

                /* Recheck PageLRU and PageTransHuge under lock */
                if (!PageLRU(page))
                        continue;
                if (PageTransHuge(page)) {
                        low_pfn += (1 << compound_order(page)) - 1;
                        continue;
                }

                lruvec = mem_cgroup_page_lruvec(page, zone);

                /* Try isolate the page */
                if (__isolate_lru_page(page, mode) != 0)
                        continue;

                VM_BUG_ON_PAGE(PageTransCompound(page), page);

                /* Successfully isolated */
                del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
                cc->finished_update_migrate = true;
                list_add(&page->lru, migratelist);
                cc->nr_migratepages++;
                nr_isolated++;

                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
                        break;
                }

                continue;

next_pageblock:
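                /* Move to the last pfn of this pageblock; the loop's increment then starts the next one. */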
                low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
        }

        acct_isolated(zone, locked, cc);

        if (locked)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        /*
         * Update the pageblock-skip information and cached scanner pfn,
         * if the whole pageblock was scanned without isolating any page.
         */
        if (low_pfn == end_pfn)
                update_pageblock_skip(cc, valid_page, nr_isolated,
                                      set_unsuitable, true);

        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

        count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
        if (nr_isolated)
                count_compact_events(COMPACTISOLATED, nr_isolated);

        return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
{
        struct page *page;
        unsigned long block_start_pfn;  /* start of current pageblock */
        unsigned long block_end_pfn;    /* end of current pageblock */
        unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;

        /*
         * Initialise the free scanner. The starting point is where we last
         * successfully isolated from, zone-cached value, or the end of the
         * zone when isolating for the first time. We need this aligned to
         * the pageblock boundary, because we do
         * block_start_pfn -= pageblock_nr_pages in the for loop.
         * For ending point, take care when isolating in the last pageblock
         * of a zone which ends in the middle of a pageblock.
         * The low boundary is the end of the pageblock the migration scanner
         * is using.
         */
        block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
        block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
                                                zone_end_pfn(zone));
        low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

        /*
         * Isolate free pages until enough are available to migrate the
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
        for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages) {
                unsigned long isolated;

                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
                 * to schedule.
                 */
                cond_resched();

                if (!pfn_valid(block_start_pfn))
                        continue;

                /*
                 * Check for overlapping nodes/zones. It's possible on some
                 * configurations to have a setup like
                 * node0 node1 node0
                 * i.e. it's possible that all pages within a zone's range of
                 * pages do not belong to a single zone.
                 */
                page = pfn_to_page(block_start_pfn);
                if (page_zone(page) != zone)
                        continue;

                /* Check the block is suitable for migration */
                if (!suitable_migration_target(page))
                        continue;

                /* If isolation recently failed, do not retry */
                if (!isolation_suitable(cc, page))
                        continue;

                /* Found a block suitable for isolating free pages from */
                cc->free_pfn = block_start_pfn;
                isolated = isolate_freepages_block(cc, block_start_pfn,
                                        block_end_pfn, freelist, false);
                nr_freepages += isolated;

                /*
                 * Set a flag that we successfully isolated in this pageblock.
                 * In the next loop iteration, zone->compact_cached_free_pfn
                 * will not be updated and thus it will effectively contain the
                 * highest pageblock we isolated pages from.
                 */
                if (isolated)
                        cc->finished_update_free = true;
        }

        /* split_free_page does not map the pages */
        map_pages(freelist);

        /*
         * If we crossed the migrate scanner, we want to keep it that way
         * so that compact_finished() may detect this
         */
        if (block_start_pfn < low_pfn)
                cc->free_pfn = cc->migrate_pfn;

        cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
                                        unsigned long data,
                                        int **result)
{
        struct compact_control *cc = (struct compact_control *)data;
        struct page *freepage;

        /* Isolate free pages if necessary */
        if (list_empty(&cc->freepages)) {
                isolate_freepages(cc->zone, cc);

                if (list_empty(&cc->freepages))
                        return NULL;
        }

        freepage = list_entry(cc->freepages.next, struct page, lru);
        list_del(&freepage->lru);
        cc->nr_freepages--;

        return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
        struct compact_control *cc = (struct compact_control *)data;

        list_add(&page->lru, &cc->freepages);
        cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
        ISOLATE_ABORT,          /* Abort compaction now */
        ISOLATE_NONE,           /* No pages isolated, continue scanning */
        ISOLATE_SUCCESS,        /* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
{
        unsigned long low_pfn, end_pfn;

        /* Do not scan outside zone boundaries */
        low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

        /* Only scan within a pageblock boundary */
        end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

        /* Do not cross the free scanner or scan within a memory hole */
        if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
                cc->migrate_pfn = end_pfn;
                return ISOLATE_NONE;
        }

        /* Perform the isolation */
        low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
        if (!low_pfn || cc->contended)
                return ISOLATE_ABORT;

        cc->migrate_pfn = low_pfn;

        return ISOLATE_SUCCESS;
}

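/*
 * Decide whether compaction of this zone is finished: a fatal signal or
 * the scanners meeting ends the run, otherwise check whether a page of
 * the requested order is now available.
 */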
static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
{
        unsigned int order;
        unsigned long watermark;

        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;

        /* Compaction run completes if the migrate and free scanner meet */
        if (cc->free_pfn <= cc->migrate_pfn) {
                /* Let the next compaction start anew. */
                zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
                zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
                zone->compact_cached_free_pfn = zone_end_pfn(zone);

                /*
                 * Mark that the PG_migrate_skip information should be cleared
                 * by kswapd when it goes to sleep. kswapd does not set the
                 * flag itself as the decision to be clear should be directly
                 * based on an allocation request.
                 */
                if (!current_is_kswapd())
                        zone->compact_blockskip_flush = true;

                return COMPACT_COMPLETE;
        }

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (cc->order == -1)
                return COMPACT_CONTINUE;

        /* Compaction run is not finished if the watermark is not met */
        watermark = low_wmark_pages(zone);
        watermark += (1 << cc->order);

        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
                return COMPACT_CONTINUE;

        /* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                struct free_area *area = &zone->free_area[order];

                /* Job done if page is free of the right migratetype */
                if (!list_empty(&area->free_list[cc->migratetype]))
                        return COMPACT_PARTIAL;

                /* Job done if allocation would set block type */
                if (cc->order >= pageblock_order && area->nr_free)
                        return COMPACT_PARTIAL;
        }

        return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
        int fragindex;
        unsigned long watermark;

        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
         */
        if (order == -1)
                return COMPACT_CONTINUE;

        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL.
         * This is because during migration, copies of pages need to be
         * allocated and for a short time, the footprint is higher
         */
        watermark = low_wmark_pages(zone) + (2UL << order);
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return COMPACT_SKIPPED;

        /*
         * fragmentation index determines if allocation failures are due to
         * low memory or external fragmentation
         *
         * index of -1000 implies allocations might succeed depending on
         * watermarks
         * index towards 0 implies failure is due to lack of memory
         * index towards 1000 implies failure is due to fragmentation
         *
         * Only compact if a failure would be due to fragmentation.
         */
        fragindex = fragmentation_index(zone, order);
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                return COMPACT_SKIPPED;

        if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
                                                    0, 0))
                return COMPACT_PARTIAL;

        return COMPACT_CONTINUE;
}

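/*
 * The main per-zone compaction loop: position the scanners, then
 * alternately isolate and migrate pages until compact_finished()
 * reports the run is done.
 */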
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
        int ret;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        const bool sync = cc->mode != MIGRATE_ASYNC;

        ret = compaction_suitable(zone, cc->order);
        switch (ret) {
        case COMPACT_PARTIAL:
        case COMPACT_SKIPPED:
                /* Compaction is likely to fail */
                return ret;
        case COMPACT_CONTINUE:
                /* Fall through to compaction */
                ;
        }

        /*
         * Clear pageblock skip if there were failures recently and compaction
         * is about to be retried after being deferred. kswapd does not do
         * this reset as it'll reset the cached information when going to sleep.
         */
        if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
                __reset_isolation_suitable(zone);

        /*
         * Setup to move all movable pages to the end of the zone. Use cached
         * information on where the scanners should start but check that it
         * is initialised by ensuring the values are within zone boundaries.
         */
        cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
        cc->free_pfn = zone->compact_cached_free_pfn;
        if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
                cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
                zone->compact_cached_free_pfn = cc->free_pfn;
        }
        if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
                cc->migrate_pfn = start_pfn;
                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
        }

        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

        migrate_prep_local();

        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
                int err;

                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_PARTIAL;
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        goto out;
                case ISOLATE_NONE:
                        continue;
                case ISOLATE_SUCCESS:
                        ;
                }

                if (!cc->nr_migratepages)
                        continue;

                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                compaction_free, (unsigned long)cc, cc->mode,
                                MR_COMPACTION);

                trace_mm_compaction_migratepages(cc->nr_migratepages, err,
                                                        &cc->migratepages);

                /* All pages were either migrated or will be released */
                cc->nr_migratepages = 0;
                if (err) {
                        putback_movable_pages(&cc->migratepages);
                        /*
                         * migrate_pages() may return -ENOMEM when scanners meet
                         * and we want compact_finished() to detect it
                         */
                        if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
                }
        }

out:
        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);

        trace_mm_compaction_end(ret);

        return ret;
}

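/*
 * Set up a compact_control on the stack and run compaction of a single
 * zone on behalf of try_to_compact_pages().
 */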
static unsigned long compact_zone_order(struct zone *zone, int order,
                gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
{
        unsigned long ret;
        struct compact_control cc = {
                .nr_freepages = 0,
                .nr_migratepages = 0,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .mode = mode,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);

        ret = compact_zone(zone, &cc);

        VM_BUG_ON(!list_empty(&cc.freepages));
        VM_BUG_ON(!list_empty(&cc.migratepages));

        *contended = cc.contended;
        return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that is true if compaction was aborted due to lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
                        enum migrate_mode mode, bool *contended)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
        int may_perform_io = gfp_mask & __GFP_IO;
        struct zoneref *z;
        struct zone *zone;
        int rc = COMPACT_SKIPPED;
        int alloc_flags = 0;

        /* Check if the GFP flags allow compaction */
        if (!order || !may_enter_fs || !may_perform_io)
                return rc;

        count_compact_event(COMPACTSTALL);

#ifdef CONFIG_CMA
        if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
#endif
        /* Compact each zone in the list */
        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                                                                nodemask) {
                int status;

                status = compact_zone_order(zone, order, gfp_mask, mode,
                                                contended);
                rc = max(status, rc);

                /* If a normal allocation would succeed, stop compacting */
                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
                                      alloc_flags))
                        break;
        }

        return rc;
}


/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
        int zoneid;
        struct zone *zone;

        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

                zone = &pgdat->node_zones[zoneid];
                if (!populated_zone(zone))
                        continue;

                cc->nr_freepages = 0;
                cc->nr_migratepages = 0;
                cc->zone = zone;
                INIT_LIST_HEAD(&cc->freepages);
                INIT_LIST_HEAD(&cc->migratepages);

                if (cc->order == -1 || !compaction_deferred(zone, cc->order))
                        compact_zone(zone, cc);

                if (cc->order > 0) {
                        if (zone_watermark_ok(zone, cc->order,
                                                low_wmark_pages(zone), 0, 0))
                                compaction_defer_reset(zone, cc->order, false);
                }

                VM_BUG_ON(!list_empty(&cc->freepages));
                VM_BUG_ON(!list_empty(&cc->migratepages));
        }
}

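/*
 * Compact all of a node's populated zones using async migration. An
 * order of 0 is a no-op, as order-0 allocations gain nothing from it.
 */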
void compact_pgdat(pg_data_t *pgdat, int order)
{
        struct compact_control cc = {
                .order = order,
                .mode = MIGRATE_ASYNC,
        };

        if (!order)
                return;

        __compact_pgdat(pgdat, &cc);
}

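/* Fully compact one node: sync migration, all orders, skip hints ignored. */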
static void compact_node(int nid)
{
        struct compact_control cc = {
                .order = -1,
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
        };

        __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
        int nid;

        /* Flush pending updates to the LRU lists */
        lru_add_drain_all();

        for_each_online_node(nid)
                compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        if (write)
                compact_nodes();

        return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_minmax(table, write, buffer, length, ppos);

        return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
        int nid = dev->id;

        if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
                /* Flush pending updates to the LRU lists */
                lru_add_drain_all();

                compact_node(nid);
        }

        return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
        return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
        return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */