git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - mm/page_alloc.c
mm/compaction: pass only pageblock aligned range to pageblock_pfn_to_page
1da177e4
LT
1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list, the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
10ed273f 22#include <linux/jiffies.h>
1da177e4 23#include <linux/bootmem.h>
edbe7d23 24#include <linux/memblock.h>
1da177e4 25#include <linux/compiler.h>
9f158333 26#include <linux/kernel.h>
b1eeab67 27#include <linux/kmemcheck.h>
b8c73fc2 28#include <linux/kasan.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/suspend.h>
31#include <linux/pagevec.h>
32#include <linux/blkdev.h>
33#include <linux/slab.h>
a238ab5b 34#include <linux/ratelimit.h>
5a3135c2 35#include <linux/oom.h>
1da177e4
LT
36#include <linux/notifier.h>
37#include <linux/topology.h>
38#include <linux/sysctl.h>
39#include <linux/cpu.h>
40#include <linux/cpuset.h>
bdc8cb98 41#include <linux/memory_hotplug.h>
1da177e4
LT
42#include <linux/nodemask.h>
43#include <linux/vmalloc.h>
a6cccdc3 44#include <linux/vmstat.h>
4be38e35 45#include <linux/mempolicy.h>
4b94ffdc 46#include <linux/memremap.h>
6811378e 47#include <linux/stop_machine.h>
c713216d
MG
48#include <linux/sort.h>
49#include <linux/pfn.h>
3fcfab16 50#include <linux/backing-dev.h>
933e312e 51#include <linux/fault-inject.h>
a5d76b54 52#include <linux/page-isolation.h>
eefa864b 53#include <linux/page_ext.h>
3ac7fe5a 54#include <linux/debugobjects.h>
dbb1f81c 55#include <linux/kmemleak.h>
56de7263 56#include <linux/compaction.h>
0d3d062a 57#include <trace/events/kmem.h>
268bb0ce 58#include <linux/prefetch.h>
6e543d57 59#include <linux/mm_inline.h>
041d3a8c 60#include <linux/migrate.h>
e30825f1 61#include <linux/page_ext.h>
949f7ec5 62#include <linux/hugetlb.h>
8bd75c77 63#include <linux/sched/rt.h>
48c96a36 64#include <linux/page_owner.h>
0e1cc95b 65#include <linux/kthread.h>
1da177e4 66
7ee3d4e8 67#include <asm/sections.h>
1da177e4 68#include <asm/tlbflush.h>
ac924c60 69#include <asm/div64.h>
1da177e4
LT
70#include "internal.h"
71
c8e251fa
CS
72/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
73static DEFINE_MUTEX(pcp_batch_high_lock);
7cd2b0a3 74#define MIN_PERCPU_PAGELIST_FRACTION (8)
c8e251fa 75
72812019
LS
76#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
77DEFINE_PER_CPU(int, numa_node);
78EXPORT_PER_CPU_SYMBOL(numa_node);
79#endif
80
7aac7898
LS
81#ifdef CONFIG_HAVE_MEMORYLESS_NODES
82/*
83 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
84 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
85 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
86 * defined in <linux/topology.h>.
87 */
88DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
89EXPORT_PER_CPU_SYMBOL(_numa_mem_);
ad2c8144 90int _node_numa_mem_[MAX_NUMNODES];
7aac7898
LS
91#endif
92
1da177e4 93/*
13808910 94 * Array of node states.
1da177e4 95 */
13808910
CL
96nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
97 [N_POSSIBLE] = NODE_MASK_ALL,
98 [N_ONLINE] = { { [0] = 1UL } },
99#ifndef CONFIG_NUMA
100 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
101#ifdef CONFIG_HIGHMEM
102 [N_HIGH_MEMORY] = { { [0] = 1UL } },
20b2f52b
LJ
103#endif
104#ifdef CONFIG_MOVABLE_NODE
105 [N_MEMORY] = { { [0] = 1UL } },
13808910
CL
106#endif
107 [N_CPU] = { { [0] = 1UL } },
108#endif /* NUMA */
109};
110EXPORT_SYMBOL(node_states);
111
c3d5f5f0
JL
112/* Protect totalram_pages and zone->managed_pages */
113static DEFINE_SPINLOCK(managed_page_count_lock);
114
6c231b7b 115unsigned long totalram_pages __read_mostly;
cb45b0e9 116unsigned long totalreserve_pages __read_mostly;
e48322ab 117unsigned long totalcma_pages __read_mostly;
ab8fabd4 118
1b76b02f 119int percpu_pagelist_fraction;
dcce284a 120gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
1da177e4 121
bb14c2c7
VB
122/*
123 * A cached value of the page's pageblock's migratetype, used when the page is
124 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
125 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
126 * Also the migratetype set in the page does not necessarily match the pcplist
127 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
128 * other index - this ensures that it will be put on the correct CMA freelist.
129 */
130static inline int get_pcppage_migratetype(struct page *page)
131{
132 return page->index;
133}
134
135static inline void set_pcppage_migratetype(struct page *page, int migratetype)
136{
137 page->index = migratetype;
138}
139
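/*
 * Illustrative sketch (not part of the kernel source): how the cached value
 * above is used around the per-cpu lists. When a page is queued on a pcplist
 * its pageblock migratetype is looked up once and stashed in page->index via
 * set_pcppage_migratetype(); when free_pcppages_bulk() later drains the list
 * it only calls get_pcppage_migratetype() instead of repeating the pageblock
 * lookup. The helper below is hypothetical and only shows the shape of the
 * free-side caching.
 */
static inline void example_queue_on_pcplist(struct page *page, unsigned long pfn)
{
	/* one pageblock lookup at free time ... */
	int migratetype = get_pfnblock_migratetype(page, pfn);

	/* ... cached so the bulk drain can skip the lookup in the common case */
	set_pcppage_migratetype(page, migratetype);
}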
452aa699
RW
140#ifdef CONFIG_PM_SLEEP
141/*
142 * The following functions are used by the suspend/hibernate code to temporarily
143 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
144 * while devices are suspended. To avoid races with the suspend/hibernate code,
145 * they should always be called with pm_mutex held (gfp_allowed_mask also should
146 * only be modified with pm_mutex held, unless the suspend/hibernate code is
147 * guaranteed not to run in parallel with that modification).
148 */
c9e664f1
RW
149
150static gfp_t saved_gfp_mask;
151
152void pm_restore_gfp_mask(void)
452aa699
RW
153{
154 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
155 if (saved_gfp_mask) {
156 gfp_allowed_mask = saved_gfp_mask;
157 saved_gfp_mask = 0;
158 }
452aa699
RW
159}
160
c9e664f1 161void pm_restrict_gfp_mask(void)
452aa699 162{
452aa699 163 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
164 WARN_ON(saved_gfp_mask);
165 saved_gfp_mask = gfp_allowed_mask;
d0164adc 166 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
452aa699 167}
f90ac398
MG
168
169bool pm_suspended_storage(void)
170{
d0164adc 171 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
f90ac398
MG
172 return false;
173 return true;
174}
452aa699
RW
175#endif /* CONFIG_PM_SLEEP */
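/*
 * Illustrative usage sketch (an assumption, not taken from this file): the
 * hibernation path is expected to bracket image writing roughly like this,
 * with pm_mutex held so the WARN_ONs above stay quiet:
 *
 *	pm_restrict_gfp_mask();		// allocations may no longer do I/O or FS calls
 *	... write the hibernation image ...
 *	pm_restore_gfp_mask();		// normal GFP_KERNEL behaviour returns
 */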
176
d9c23400 177#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
d00181b9 178unsigned int pageblock_order __read_mostly;
d9c23400
MG
179#endif
180
d98c7a09 181static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 182
1da177e4
LT
183/*
184 * results with 256, 32 in the lowmem_reserve sysctl:
185 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
186 * 1G machine -> (16M dma, 784M normal, 224M high)
187 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
188 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
84109e15 189 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
190 *
191 * TBD: should special case ZONE_DMA32 machines here - in those we normally
192 * don't need any ZONE_NORMAL reservation
1da177e4 193 */
2f1b6248 194int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 195#ifdef CONFIG_ZONE_DMA
2f1b6248 196 256,
4b51d669 197#endif
fb0e7942 198#ifdef CONFIG_ZONE_DMA32
2f1b6248 199 256,
fb0e7942 200#endif
e53ef38d 201#ifdef CONFIG_HIGHMEM
2a1e274a 202 32,
e53ef38d 203#endif
2a1e274a 204 32,
2f1b6248 205};
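/*
 * Worked example (illustrative only, not part of the allocator): the reserve
 * kept in a lower zone is the memory of the zones above it divided by that
 * lower zone's ratio. On the 1G machine described above, HIGHMEM allocations
 * leave 224M/32 = 7M of ZONE_NORMAL untouched, and allocations that could
 * also have used NORMAL or HIGHMEM leave (224M+784M)/256, about 4M, of
 * ZONE_DMA free.
 */
static inline unsigned long example_lowmem_reserve(unsigned long higher_zone_pages,
						   int ratio)
{
	return higher_zone_pages / ratio;	/* pages protected in the lower zone */
}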
1da177e4
LT
206
207EXPORT_SYMBOL(totalram_pages);
1da177e4 208
15ad7cdc 209static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 210#ifdef CONFIG_ZONE_DMA
2f1b6248 211 "DMA",
4b51d669 212#endif
fb0e7942 213#ifdef CONFIG_ZONE_DMA32
2f1b6248 214 "DMA32",
fb0e7942 215#endif
2f1b6248 216 "Normal",
e53ef38d 217#ifdef CONFIG_HIGHMEM
2a1e274a 218 "HighMem",
e53ef38d 219#endif
2a1e274a 220 "Movable",
033fbae9
DW
221#ifdef CONFIG_ZONE_DEVICE
222 "Device",
223#endif
2f1b6248
CL
224};
225
60f30350
VB
226char * const migratetype_names[MIGRATE_TYPES] = {
227 "Unmovable",
228 "Movable",
229 "Reclaimable",
230 "HighAtomic",
231#ifdef CONFIG_CMA
232 "CMA",
233#endif
234#ifdef CONFIG_MEMORY_ISOLATION
235 "Isolate",
236#endif
237};
238
f1e61557
KS
239compound_page_dtor * const compound_page_dtors[] = {
240 NULL,
241 free_compound_page,
242#ifdef CONFIG_HUGETLB_PAGE
243 free_huge_page,
244#endif
9a982250
KS
245#ifdef CONFIG_TRANSPARENT_HUGEPAGE
246 free_transhuge_page,
247#endif
f1e61557
KS
248};
249
1da177e4 250int min_free_kbytes = 1024;
42aa83cb 251int user_min_free_kbytes = -1;
1da177e4 252
2c85f51d
JB
253static unsigned long __meminitdata nr_kernel_pages;
254static unsigned long __meminitdata nr_all_pages;
a3142c8e 255static unsigned long __meminitdata dma_reserve;
1da177e4 256
0ee332c1
TH
257#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
258static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
259static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
260static unsigned long __initdata required_kernelcore;
261static unsigned long __initdata required_movablecore;
262static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
342332e6 263static bool mirrored_kernelcore;
0ee332c1
TH
264
265/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
266int movable_zone;
267EXPORT_SYMBOL(movable_zone);
268#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 269
418508c1
MS
270#if MAX_NUMNODES > 1
271int nr_node_ids __read_mostly = MAX_NUMNODES;
62bc62a8 272int nr_online_nodes __read_mostly = 1;
418508c1 273EXPORT_SYMBOL(nr_node_ids);
62bc62a8 274EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
275#endif
276
9ef9acb0
MG
277int page_group_by_mobility_disabled __read_mostly;
278
3a80a7fa
MG
279#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
280static inline void reset_deferred_meminit(pg_data_t *pgdat)
281{
282 pgdat->first_deferred_pfn = ULONG_MAX;
283}
284
285/* Returns true if the struct page for the pfn is uninitialised */
0e1cc95b 286static inline bool __meminit early_page_uninitialised(unsigned long pfn)
3a80a7fa 287{
ae026b2a 288 if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
3a80a7fa
MG
289 return true;
290
291 return false;
292}
293
7e18adb4
MG
294static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
295{
296 if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
297 return true;
298
299 return false;
300}
301
3a80a7fa
MG
302/*
303 * Returns false when the remaining initialisation should be deferred until
304 * later in the boot cycle when it can be parallelised.
305 */
306static inline bool update_defer_init(pg_data_t *pgdat,
307 unsigned long pfn, unsigned long zone_end,
308 unsigned long *nr_initialised)
309{
310 /* Always populate low zones for address-constrained allocations */
311 if (zone_end < pgdat_end_pfn(pgdat))
312 return true;
313
314 /* Initialise at least 2G of the highest zone */
315 (*nr_initialised)++;
316 if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
317 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
318 pgdat->first_deferred_pfn = pfn;
319 return false;
320 }
321
322 return true;
323}
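/*
 * Worked example (illustrative only): with 4K pages PAGE_SHIFT is 12, so the
 * threshold above, (2UL << (30 - PAGE_SHIFT)), is 2UL << 18 = 524288 pages =
 * 2G. Deferral therefore only starts once 2G of the highest zone has been
 * initialised, and only at a section-aligned pfn.
 */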
324#else
325static inline void reset_deferred_meminit(pg_data_t *pgdat)
326{
327}
328
329static inline bool early_page_uninitialised(unsigned long pfn)
330{
331 return false;
332}
333
7e18adb4
MG
334static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
335{
336 return false;
337}
338
3a80a7fa
MG
339static inline bool update_defer_init(pg_data_t *pgdat,
340 unsigned long pfn, unsigned long zone_end,
341 unsigned long *nr_initialised)
342{
343 return true;
344}
345#endif
346
347
ee6f509c 348void set_pageblock_migratetype(struct page *page, int migratetype)
b2a0ac88 349{
5d0f3f72
KM
350 if (unlikely(page_group_by_mobility_disabled &&
351 migratetype < MIGRATE_PCPTYPES))
49255c61
MG
352 migratetype = MIGRATE_UNMOVABLE;
353
b2a0ac88
MG
354 set_pageblock_flags_group(page, (unsigned long)migratetype,
355 PB_migrate, PB_migrate_end);
356}
357
13e7444b 358#ifdef CONFIG_DEBUG_VM
c6a57e19 359static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 360{
bdc8cb98
DH
361 int ret = 0;
362 unsigned seq;
363 unsigned long pfn = page_to_pfn(page);
b5e6a5a2 364 unsigned long sp, start_pfn;
c6a57e19 365
bdc8cb98
DH
366 do {
367 seq = zone_span_seqbegin(zone);
b5e6a5a2
CS
368 start_pfn = zone->zone_start_pfn;
369 sp = zone->spanned_pages;
108bcc96 370 if (!zone_spans_pfn(zone, pfn))
bdc8cb98
DH
371 ret = 1;
372 } while (zone_span_seqretry(zone, seq));
373
b5e6a5a2 374 if (ret)
613813e8
DH
375 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
376 pfn, zone_to_nid(zone), zone->name,
377 start_pfn, start_pfn + sp);
b5e6a5a2 378
bdc8cb98 379 return ret;
c6a57e19
DH
380}
381
382static int page_is_consistent(struct zone *zone, struct page *page)
383{
14e07298 384 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 385 return 0;
1da177e4 386 if (zone != page_zone(page))
c6a57e19
DH
387 return 0;
388
389 return 1;
390}
391/*
392 * Temporary debugging check for pages not lying within a given zone.
393 */
394static int bad_range(struct zone *zone, struct page *page)
395{
396 if (page_outside_zone_boundaries(zone, page))
1da177e4 397 return 1;
c6a57e19
DH
398 if (!page_is_consistent(zone, page))
399 return 1;
400
1da177e4
LT
401 return 0;
402}
13e7444b
NP
403#else
404static inline int bad_range(struct zone *zone, struct page *page)
405{
406 return 0;
407}
408#endif
409
d230dec1
KS
410static void bad_page(struct page *page, const char *reason,
411 unsigned long bad_flags)
1da177e4 412{
d936cf9b
HD
413 static unsigned long resume;
414 static unsigned long nr_shown;
415 static unsigned long nr_unshown;
416
2a7684a2
WF
417 /* Don't complain about poisoned pages */
418 if (PageHWPoison(page)) {
22b751c3 419 page_mapcount_reset(page); /* remove PageBuddy */
2a7684a2
WF
420 return;
421 }
422
d936cf9b
HD
423 /*
424 * Allow a burst of 60 reports, then keep quiet for that minute;
425 * or allow a steady drip of one report per second.
426 */
427 if (nr_shown == 60) {
428 if (time_before(jiffies, resume)) {
429 nr_unshown++;
430 goto out;
431 }
432 if (nr_unshown) {
ff8e8116 433 pr_alert(
1e9e6365 434 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
435 nr_unshown);
436 nr_unshown = 0;
437 }
438 nr_shown = 0;
439 }
440 if (nr_shown++ == 0)
441 resume = jiffies + 60 * HZ;
442
ff8e8116 443 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 444 current->comm, page_to_pfn(page));
ff8e8116
VB
445 __dump_page(page, reason);
446 bad_flags &= page->flags;
447 if (bad_flags)
448 pr_alert("bad because of flags: %#lx(%pGp)\n",
449 bad_flags, &bad_flags);
4e462112 450 dump_page_owner(page);
3dc14741 451
4f31888c 452 print_modules();
1da177e4 453 dump_stack();
d936cf9b 454out:
8cc3b392 455 /* Leave bad fields for debug, except PageBuddy could make trouble */
22b751c3 456 page_mapcount_reset(page); /* remove PageBuddy */
373d4d09 457 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1da177e4
LT
458}
459
1da177e4
LT
460/*
461 * Higher-order pages are called "compound pages". They are structured thusly:
462 *
1d798ca3 463 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
1da177e4 464 *
1d798ca3
KS
465 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
466 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the head page.
1da177e4 467 *
1d798ca3
KS
468 * The first tail page's ->compound_dtor holds the offset in array of compound
469 * page destructors. See compound_page_dtors.
1da177e4 470 *
1d798ca3 471 * The first tail page's ->compound_order holds the order of allocation.
41d78ba5 472 * This usage means that zero-order pages may not be compound.
1da177e4 473 */
d98c7a09 474
9a982250 475void free_compound_page(struct page *page)
d98c7a09 476{
d85f3385 477 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
478}
479
d00181b9 480void prep_compound_page(struct page *page, unsigned int order)
18229df5
AW
481{
482 int i;
483 int nr_pages = 1 << order;
484
f1e61557 485 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
18229df5
AW
486 set_compound_order(page, order);
487 __SetPageHead(page);
488 for (i = 1; i < nr_pages; i++) {
489 struct page *p = page + i;
58a84aa9 490 set_page_count(p, 0);
1c290f64 491 p->mapping = TAIL_MAPPING;
1d798ca3 492 set_compound_head(p, page);
18229df5 493 }
53f9263b 494 atomic_set(compound_mapcount_ptr(page), -1);
18229df5
AW
495}
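/*
 * Illustrative sketch (not part of this file): the tail-page linkage that
 * prep_compound_page() above sets up. set_compound_head() stores the head
 * pointer with bit 0 set, which is exactly what PageTail() tests and
 * compound_head() strips off again.
 */
static inline void example_compound_encoding(struct page *head, struct page *tail)
{
	tail->compound_head = (unsigned long)head + 1;	/* bit 0 => PageTail() */
	/* compound_head(tail) recovers 'head' by clearing that bit again */
}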
496
c0a32fc5
SG
497#ifdef CONFIG_DEBUG_PAGEALLOC
498unsigned int _debug_guardpage_minorder;
ea6eabb0
CB
499bool _debug_pagealloc_enabled __read_mostly
500 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
e30825f1
JK
501bool _debug_guardpage_enabled __read_mostly;
502
031bc574
JK
503static int __init early_debug_pagealloc(char *buf)
504{
505 if (!buf)
506 return -EINVAL;
507
508 if (strcmp(buf, "on") == 0)
509 _debug_pagealloc_enabled = true;
510
ea6eabb0
CB
511 if (strcmp(buf, "off") == 0)
512 _debug_pagealloc_enabled = false;
513
031bc574
JK
514 return 0;
515}
516early_param("debug_pagealloc", early_debug_pagealloc);
517
e30825f1
JK
518static bool need_debug_guardpage(void)
519{
031bc574
JK
520 /* If we don't use debug_pagealloc, we don't need guard page */
521 if (!debug_pagealloc_enabled())
522 return false;
523
e30825f1
JK
524 return true;
525}
526
527static void init_debug_guardpage(void)
528{
031bc574
JK
529 if (!debug_pagealloc_enabled())
530 return;
531
e30825f1
JK
532 _debug_guardpage_enabled = true;
533}
534
535struct page_ext_operations debug_guardpage_ops = {
536 .need = need_debug_guardpage,
537 .init = init_debug_guardpage,
538};
c0a32fc5
SG
539
540static int __init debug_guardpage_minorder_setup(char *buf)
541{
542 unsigned long res;
543
544 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
545 printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
546 return 0;
547 }
548 _debug_guardpage_minorder = res;
549 printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
550 return 0;
551}
552__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
553
2847cf95
JK
554static inline void set_page_guard(struct zone *zone, struct page *page,
555 unsigned int order, int migratetype)
c0a32fc5 556{
e30825f1
JK
557 struct page_ext *page_ext;
558
559 if (!debug_guardpage_enabled())
560 return;
561
562 page_ext = lookup_page_ext(page);
563 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
564
2847cf95
JK
565 INIT_LIST_HEAD(&page->lru);
566 set_page_private(page, order);
567 /* Guard pages are not available for any usage */
568 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
c0a32fc5
SG
569}
570
2847cf95
JK
571static inline void clear_page_guard(struct zone *zone, struct page *page,
572 unsigned int order, int migratetype)
c0a32fc5 573{
e30825f1
JK
574 struct page_ext *page_ext;
575
576 if (!debug_guardpage_enabled())
577 return;
578
579 page_ext = lookup_page_ext(page);
580 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
581
2847cf95
JK
582 set_page_private(page, 0);
583 if (!is_migrate_isolate(migratetype))
584 __mod_zone_freepage_state(zone, (1 << order), migratetype);
c0a32fc5
SG
585}
586#else
e30825f1 587struct page_ext_operations debug_guardpage_ops = { NULL, };
2847cf95
JK
588static inline void set_page_guard(struct zone *zone, struct page *page,
589 unsigned int order, int migratetype) {}
590static inline void clear_page_guard(struct zone *zone, struct page *page,
591 unsigned int order, int migratetype) {}
c0a32fc5
SG
592#endif
593
7aeb09f9 594static inline void set_page_order(struct page *page, unsigned int order)
6aa3001b 595{
4c21e2f2 596 set_page_private(page, order);
676165a8 597 __SetPageBuddy(page);
1da177e4
LT
598}
599
600static inline void rmv_page_order(struct page *page)
601{
676165a8 602 __ClearPageBuddy(page);
4c21e2f2 603 set_page_private(page, 0);
1da177e4
LT
604}
605
1da177e4
LT
606/*
607 * This function checks whether a page is free && is the buddy.
608 * We can coalesce a page and its buddy if
13e7444b 609 * (a) the buddy is not in a hole &&
676165a8 610 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
611 * (c) a page and its buddy have the same order &&
612 * (d) a page and its buddy are in the same zone.
676165a8 613 *
cf6fe945
WSH
614 * For recording whether a page is in the buddy system, we set ->_mapcount to
615 * PAGE_BUDDY_MAPCOUNT_VALUE.
616 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
617 * serialized by zone->lock.
1da177e4 618 *
676165a8 619 * For recording page's order, we use page_private(page).
1da177e4 620 */
cb2b95e1 621static inline int page_is_buddy(struct page *page, struct page *buddy,
7aeb09f9 622 unsigned int order)
1da177e4 623{
14e07298 624 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 625 return 0;
13e7444b 626
c0a32fc5 627 if (page_is_guard(buddy) && page_order(buddy) == order) {
d34c5fa0
MG
628 if (page_zone_id(page) != page_zone_id(buddy))
629 return 0;
630
4c5018ce
WY
631 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
632
c0a32fc5
SG
633 return 1;
634 }
635
cb2b95e1 636 if (PageBuddy(buddy) && page_order(buddy) == order) {
d34c5fa0
MG
637 /*
638 * zone check is done late to avoid uselessly
639 * calculating zone/node ids for pages that could
640 * never merge.
641 */
642 if (page_zone_id(page) != page_zone_id(buddy))
643 return 0;
644
4c5018ce
WY
645 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
646
6aa3001b 647 return 1;
676165a8 648 }
6aa3001b 649 return 0;
1da177e4
LT
650}
651
652/*
653 * Freeing function for a buddy system allocator.
654 *
655 * The concept of a buddy system is to maintain a direct-mapped table
656 * (containing bit values) for memory blocks of various "orders".
657 * The bottom level table contains the map for the smallest allocatable
658 * units of memory (here, pages), and each level above it describes
659 * pairs of units from the levels below, hence, "buddies".
660 * At a high level, all that happens here is marking the table entry
661 * at the bottom level available, and propagating the changes upward
662 * as necessary, plus some accounting needed to play nicely with other
663 * parts of the VM system.
664 * At each level, we keep a list of pages, which are heads of contiguous
cf6fe945
WSH
665 * free pages of length (1 << order) and marked with _mapcount
666 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
667 * field.
1da177e4 668 * So when we are allocating or freeing one, we can derive the state of the
5f63b720
MN
669 * other. That is, if we allocate a small block, and both were
670 * free, the remainder of the region must be split into blocks.
1da177e4 671 * If a block is freed, and its buddy is also free, then this
5f63b720 672 * triggers coalescing into a block of larger size.
1da177e4 673 *
6d49e352 674 * -- nyc
1da177e4
LT
675 */
676
48db57f8 677static inline void __free_one_page(struct page *page,
dc4b0caf 678 unsigned long pfn,
ed0ae21d
MG
679 struct zone *zone, unsigned int order,
680 int migratetype)
1da177e4
LT
681{
682 unsigned long page_idx;
6dda9d55 683 unsigned long combined_idx;
43506fad 684 unsigned long uninitialized_var(buddy_idx);
6dda9d55 685 struct page *buddy;
d00181b9 686 unsigned int max_order = MAX_ORDER;
1da177e4 687
d29bb978 688 VM_BUG_ON(!zone_is_initialized(zone));
6e9f0d58 689 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1da177e4 690
ed0ae21d 691 VM_BUG_ON(migratetype == -1);
3c605096
JK
692 if (is_migrate_isolate(migratetype)) {
693 /*
694 * We restrict max order of merging to prevent merge
695 * between freepages on isolate pageblock and normal
696 * pageblock. Without this, pageblock isolation
697 * could cause incorrect freepage accounting.
698 */
d00181b9 699 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
3c605096 700 } else {
8f82b55d 701 __mod_zone_freepage_state(zone, 1 << order, migratetype);
3c605096 702 }
ed0ae21d 703
3c605096 704 page_idx = pfn & ((1 << max_order) - 1);
1da177e4 705
309381fe
SL
706 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
707 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1da177e4 708
3c605096 709 while (order < max_order - 1) {
43506fad
KC
710 buddy_idx = __find_buddy_index(page_idx, order);
711 buddy = page + (buddy_idx - page_idx);
cb2b95e1 712 if (!page_is_buddy(page, buddy, order))
3c82d0ce 713 break;
c0a32fc5
SG
714 /*
715 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
716 * merge with it and move up one order.
717 */
718 if (page_is_guard(buddy)) {
2847cf95 719 clear_page_guard(zone, buddy, order, migratetype);
c0a32fc5
SG
720 } else {
721 list_del(&buddy->lru);
722 zone->free_area[order].nr_free--;
723 rmv_page_order(buddy);
724 }
43506fad 725 combined_idx = buddy_idx & page_idx;
1da177e4
LT
726 page = page + (combined_idx - page_idx);
727 page_idx = combined_idx;
728 order++;
729 }
730 set_page_order(page, order);
6dda9d55
CZ
731
732 /*
733 * If this is not the largest possible page, check if the buddy
734 * of the next-highest order is free. If it is, it's possible
735 * that pages are being freed that will coalesce soon. In case
736 * that is happening, add the free page to the tail of the list
737 * so it's less likely to be used soon and more likely to be merged
738 * as a higher order page
739 */
b7f50cfa 740 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
6dda9d55 741 struct page *higher_page, *higher_buddy;
43506fad
KC
742 combined_idx = buddy_idx & page_idx;
743 higher_page = page + (combined_idx - page_idx);
744 buddy_idx = __find_buddy_index(combined_idx, order + 1);
0ba8f2d5 745 higher_buddy = higher_page + (buddy_idx - combined_idx);
6dda9d55
CZ
746 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
747 list_add_tail(&page->lru,
748 &zone->free_area[order].free_list[migratetype]);
749 goto out;
750 }
751 }
752
753 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
754out:
1da177e4
LT
755 zone->free_area[order].nr_free++;
756}
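/*
 * Worked example (illustrative only, assuming __find_buddy_index() is the
 * usual buddy XOR, page_idx ^ (1 << order)): a free page with page_idx 0xc5
 * at order 0 has buddy_idx 0xc4. If that buddy is also free, combined_idx =
 * 0xc4 & 0xc5 = 0xc4, so the lower page of the pair becomes the head of an
 * order-1 block and the loop above retries the merge one order higher.
 */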
757
224abf92 758static inline int free_pages_check(struct page *page)
1da177e4 759{
d230dec1 760 const char *bad_reason = NULL;
f0b791a3
DH
761 unsigned long bad_flags = 0;
762
53f9263b 763 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
764 bad_reason = "nonzero mapcount";
765 if (unlikely(page->mapping != NULL))
766 bad_reason = "non-NULL mapping";
767 if (unlikely(atomic_read(&page->_count) != 0))
768 bad_reason = "nonzero _count";
769 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
770 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
771 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
772 }
9edad6ea
JW
773#ifdef CONFIG_MEMCG
774 if (unlikely(page->mem_cgroup))
775 bad_reason = "page still charged to cgroup";
776#endif
f0b791a3
DH
777 if (unlikely(bad_reason)) {
778 bad_page(page, bad_reason, bad_flags);
79f4b7bf 779 return 1;
8cc3b392 780 }
90572890 781 page_cpupid_reset_last(page);
79f4b7bf
HD
782 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
783 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
784 return 0;
1da177e4
LT
785}
786
787/*
5f8dcc21 788 * Frees a number of pages from the PCP lists
1da177e4 789 * Assumes all pages on list are in same zone, and of same order.
207f36ee 790 * count is the number of pages to free.
1da177e4
LT
791 *
792 * If the zone was previously in an "all pages pinned" state then look to
793 * see if this freeing clears that state.
794 *
795 * And clear the zone's pages_scanned counter, to hold off the "all pages are
796 * pinned" detection logic.
797 */
5f8dcc21
MG
798static void free_pcppages_bulk(struct zone *zone, int count,
799 struct per_cpu_pages *pcp)
1da177e4 800{
5f8dcc21 801 int migratetype = 0;
a6f9edd6 802 int batch_free = 0;
72853e29 803 int to_free = count;
0d5d823a 804 unsigned long nr_scanned;
5f8dcc21 805
c54ad30c 806 spin_lock(&zone->lock);
0d5d823a
MG
807 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
808 if (nr_scanned)
809 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
f2260e6b 810
72853e29 811 while (to_free) {
48db57f8 812 struct page *page;
5f8dcc21
MG
813 struct list_head *list;
814
815 /*
a6f9edd6
MG
816 * Remove pages from lists in a round-robin fashion. A
817 * batch_free count is maintained that is incremented when an
818 * empty list is encountered. This is so more pages are freed
819 * off fuller lists instead of spinning excessively around empty
820 * lists
5f8dcc21
MG
821 */
822 do {
a6f9edd6 823 batch_free++;
5f8dcc21
MG
824 if (++migratetype == MIGRATE_PCPTYPES)
825 migratetype = 0;
826 list = &pcp->lists[migratetype];
827 } while (list_empty(list));
48db57f8 828
1d16871d
NK
829 /* This is the only non-empty list. Free them all. */
830 if (batch_free == MIGRATE_PCPTYPES)
831 batch_free = to_free;
832
a6f9edd6 833 do {
770c8aaa
BZ
834 int mt; /* migratetype of the to-be-freed page */
835
a16601c5 836 page = list_last_entry(list, struct page, lru);
a6f9edd6
MG
837 /* must delete as __free_one_page list manipulates */
838 list_del(&page->lru);
aa016d14 839
bb14c2c7 840 mt = get_pcppage_migratetype(page);
aa016d14
VB
841 /* MIGRATE_ISOLATE page should not go to pcplists */
842 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
843 /* Pageblock could have been isolated meanwhile */
8f82b55d 844 if (unlikely(has_isolate_pageblock(zone)))
51bb1a40 845 mt = get_pageblock_migratetype(page);
51bb1a40 846
dc4b0caf 847 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
770c8aaa 848 trace_mm_page_pcpu_drain(page, 0, mt);
72853e29 849 } while (--to_free && --batch_free && !list_empty(list));
1da177e4 850 }
c54ad30c 851 spin_unlock(&zone->lock);
1da177e4
LT
852}
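/*
 * Worked example (illustrative only): draining count == 6 while the UNMOVABLE
 * pcplist is empty. The rotation frees 1 page from MOVABLE, 1 from
 * RECLAIMABLE, then finds UNMOVABLE empty so batch_free grows to 2 and two
 * pages come off MOVABLE, then 1 off RECLAIMABLE, and so on - empty lists are
 * skipped cheaply while fuller lists give up proportionally more pages.
 */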
853
dc4b0caf
MG
854static void free_one_page(struct zone *zone,
855 struct page *page, unsigned long pfn,
7aeb09f9 856 unsigned int order,
ed0ae21d 857 int migratetype)
1da177e4 858{
0d5d823a 859 unsigned long nr_scanned;
006d22d9 860 spin_lock(&zone->lock);
0d5d823a
MG
861 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
862 if (nr_scanned)
863 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
f2260e6b 864
ad53f92e
JK
865 if (unlikely(has_isolate_pageblock(zone) ||
866 is_migrate_isolate(migratetype))) {
867 migratetype = get_pfnblock_migratetype(page, pfn);
ad53f92e 868 }
dc4b0caf 869 __free_one_page(page, pfn, zone, order, migratetype);
006d22d9 870 spin_unlock(&zone->lock);
48db57f8
NP
871}
872
81422f29
KS
873static int free_tail_pages_check(struct page *head_page, struct page *page)
874{
1d798ca3
KS
875 int ret = 1;
876
877 /*
878 * We rely on page->lru.next never having bit 0 set, unless the page
879 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
880 */
881 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
882
883 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
884 ret = 0;
885 goto out;
886 }
9a982250
KS
887 switch (page - head_page) {
888 case 1:
889 /* the first tail page: ->mapping is compound_mapcount() */
53f9263b
KS
890 if (unlikely(compound_mapcount(page))) {
891 bad_page(page, "nonzero compound_mapcount", 0);
892 goto out;
893 }
9a982250
KS
894 break;
895 case 2:
896 /*
897 * the second tail page: ->mapping is
898 * page_deferred_list().next -- ignore value.
899 */
900 break;
901 default:
902 if (page->mapping != TAIL_MAPPING) {
903 bad_page(page, "corrupted mapping in tail page", 0);
904 goto out;
905 }
906 break;
1c290f64 907 }
81422f29
KS
908 if (unlikely(!PageTail(page))) {
909 bad_page(page, "PageTail not set", 0);
1d798ca3 910 goto out;
81422f29 911 }
1d798ca3
KS
912 if (unlikely(compound_head(page) != head_page)) {
913 bad_page(page, "compound_head not consistent", 0);
914 goto out;
81422f29 915 }
1d798ca3
KS
916 ret = 0;
917out:
1c290f64 918 page->mapping = NULL;
1d798ca3
KS
919 clear_compound_head(page);
920 return ret;
81422f29
KS
921}
922
1e8ce83c
RH
923static void __meminit __init_single_page(struct page *page, unsigned long pfn,
924 unsigned long zone, int nid)
925{
1e8ce83c 926 set_page_links(page, zone, nid, pfn);
1e8ce83c
RH
927 init_page_count(page);
928 page_mapcount_reset(page);
929 page_cpupid_reset_last(page);
1e8ce83c 930
1e8ce83c
RH
931 INIT_LIST_HEAD(&page->lru);
932#ifdef WANT_PAGE_VIRTUAL
933 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
934 if (!is_highmem_idx(zone))
935 set_page_address(page, __va(pfn << PAGE_SHIFT));
936#endif
937}
938
939static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
940 int nid)
941{
942 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
943}
944
7e18adb4
MG
945#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
946static void init_reserved_page(unsigned long pfn)
947{
948 pg_data_t *pgdat;
949 int nid, zid;
950
951 if (!early_page_uninitialised(pfn))
952 return;
953
954 nid = early_pfn_to_nid(pfn);
955 pgdat = NODE_DATA(nid);
956
957 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
958 struct zone *zone = &pgdat->node_zones[zid];
959
960 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
961 break;
962 }
963 __init_single_pfn(pfn, zid, nid);
964}
965#else
966static inline void init_reserved_page(unsigned long pfn)
967{
968}
969#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
970
92923ca3
NZ
971/*
972 * Initialised pages do not have PageReserved set. This function is
973 * called for each range allocated by the bootmem allocator and
974 * marks the pages PageReserved. The remaining valid pages are later
975 * sent to the buddy page allocator.
976 */
7e18adb4 977void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
92923ca3
NZ
978{
979 unsigned long start_pfn = PFN_DOWN(start);
980 unsigned long end_pfn = PFN_UP(end);
981
7e18adb4
MG
982 for (; start_pfn < end_pfn; start_pfn++) {
983 if (pfn_valid(start_pfn)) {
984 struct page *page = pfn_to_page(start_pfn);
985
986 init_reserved_page(start_pfn);
1d798ca3
KS
987
988 /* Avoid false-positive PageTail() */
989 INIT_LIST_HEAD(&page->lru);
990
7e18adb4
MG
991 SetPageReserved(page);
992 }
993 }
92923ca3
NZ
994}
995
ec95f53a 996static bool free_pages_prepare(struct page *page, unsigned int order)
48db57f8 997{
81422f29
KS
998 bool compound = PageCompound(page);
999 int i, bad = 0;
1da177e4 1000
ab1f306f 1001 VM_BUG_ON_PAGE(PageTail(page), page);
81422f29 1002 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
ab1f306f 1003
b413d48a 1004 trace_mm_page_free(page, order);
b1eeab67 1005 kmemcheck_free_shadow(page, order);
b8c73fc2 1006 kasan_free_pages(page, order);
b1eeab67 1007
8dd60a3a
AA
1008 if (PageAnon(page))
1009 page->mapping = NULL;
81422f29
KS
1010 bad += free_pages_check(page);
1011 for (i = 1; i < (1 << order); i++) {
1012 if (compound)
1013 bad += free_tail_pages_check(page, page + i);
8dd60a3a 1014 bad += free_pages_check(page + i);
81422f29 1015 }
8cc3b392 1016 if (bad)
ec95f53a 1017 return false;
689bcebf 1018
48c96a36
JK
1019 reset_page_owner(page, order);
1020
3ac7fe5a 1021 if (!PageHighMem(page)) {
b8af2941
PK
1022 debug_check_no_locks_freed(page_address(page),
1023 PAGE_SIZE << order);
3ac7fe5a
TG
1024 debug_check_no_obj_freed(page_address(page),
1025 PAGE_SIZE << order);
1026 }
dafb1367 1027 arch_free_page(page, order);
8823b1db 1028 kernel_poison_pages(page, 1 << order, 0);
48db57f8 1029 kernel_map_pages(page, 1 << order, 0);
dafb1367 1030
ec95f53a
KM
1031 return true;
1032}
1033
1034static void __free_pages_ok(struct page *page, unsigned int order)
1035{
1036 unsigned long flags;
95e34412 1037 int migratetype;
dc4b0caf 1038 unsigned long pfn = page_to_pfn(page);
ec95f53a
KM
1039
1040 if (!free_pages_prepare(page, order))
1041 return;
1042
cfc47a28 1043 migratetype = get_pfnblock_migratetype(page, pfn);
c54ad30c 1044 local_irq_save(flags);
f8891e5e 1045 __count_vm_events(PGFREE, 1 << order);
dc4b0caf 1046 free_one_page(page_zone(page), page, pfn, order, migratetype);
c54ad30c 1047 local_irq_restore(flags);
1da177e4
LT
1048}
1049
0e1cc95b 1050static void __init __free_pages_boot_core(struct page *page,
3a80a7fa 1051 unsigned long pfn, unsigned int order)
a226f6c8 1052{
c3993076 1053 unsigned int nr_pages = 1 << order;
e2d0bd2b 1054 struct page *p = page;
c3993076 1055 unsigned int loop;
a226f6c8 1056
e2d0bd2b
YL
1057 prefetchw(p);
1058 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1059 prefetchw(p + 1);
c3993076
JW
1060 __ClearPageReserved(p);
1061 set_page_count(p, 0);
a226f6c8 1062 }
e2d0bd2b
YL
1063 __ClearPageReserved(p);
1064 set_page_count(p, 0);
c3993076 1065
e2d0bd2b 1066 page_zone(page)->managed_pages += nr_pages;
c3993076
JW
1067 set_page_refcounted(page);
1068 __free_pages(page, order);
a226f6c8
DH
1069}
1070
75a592a4
MG
1071#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1072 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
7ace9917 1073
75a592a4
MG
1074static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1075
1076int __meminit early_pfn_to_nid(unsigned long pfn)
1077{
7ace9917 1078 static DEFINE_SPINLOCK(early_pfn_lock);
75a592a4
MG
1079 int nid;
1080
7ace9917 1081 spin_lock(&early_pfn_lock);
75a592a4 1082 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
7ace9917
MG
1083 if (nid < 0)
1084 nid = 0;
1085 spin_unlock(&early_pfn_lock);
1086
1087 return nid;
75a592a4
MG
1088}
1089#endif
1090
1091#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1092static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1093 struct mminit_pfnnid_cache *state)
1094{
1095 int nid;
1096
1097 nid = __early_pfn_to_nid(pfn, state);
1098 if (nid >= 0 && nid != node)
1099 return false;
1100 return true;
1101}
1102
1103/* Only safe to use early in boot when initialisation is single-threaded */
1104static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1105{
1106 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1107}
1108
1109#else
1110
1111static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1112{
1113 return true;
1114}
1115static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1116 struct mminit_pfnnid_cache *state)
1117{
1118 return true;
1119}
1120#endif
1121
1122
0e1cc95b 1123void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
3a80a7fa
MG
1124 unsigned int order)
1125{
1126 if (early_page_uninitialised(pfn))
1127 return;
1128 return __free_pages_boot_core(page, pfn, order);
1129}
1130
7e18adb4 1131#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
0e1cc95b 1132static void __init deferred_free_range(struct page *page,
a4de83dd
MG
1133 unsigned long pfn, int nr_pages)
1134{
1135 int i;
1136
1137 if (!page)
1138 return;
1139
1140 /* Free a large naturally-aligned chunk if possible */
1141 if (nr_pages == MAX_ORDER_NR_PAGES &&
1142 (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
ac5d2539 1143 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
a4de83dd
MG
1144 __free_pages_boot_core(page, pfn, MAX_ORDER-1);
1145 return;
1146 }
1147
1148 for (i = 0; i < nr_pages; i++, page++, pfn++)
1149 __free_pages_boot_core(page, pfn, 0);
1150}
1151
d3cd131d
NS
1152/* Completion tracking for deferred_init_memmap() threads */
1153static atomic_t pgdat_init_n_undone __initdata;
1154static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1155
1156static inline void __init pgdat_init_report_one_done(void)
1157{
1158 if (atomic_dec_and_test(&pgdat_init_n_undone))
1159 complete(&pgdat_init_all_done_comp);
1160}
0e1cc95b 1161
7e18adb4 1162/* Initialise remaining memory on a node */
0e1cc95b 1163static int __init deferred_init_memmap(void *data)
7e18adb4 1164{
0e1cc95b
MG
1165 pg_data_t *pgdat = data;
1166 int nid = pgdat->node_id;
7e18adb4
MG
1167 struct mminit_pfnnid_cache nid_init_state = { };
1168 unsigned long start = jiffies;
1169 unsigned long nr_pages = 0;
1170 unsigned long walk_start, walk_end;
1171 int i, zid;
1172 struct zone *zone;
7e18adb4 1173 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
0e1cc95b 1174 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
7e18adb4 1175
0e1cc95b 1176 if (first_init_pfn == ULONG_MAX) {
d3cd131d 1177 pgdat_init_report_one_done();
0e1cc95b
MG
1178 return 0;
1179 }
1180
1181 /* Bind memory initialisation thread to a local node if possible */
1182 if (!cpumask_empty(cpumask))
1183 set_cpus_allowed_ptr(current, cpumask);
7e18adb4
MG
1184
1185 /* Sanity check boundaries */
1186 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1187 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1188 pgdat->first_deferred_pfn = ULONG_MAX;
1189
1190 /* Only the highest zone is deferred so find it */
1191 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1192 zone = pgdat->node_zones + zid;
1193 if (first_init_pfn < zone_end_pfn(zone))
1194 break;
1195 }
1196
1197 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1198 unsigned long pfn, end_pfn;
54608c3f 1199 struct page *page = NULL;
a4de83dd
MG
1200 struct page *free_base_page = NULL;
1201 unsigned long free_base_pfn = 0;
1202 int nr_to_free = 0;
7e18adb4
MG
1203
1204 end_pfn = min(walk_end, zone_end_pfn(zone));
1205 pfn = first_init_pfn;
1206 if (pfn < walk_start)
1207 pfn = walk_start;
1208 if (pfn < zone->zone_start_pfn)
1209 pfn = zone->zone_start_pfn;
1210
1211 for (; pfn < end_pfn; pfn++) {
54608c3f 1212 if (!pfn_valid_within(pfn))
a4de83dd 1213 goto free_range;
7e18adb4 1214
54608c3f
MG
1215 /*
1216 * Ensure pfn_valid is checked every
1217 * MAX_ORDER_NR_PAGES for memory holes
1218 */
1219 if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
1220 if (!pfn_valid(pfn)) {
1221 page = NULL;
a4de83dd 1222 goto free_range;
54608c3f
MG
1223 }
1224 }
1225
1226 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1227 page = NULL;
a4de83dd 1228 goto free_range;
54608c3f
MG
1229 }
1230
1231 /* Minimise pfn page lookups and scheduler checks */
1232 if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
1233 page++;
1234 } else {
a4de83dd
MG
1235 nr_pages += nr_to_free;
1236 deferred_free_range(free_base_page,
1237 free_base_pfn, nr_to_free);
1238 free_base_page = NULL;
1239 free_base_pfn = nr_to_free = 0;
1240
54608c3f
MG
1241 page = pfn_to_page(pfn);
1242 cond_resched();
1243 }
7e18adb4
MG
1244
1245 if (page->flags) {
1246 VM_BUG_ON(page_zone(page) != zone);
a4de83dd 1247 goto free_range;
7e18adb4
MG
1248 }
1249
1250 __init_single_page(page, pfn, zid, nid);
a4de83dd
MG
1251 if (!free_base_page) {
1252 free_base_page = page;
1253 free_base_pfn = pfn;
1254 nr_to_free = 0;
1255 }
1256 nr_to_free++;
1257
1258 /* Where possible, batch up pages for a single free */
1259 continue;
1260free_range:
1261 /* Free the current block of pages to allocator */
1262 nr_pages += nr_to_free;
1263 deferred_free_range(free_base_page, free_base_pfn,
1264 nr_to_free);
1265 free_base_page = NULL;
1266 free_base_pfn = nr_to_free = 0;
7e18adb4 1267 }
a4de83dd 1268
7e18adb4
MG
1269 first_init_pfn = max(end_pfn, first_init_pfn);
1270 }
1271
1272 /* Sanity check that the next zone really is unpopulated */
1273 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1274
0e1cc95b 1275 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
7e18adb4 1276 jiffies_to_msecs(jiffies - start));
d3cd131d
NS
1277
1278 pgdat_init_report_one_done();
0e1cc95b
MG
1279 return 0;
1280}
1281
1282void __init page_alloc_init_late(void)
1283{
1284 int nid;
1285
d3cd131d
NS
1286 /* There will be num_node_state(N_MEMORY) threads */
1287 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
0e1cc95b 1288 for_each_node_state(nid, N_MEMORY) {
0e1cc95b
MG
1289 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1290 }
1291
1292 /* Block until all are initialised */
d3cd131d 1293 wait_for_completion(&pgdat_init_all_done_comp);
4248b0da
MG
1294
1295 /* Reinit limits that are based on free pages after the kernel is up */
1296 files_maxfiles_init();
7e18adb4
MG
1297}
1298#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1299
47118af0 1300#ifdef CONFIG_CMA
9cf510a5 1301/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
47118af0
MN
1302void __init init_cma_reserved_pageblock(struct page *page)
1303{
1304 unsigned i = pageblock_nr_pages;
1305 struct page *p = page;
1306
1307 do {
1308 __ClearPageReserved(p);
1309 set_page_count(p, 0);
1310 } while (++p, --i);
1311
47118af0 1312 set_pageblock_migratetype(page, MIGRATE_CMA);
dc78327c
MN
1313
1314 if (pageblock_order >= MAX_ORDER) {
1315 i = pageblock_nr_pages;
1316 p = page;
1317 do {
1318 set_page_refcounted(p);
1319 __free_pages(p, MAX_ORDER - 1);
1320 p += MAX_ORDER_NR_PAGES;
1321 } while (i -= MAX_ORDER_NR_PAGES);
1322 } else {
1323 set_page_refcounted(page);
1324 __free_pages(page, pageblock_order);
1325 }
1326
3dcc0571 1327 adjust_managed_page_count(page, pageblock_nr_pages);
47118af0
MN
1328}
1329#endif
1da177e4
LT
1330
1331/*
1332 * The order of subdivision here is critical for the IO subsystem.
1333 * Please do not alter this order without good reasons and regression
1334 * testing. Specifically, as large blocks of memory are subdivided,
1335 * the order in which smaller blocks are delivered depends on the order
1336 * they're subdivided in this function. This is the primary factor
1337 * influencing the order in which pages are delivered to the IO
1338 * subsystem according to empirical testing, and this is also justified
1339 * by considering the behavior of a buddy system containing a single
1340 * large block of memory acted on by a series of small allocations.
1341 * This behavior is a critical factor in sglist merging's success.
1342 *
6d49e352 1343 * -- nyc
1da177e4 1344 */
085cc7d5 1345static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
1346 int low, int high, struct free_area *area,
1347 int migratetype)
1da177e4
LT
1348{
1349 unsigned long size = 1 << high;
1350
1351 while (high > low) {
1352 area--;
1353 high--;
1354 size >>= 1;
309381fe 1355 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
c0a32fc5 1356
2847cf95 1357 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
e30825f1 1358 debug_guardpage_enabled() &&
2847cf95 1359 high < debug_guardpage_minorder()) {
c0a32fc5
SG
1360 /*
1361 * Mark as guard pages (or page); this allows merging back
1362 * into the allocator when the buddy is freed.
1363 * Corresponding page table entries will not be touched,
1364 * pages will stay not present in virtual address space
1365 */
2847cf95 1366 set_page_guard(zone, &page[size], high, migratetype);
c0a32fc5
SG
1367 continue;
1368 }
b2a0ac88 1369 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
1370 area->nr_free++;
1371 set_page_order(&page[size], high);
1372 }
1da177e4
LT
1373}
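/*
 * Worked example (illustrative only): satisfying an order-0 request from an
 * order-3 free block. expand(zone, page, 0, 3, area, mt) walks high down from
 * 3 to 1, putting the upper half back each time: one order-2 block at
 * page[4], one order-1 block at page[2] and one order-0 block at page[1] go
 * back on the free lists, and the caller hands out page[0] as the order-0
 * allocation.
 */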
1374
1da177e4
LT
1375/*
1376 * This page is about to be returned from the page allocator
1377 */
2a7684a2 1378static inline int check_new_page(struct page *page)
1da177e4 1379{
d230dec1 1380 const char *bad_reason = NULL;
f0b791a3
DH
1381 unsigned long bad_flags = 0;
1382
53f9263b 1383 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
1384 bad_reason = "nonzero mapcount";
1385 if (unlikely(page->mapping != NULL))
1386 bad_reason = "non-NULL mapping";
1387 if (unlikely(atomic_read(&page->_count) != 0))
1388 bad_reason = "nonzero _count";
f4c18e6f
NH
1389 if (unlikely(page->flags & __PG_HWPOISON)) {
1390 bad_reason = "HWPoisoned (hardware-corrupted)";
1391 bad_flags = __PG_HWPOISON;
1392 }
f0b791a3
DH
1393 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1394 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1395 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1396 }
9edad6ea
JW
1397#ifdef CONFIG_MEMCG
1398 if (unlikely(page->mem_cgroup))
1399 bad_reason = "page still charged to cgroup";
1400#endif
f0b791a3
DH
1401 if (unlikely(bad_reason)) {
1402 bad_page(page, bad_reason, bad_flags);
689bcebf 1403 return 1;
8cc3b392 1404 }
2a7684a2
WF
1405 return 0;
1406}
1407
1414c7f4
LA
1408static inline bool free_pages_prezeroed(bool poisoned)
1409{
1410 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1411 page_poisoning_enabled() && poisoned;
1412}
1413
75379191
VB
1414static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1415 int alloc_flags)
2a7684a2
WF
1416{
1417 int i;
1414c7f4 1418 bool poisoned = true;
2a7684a2
WF
1419
1420 for (i = 0; i < (1 << order); i++) {
1421 struct page *p = page + i;
1422 if (unlikely(check_new_page(p)))
1423 return 1;
1414c7f4
LA
1424 if (poisoned)
1425 poisoned &= page_is_poisoned(p);
2a7684a2 1426 }
689bcebf 1427
4c21e2f2 1428 set_page_private(page, 0);
7835e98b 1429 set_page_refcounted(page);
cc102509
NP
1430
1431 arch_alloc_page(page, order);
1da177e4 1432 kernel_map_pages(page, 1 << order, 1);
8823b1db 1433 kernel_poison_pages(page, 1 << order, 1);
b8c73fc2 1434 kasan_alloc_pages(page, order);
17cf4406 1435
1414c7f4 1436 if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
f4d2897b
AA
1437 for (i = 0; i < (1 << order); i++)
1438 clear_highpage(page + i);
17cf4406
NP
1439
1440 if (order && (gfp_flags & __GFP_COMP))
1441 prep_compound_page(page, order);
1442
48c96a36
JK
1443 set_page_owner(page, order, gfp_flags);
1444
75379191 1445 /*
2f064f34 1446 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
75379191
VB
1447 * allocate the page. The expectation is that the caller is taking
1448 * steps that will free more memory. The caller should avoid the page
1449 * being used for !PFMEMALLOC purposes.
1450 */
2f064f34
MH
1451 if (alloc_flags & ALLOC_NO_WATERMARKS)
1452 set_page_pfmemalloc(page);
1453 else
1454 clear_page_pfmemalloc(page);
75379191 1455
689bcebf 1456 return 0;
1da177e4
LT
1457}
1458
56fd56b8
MG
1459/*
1460 * Go through the free lists for the given migratetype and remove
1461 * the smallest available page from the freelists
1462 */
728ec980
MG
1463static inline
1464struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
1465 int migratetype)
1466{
1467 unsigned int current_order;
b8af2941 1468 struct free_area *area;
56fd56b8
MG
1469 struct page *page;
1470
1471 /* Find a page of the appropriate size in the preferred list */
1472 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1473 area = &(zone->free_area[current_order]);
a16601c5 1474 page = list_first_entry_or_null(&area->free_list[migratetype],
56fd56b8 1475 struct page, lru);
a16601c5
GT
1476 if (!page)
1477 continue;
56fd56b8
MG
1478 list_del(&page->lru);
1479 rmv_page_order(page);
1480 area->nr_free--;
56fd56b8 1481 expand(zone, page, order, current_order, area, migratetype);
bb14c2c7 1482 set_pcppage_migratetype(page, migratetype);
56fd56b8
MG
1483 return page;
1484 }
1485
1486 return NULL;
1487}
1488
1489
b2a0ac88
MG
1490/*
1491 * This array describes the order in which free lists are fallen back on when
1492 * the free lists for the desired migrate type are depleted
1493 */
47118af0 1494static int fallbacks[MIGRATE_TYPES][4] = {
974a786e
MG
1495 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1496 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1497 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
47118af0 1498#ifdef CONFIG_CMA
974a786e 1499 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
47118af0 1500#endif
194159fb 1501#ifdef CONFIG_MEMORY_ISOLATION
974a786e 1502 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
194159fb 1503#endif
b2a0ac88
MG
1504};
1505
dc67647b
JK
1506#ifdef CONFIG_CMA
1507static struct page *__rmqueue_cma_fallback(struct zone *zone,
1508 unsigned int order)
1509{
1510 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1511}
1512#else
1513static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1514 unsigned int order) { return NULL; }
1515#endif
1516
c361be55
MG
1517/*
1518 * Move the free pages in a range to the free lists of the requested type.
d9c23400 1519 * Note that start_page and end_page are not aligned on a pageblock
c361be55
MG
1520 * boundary. If alignment is required, use move_freepages_block()
1521 */
435b405c 1522int move_freepages(struct zone *zone,
b69a7288
AB
1523 struct page *start_page, struct page *end_page,
1524 int migratetype)
c361be55
MG
1525{
1526 struct page *page;
d00181b9 1527 unsigned int order;
d100313f 1528 int pages_moved = 0;
c361be55
MG
1529
1530#ifndef CONFIG_HOLES_IN_ZONE
1531 /*
1532 * page_zone is not safe to call in this context when
1533 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1534 * anyway as we check zone boundaries in move_freepages_block().
1535 * Remove at a later date when no bug reports exist related to
ac0e5b7a 1536 * grouping pages by mobility
c361be55 1537 */
97ee4ba7 1538 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
c361be55
MG
1539#endif
1540
1541 for (page = start_page; page <= end_page;) {
344c790e 1542 /* Make sure we are not inadvertently changing nodes */
309381fe 1543 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
344c790e 1544
c361be55
MG
1545 if (!pfn_valid_within(page_to_pfn(page))) {
1546 page++;
1547 continue;
1548 }
1549
1550 if (!PageBuddy(page)) {
1551 page++;
1552 continue;
1553 }
1554
1555 order = page_order(page);
84be48d8
KS
1556 list_move(&page->lru,
1557 &zone->free_area[order].free_list[migratetype]);
c361be55 1558 page += 1 << order;
d100313f 1559 pages_moved += 1 << order;
c361be55
MG
1560 }
1561
d100313f 1562 return pages_moved;
c361be55
MG
1563}
1564
ee6f509c 1565int move_freepages_block(struct zone *zone, struct page *page,
68e3e926 1566 int migratetype)
c361be55
MG
1567{
1568 unsigned long start_pfn, end_pfn;
1569 struct page *start_page, *end_page;
1570
1571 start_pfn = page_to_pfn(page);
d9c23400 1572 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 1573 start_page = pfn_to_page(start_pfn);
d9c23400
MG
1574 end_page = start_page + pageblock_nr_pages - 1;
1575 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
1576
1577 /* Do not cross zone boundaries */
108bcc96 1578 if (!zone_spans_pfn(zone, start_pfn))
c361be55 1579 start_page = page;
108bcc96 1580 if (!zone_spans_pfn(zone, end_pfn))
c361be55
MG
1581 return 0;
1582
1583 return move_freepages(zone, start_page, end_page, migratetype);
1584}
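/*
 * Worked example (illustrative only): with pageblock_order == 9, the common
 * x86-64 value, pageblock_nr_pages is 512, so a pfn of 0x12345 is rounded
 * down to start_pfn 0x12200 and end_pfn becomes 0x123ff; the whole 2M
 * pageblock is moved, and nothing is moved if its last pfn falls outside the
 * zone.
 */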
1585
2f66a68f
MG
1586static void change_pageblock_range(struct page *pageblock_page,
1587 int start_order, int migratetype)
1588{
1589 int nr_pageblocks = 1 << (start_order - pageblock_order);
1590
1591 while (nr_pageblocks--) {
1592 set_pageblock_migratetype(pageblock_page, migratetype);
1593 pageblock_page += pageblock_nr_pages;
1594 }
1595}
1596
fef903ef 1597/*
9c0415eb
VB
1598 * When we are falling back to another migratetype during allocation, try to
1599 * steal extra free pages from the same pageblocks to satisfy further
1600 * allocations, instead of polluting multiple pageblocks.
1601 *
1602 * If we are stealing a relatively large buddy page, it is likely there will
1603 * be more free pages in the pageblock, so try to steal them all. For
1604 * reclaimable and unmovable allocations, we steal regardless of page size,
1605 * as fragmentation caused by those allocations polluting movable pageblocks
1606 * is worse than movable allocations stealing from unmovable and reclaimable
1607 * pageblocks.
fef903ef 1608 */
4eb7dce6
JK
1609static bool can_steal_fallback(unsigned int order, int start_mt)
1610{
1611 /*
1612 * Leaving this order check here is intentional, even though there is a more
1613 * relaxed order check below. The reason is that
1614 * we can actually steal the whole pageblock if this condition is met,
1615 * but the check below doesn't guarantee it; it is just a heuristic
1616 * and could be changed at any time.
1617 */
1618 if (order >= pageblock_order)
1619 return true;
1620
1621 if (order >= pageblock_order / 2 ||
1622 start_mt == MIGRATE_RECLAIMABLE ||
1623 start_mt == MIGRATE_UNMOVABLE ||
1624 page_group_by_mobility_disabled)
1625 return true;
1626
1627 return false;
1628}
1629
1630/*
1631 * This function implements the actual steal behaviour. If the order is large
1632 * enough, we can steal the whole pageblock. If not, we first move the free pages
1633 * in this pageblock and check whether at least half of the pages were moved.
1634 * If they were, we can change the migratetype of the pageblock and permanently
1635 * use its pages as the requested migratetype in the future.
1636 */
1637static void steal_suitable_fallback(struct zone *zone, struct page *page,
1638 int start_type)
fef903ef 1639{
d00181b9 1640 unsigned int current_order = page_order(page);
4eb7dce6 1641 int pages;
fef903ef 1642
fef903ef
SB
1643 /* Take ownership for orders >= pageblock_order */
1644 if (current_order >= pageblock_order) {
1645 change_pageblock_range(page, current_order, start_type);
3a1086fb 1646 return;
fef903ef
SB
1647 }
1648
4eb7dce6 1649 pages = move_freepages_block(zone, page, start_type);
fef903ef 1650
4eb7dce6
JK
1651 /* Claim the whole block if over half of it is free */
1652 if (pages >= (1 << (pageblock_order-1)) ||
1653 page_group_by_mobility_disabled)
1654 set_pageblock_migratetype(page, start_type);
1655}
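/*
 * Worked example (editor's sketch, not part of the kernel source), assuming
 * pageblock_order == 9 so pageblock_nr_pages == 512: move_freepages_block()
 * reports how many base pages it moved, and the block is claimed when
 *
 *   pages >= 1 << (9 - 1) == 256
 *
 * i.e. when at least half of the 512-page pageblock was sitting on the free
 * lists; otherwise only the moved pages change lists and the pageblock
 * keeps its old migratetype.
 */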
1656
2149cdae
JK
1657/*
1658 * Check whether there is a suitable fallback freepage with requested order.
1659 * If only_stealable is true, this function returns fallback_mt only if
1660 * we can steal all of the other freepages together. This helps reduce
1661 * fragmentation due to mixed migratetype pages in one pageblock.
1662 */
1663int find_suitable_fallback(struct free_area *area, unsigned int order,
1664 int migratetype, bool only_stealable, bool *can_steal)
4eb7dce6
JK
1665{
1666 int i;
1667 int fallback_mt;
1668
1669 if (area->nr_free == 0)
1670 return -1;
1671
1672 *can_steal = false;
1673 for (i = 0;; i++) {
1674 fallback_mt = fallbacks[migratetype][i];
974a786e 1675 if (fallback_mt == MIGRATE_TYPES)
4eb7dce6
JK
1676 break;
1677
1678 if (list_empty(&area->free_list[fallback_mt]))
1679 continue;
fef903ef 1680
4eb7dce6
JK
1681 if (can_steal_fallback(order, migratetype))
1682 *can_steal = true;
1683
2149cdae
JK
1684 if (!only_stealable)
1685 return fallback_mt;
1686
1687 if (*can_steal)
1688 return fallback_mt;
fef903ef 1689 }
4eb7dce6
JK
1690
1691 return -1;
fef903ef
SB
1692}
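/*
 * Editor's sketch of the walk above (not part of the kernel source; the
 * authoritative candidate order is the fallbacks[] table defined earlier in
 * this file). If the table row for the requested migratetype lists types A
 * then B, the loop first checks A's free list for this order, then B's,
 * stopping at the MIGRATE_TYPES sentinel. A candidate is returned as soon
 * as its free list is non-empty, unless only_stealable is set, in which
 * case candidates rejected by can_steal_fallback() are skipped.
 */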
1693
0aaa29a5
MG
1694/*
1695 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1696 * there are no empty page blocks that contain a page with a suitable order
1697 */
1698static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
1699 unsigned int alloc_order)
1700{
1701 int mt;
1702 unsigned long max_managed, flags;
1703
1704 /*
1705 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
1706 * Check is race-prone but harmless.
1707 */
1708 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
1709 if (zone->nr_reserved_highatomic >= max_managed)
1710 return;
1711
1712 spin_lock_irqsave(&zone->lock, flags);
1713
1714 /* Recheck the nr_reserved_highatomic limit under the lock */
1715 if (zone->nr_reserved_highatomic >= max_managed)
1716 goto out_unlock;
1717
1718 /* Yoink! */
1719 mt = get_pageblock_migratetype(page);
1720 if (mt != MIGRATE_HIGHATOMIC &&
1721 !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
1722 zone->nr_reserved_highatomic += pageblock_nr_pages;
1723 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
1724 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
1725 }
1726
1727out_unlock:
1728 spin_unlock_irqrestore(&zone->lock, flags);
1729}
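/*
 * Worked example of the limit above (editor's sketch, not part of the
 * kernel source), assuming a zone with managed_pages == 1048576 (4GB of
 * 4KB pages) and pageblock_nr_pages == 512:
 *
 *   max_managed = 1048576 / 100 + 512 = 10485 + 512 = 10997 pages
 *
 * i.e. roughly 1% of the zone plus one pageblock, about 21 pageblocks
 * (~43MB), may be reserved for high-order atomic allocations.
 */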
1730
1731/*
1732 * Used when an allocation is about to fail under memory pressure. This
1733 * potentially hurts the reliability of high-order allocations when under
1734 * intense memory pressure but failed atomic allocations should be easier
1735 * to recover from than an OOM.
1736 */
1737static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
1738{
1739 struct zonelist *zonelist = ac->zonelist;
1740 unsigned long flags;
1741 struct zoneref *z;
1742 struct zone *zone;
1743 struct page *page;
1744 int order;
1745
1746 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
1747 ac->nodemask) {
1748 /* Preserve at least one pageblock */
1749 if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
1750 continue;
1751
1752 spin_lock_irqsave(&zone->lock, flags);
1753 for (order = 0; order < MAX_ORDER; order++) {
1754 struct free_area *area = &(zone->free_area[order]);
1755
a16601c5
GT
1756 page = list_first_entry_or_null(
1757 &area->free_list[MIGRATE_HIGHATOMIC],
1758 struct page, lru);
1759 if (!page)
0aaa29a5
MG
1760 continue;
1761
0aaa29a5
MG
1762 /*
1763 * It should never happen but changes to locking could
1764 * inadvertently allow a per-cpu drain to add pages
1765 * to MIGRATE_HIGHATOMIC while unreserving so be safe
1766 * and watch for underflows.
1767 */
1768 zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
1769 zone->nr_reserved_highatomic);
1770
1771 /*
1772 * Convert to ac->migratetype and avoid the normal
1773 * pageblock stealing heuristics. Minimally, the caller
1774 * is doing the work and needs the pages. More
1775 * importantly, if the block was always converted to
1776 * MIGRATE_UNMOVABLE or another type then the number
1777 * of pageblocks that cannot be completely freed
1778 * may increase.
1779 */
1780 set_pageblock_migratetype(page, ac->migratetype);
1781 move_freepages_block(zone, page, ac->migratetype);
1782 spin_unlock_irqrestore(&zone->lock, flags);
1783 return;
1784 }
1785 spin_unlock_irqrestore(&zone->lock, flags);
1786 }
1787}
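/*
 * Worked example (editor's sketch, not part of the kernel source), assuming
 * pageblock_nr_pages == 512: a zone whose nr_reserved_highatomic is exactly
 * 512 is skipped by the "Preserve at least one pageblock" check above, so
 * one highatomic pageblock always survives. A zone with 1024 reserved pages
 * may hand one 512-page block back to ac->migratetype, dropping its reserve
 * to 512.
 */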
1788
b2a0ac88 1789/* Remove an element from the buddy allocator from the fallback list */
0ac3a409 1790static inline struct page *
7aeb09f9 1791__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
b2a0ac88 1792{
b8af2941 1793 struct free_area *area;
7aeb09f9 1794 unsigned int current_order;
b2a0ac88 1795 struct page *page;
4eb7dce6
JK
1796 int fallback_mt;
1797 bool can_steal;
b2a0ac88
MG
1798
1799 /* Find the largest possible block of pages in the other list */
7aeb09f9
MG
1800 for (current_order = MAX_ORDER-1;
1801 current_order >= order && current_order <= MAX_ORDER-1;
1802 --current_order) {
4eb7dce6
JK
1803 area = &(zone->free_area[current_order]);
1804 fallback_mt = find_suitable_fallback(area, current_order,
2149cdae 1805 start_migratetype, false, &can_steal);
4eb7dce6
JK
1806 if (fallback_mt == -1)
1807 continue;
b2a0ac88 1808
a16601c5 1809 page = list_first_entry(&area->free_list[fallback_mt],
4eb7dce6
JK
1810 struct page, lru);
1811 if (can_steal)
1812 steal_suitable_fallback(zone, page, start_migratetype);
b2a0ac88 1813
4eb7dce6
JK
1814 /* Remove the page from the freelists */
1815 area->nr_free--;
1816 list_del(&page->lru);
1817 rmv_page_order(page);
3a1086fb 1818
4eb7dce6
JK
1819 expand(zone, page, order, current_order, area,
1820 start_migratetype);
1821 /*
bb14c2c7 1822 * The pcppage_migratetype may differ from pageblock's
4eb7dce6 1823 * migratetype depending on the decisions in
bb14c2c7
VB
1824 * find_suitable_fallback(). This is OK as long as it does not
1825 * differ for MIGRATE_CMA pageblocks. Those can be used as
1826 * fallback only via special __rmqueue_cma_fallback() function
4eb7dce6 1827 */
bb14c2c7 1828 set_pcppage_migratetype(page, start_migratetype);
e0fff1bd 1829
4eb7dce6
JK
1830 trace_mm_page_alloc_extfrag(page, order, current_order,
1831 start_migratetype, fallback_mt);
e0fff1bd 1832
4eb7dce6 1833 return page;
b2a0ac88
MG
1834 }
1835
728ec980 1836 return NULL;
b2a0ac88
MG
1837}
1838
56fd56b8 1839/*
1da177e4
LT
1840 * Do the hard work of removing an element from the buddy allocator.
1841 * Call me with the zone->lock already held.
1842 */
b2a0ac88 1843static struct page *__rmqueue(struct zone *zone, unsigned int order,
6ac0206b 1844 int migratetype)
1da177e4 1845{
1da177e4
LT
1846 struct page *page;
1847
56fd56b8 1848 page = __rmqueue_smallest(zone, order, migratetype);
974a786e 1849 if (unlikely(!page)) {
dc67647b
JK
1850 if (migratetype == MIGRATE_MOVABLE)
1851 page = __rmqueue_cma_fallback(zone, order);
1852
1853 if (!page)
1854 page = __rmqueue_fallback(zone, order, migratetype);
728ec980
MG
1855 }
1856
0d3d062a 1857 trace_mm_page_alloc_zone_locked(page, order, migratetype);
b2a0ac88 1858 return page;
1da177e4
LT
1859}
1860
5f63b720 1861/*
1da177e4
LT
1862 * Obtain a specified number of elements from the buddy allocator, all under
1863 * a single hold of the lock, for efficiency. Add them to the supplied list.
1864 * Returns the number of new pages which were placed at *list.
1865 */
5f63b720 1866static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 1867 unsigned long count, struct list_head *list,
b745bc85 1868 int migratetype, bool cold)
1da177e4 1869{
5bcc9f86 1870 int i;
5f63b720 1871
c54ad30c 1872 spin_lock(&zone->lock);
1da177e4 1873 for (i = 0; i < count; ++i) {
6ac0206b 1874 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 1875 if (unlikely(page == NULL))
1da177e4 1876 break;
81eabcbe
MG
1877
1878 /*
1879 * Split buddy pages returned by expand() are received here
1880 * in physical page order. Each page is added to the caller's
1881 * list and the list head then moves forward. From the caller's
1882 * perspective, the linked list is ordered by page number under
1883 * some conditions. This is useful for IO devices that can
1884 * merge IO requests if the physical pages are ordered
1885 * properly.
1886 */
b745bc85 1887 if (likely(!cold))
e084b2d9
MG
1888 list_add(&page->lru, list);
1889 else
1890 list_add_tail(&page->lru, list);
81eabcbe 1891 list = &page->lru;
bb14c2c7 1892 if (is_migrate_cma(get_pcppage_migratetype(page)))
d1ce749a
BZ
1893 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1894 -(1 << order));
1da177e4 1895 }
f2260e6b 1896 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
c54ad30c 1897 spin_unlock(&zone->lock);
085cc7d5 1898 return i;
1da177e4
LT
1899}
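/*
 * Editor's sketch of the ordering described above (not part of the kernel
 * source). For a hot (cold == false) bulk fill that pulls three pages A, B
 * and C from the buddy in that order:
 *
 *   list_add(A, list);  list = &A->lru;   head -> A
 *   list_add(B, list);  list = &B->lru;   head -> A -> B
 *   list_add(C, list);  list = &C->lru;   head -> A -> B -> C
 *
 * Advancing 'list' to the page just added is what keeps the supplied list
 * in allocation order even though list_add() normally inserts at the front.
 */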
1900
4ae7c039 1901#ifdef CONFIG_NUMA
8fce4d8e 1902/*
4037d452
CL
1903 * Called from the vmstat counter updater to drain pagesets of this
1904 * currently executing processor on remote nodes after they have
1905 * expired.
1906 *
879336c3
CL
1907 * Note that this function must be called with the thread pinned to
1908 * a single processor.
8fce4d8e 1909 */
4037d452 1910void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 1911{
4ae7c039 1912 unsigned long flags;
7be12fc9 1913 int to_drain, batch;
4ae7c039 1914
4037d452 1915 local_irq_save(flags);
4db0c3c2 1916 batch = READ_ONCE(pcp->batch);
7be12fc9 1917 to_drain = min(pcp->count, batch);
2a13515c
KM
1918 if (to_drain > 0) {
1919 free_pcppages_bulk(zone, to_drain, pcp);
1920 pcp->count -= to_drain;
1921 }
4037d452 1922 local_irq_restore(flags);
4ae7c039
CL
1923}
1924#endif
1925
9f8f2172 1926/*
93481ff0 1927 * Drain pcplists of the indicated processor and zone.
9f8f2172
CL
1928 *
1929 * The processor must either be the current processor and the
1930 * thread pinned to the current processor or a processor that
1931 * is not online.
1932 */
93481ff0 1933static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1da177e4 1934{
c54ad30c 1935 unsigned long flags;
93481ff0
VB
1936 struct per_cpu_pageset *pset;
1937 struct per_cpu_pages *pcp;
1da177e4 1938
93481ff0
VB
1939 local_irq_save(flags);
1940 pset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 1941
93481ff0
VB
1942 pcp = &pset->pcp;
1943 if (pcp->count) {
1944 free_pcppages_bulk(zone, pcp->count, pcp);
1945 pcp->count = 0;
1946 }
1947 local_irq_restore(flags);
1948}
3dfa5721 1949
93481ff0
VB
1950/*
1951 * Drain pcplists of all zones on the indicated processor.
1952 *
1953 * The processor must either be the current processor and the
1954 * thread pinned to the current processor or a processor that
1955 * is not online.
1956 */
1957static void drain_pages(unsigned int cpu)
1958{
1959 struct zone *zone;
1960
1961 for_each_populated_zone(zone) {
1962 drain_pages_zone(cpu, zone);
1da177e4
LT
1963 }
1964}
1da177e4 1965
9f8f2172
CL
1966/*
1967 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
93481ff0
VB
1968 *
1969 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
1970 * the single zone's pages.
9f8f2172 1971 */
93481ff0 1972void drain_local_pages(struct zone *zone)
9f8f2172 1973{
93481ff0
VB
1974 int cpu = smp_processor_id();
1975
1976 if (zone)
1977 drain_pages_zone(cpu, zone);
1978 else
1979 drain_pages(cpu);
9f8f2172
CL
1980}
1981
1982/*
74046494
GBY
1983 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1984 *
93481ff0
VB
1985 * When zone parameter is non-NULL, spill just the single zone's pages.
1986 *
74046494
GBY
1987 * Note that this code is protected against sending an IPI to an offline
1988 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1989 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1990 * nothing keeps CPUs from showing up after we populated the cpumask and
1991 * before the call to on_each_cpu_mask().
9f8f2172 1992 */
93481ff0 1993void drain_all_pages(struct zone *zone)
9f8f2172 1994{
74046494 1995 int cpu;
74046494
GBY
1996
1997 /*
1998 * Allocate in the BSS so we won't require allocation in
1999 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2000 */
2001 static cpumask_t cpus_with_pcps;
2002
2003 /*
2004 * We don't care about racing with CPU hotplug event
2005 * as offline notification will cause the notified
2006 * cpu to drain that CPU pcps and on_each_cpu_mask
2007 * disables preemption as part of its processing
2008 */
2009 for_each_online_cpu(cpu) {
93481ff0
VB
2010 struct per_cpu_pageset *pcp;
2011 struct zone *z;
74046494 2012 bool has_pcps = false;
93481ff0
VB
2013
2014 if (zone) {
74046494 2015 pcp = per_cpu_ptr(zone->pageset, cpu);
93481ff0 2016 if (pcp->pcp.count)
74046494 2017 has_pcps = true;
93481ff0
VB
2018 } else {
2019 for_each_populated_zone(z) {
2020 pcp = per_cpu_ptr(z->pageset, cpu);
2021 if (pcp->pcp.count) {
2022 has_pcps = true;
2023 break;
2024 }
74046494
GBY
2025 }
2026 }
93481ff0 2027
74046494
GBY
2028 if (has_pcps)
2029 cpumask_set_cpu(cpu, &cpus_with_pcps);
2030 else
2031 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2032 }
93481ff0
VB
2033 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
2034 zone, 1);
9f8f2172
CL
2035}
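/*
 * Usage sketch (editor's addition, not part of the kernel source):
 *
 *	drain_all_pages(NULL);	- spill every populated zone's pcp lists
 *	drain_all_pages(zone);	- spill only this zone's pcp lists
 *
 * Either form only IPIs the CPUs that cpus_with_pcps shows actually hold
 * per-cpu pages, which keeps the operation cheap on mostly idle machines.
 */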
2036
296699de 2037#ifdef CONFIG_HIBERNATION
1da177e4
LT
2038
2039void mark_free_pages(struct zone *zone)
2040{
f623f0db
RW
2041 unsigned long pfn, max_zone_pfn;
2042 unsigned long flags;
7aeb09f9 2043 unsigned int order, t;
86760a2c 2044 struct page *page;
1da177e4 2045
8080fc03 2046 if (zone_is_empty(zone))
1da177e4
LT
2047 return;
2048
2049 spin_lock_irqsave(&zone->lock, flags);
f623f0db 2050
108bcc96 2051 max_zone_pfn = zone_end_pfn(zone);
f623f0db
RW
2052 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2053 if (pfn_valid(pfn)) {
86760a2c 2054 page = pfn_to_page(pfn);
7be98234
RW
2055 if (!swsusp_page_is_forbidden(page))
2056 swsusp_unset_page_free(page);
f623f0db 2057 }
1da177e4 2058
b2a0ac88 2059 for_each_migratetype_order(order, t) {
86760a2c
GT
2060 list_for_each_entry(page,
2061 &zone->free_area[order].free_list[t], lru) {
f623f0db 2062 unsigned long i;
1da177e4 2063
86760a2c 2064 pfn = page_to_pfn(page);
f623f0db 2065 for (i = 0; i < (1UL << order); i++)
7be98234 2066 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 2067 }
b2a0ac88 2068 }
1da177e4
LT
2069 spin_unlock_irqrestore(&zone->lock, flags);
2070}
e2c55dc8 2071#endif /* CONFIG_HIBERNATION */
1da177e4 2072
1da177e4
LT
2073/*
2074 * Free a 0-order page
b745bc85 2075 * cold == true ? free a cold page : free a hot page
1da177e4 2076 */
b745bc85 2077void free_hot_cold_page(struct page *page, bool cold)
1da177e4
LT
2078{
2079 struct zone *zone = page_zone(page);
2080 struct per_cpu_pages *pcp;
2081 unsigned long flags;
dc4b0caf 2082 unsigned long pfn = page_to_pfn(page);
5f8dcc21 2083 int migratetype;
1da177e4 2084
ec95f53a 2085 if (!free_pages_prepare(page, 0))
689bcebf
HD
2086 return;
2087
dc4b0caf 2088 migratetype = get_pfnblock_migratetype(page, pfn);
bb14c2c7 2089 set_pcppage_migratetype(page, migratetype);
1da177e4 2090 local_irq_save(flags);
f8891e5e 2091 __count_vm_event(PGFREE);
da456f14 2092
5f8dcc21
MG
2093 /*
2094 * We only track unmovable, reclaimable and movable on pcp lists.
2095 * Free ISOLATE pages back to the allocator because they are being
2096 * offlined but treat RESERVE as movable pages so we can get those
2097 * areas back if necessary. Otherwise, we may have to free
2098 * excessively into the page allocator
2099 */
2100 if (migratetype >= MIGRATE_PCPTYPES) {
194159fb 2101 if (unlikely(is_migrate_isolate(migratetype))) {
dc4b0caf 2102 free_one_page(zone, page, pfn, 0, migratetype);
5f8dcc21
MG
2103 goto out;
2104 }
2105 migratetype = MIGRATE_MOVABLE;
2106 }
2107
99dcc3e5 2108 pcp = &this_cpu_ptr(zone->pageset)->pcp;
b745bc85 2109 if (!cold)
5f8dcc21 2110 list_add(&page->lru, &pcp->lists[migratetype]);
b745bc85
MG
2111 else
2112 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1da177e4 2113 pcp->count++;
48db57f8 2114 if (pcp->count >= pcp->high) {
4db0c3c2 2115 unsigned long batch = READ_ONCE(pcp->batch);
998d39cb
CS
2116 free_pcppages_bulk(zone, batch, pcp);
2117 pcp->count -= batch;
48db57f8 2118 }
5f8dcc21
MG
2119
2120out:
1da177e4 2121 local_irq_restore(flags);
1da177e4
LT
2122}
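/*
 * Worked example of the pcp trimming above (editor's sketch, not part of
 * the kernel source), assuming pcp->high == 186 and pcp->batch == 31
 * (values assumed; both are derived at boot and can be tuned via the
 * percpu_pagelist_fraction sysctl): once a free pushes pcp->count to 186,
 * one batch of 31 pages is returned to the buddy allocator and the list
 * drops back to 155 pages.
 */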
2123
cc59850e
KK
2124/*
2125 * Free a list of 0-order pages
2126 */
b745bc85 2127void free_hot_cold_page_list(struct list_head *list, bool cold)
cc59850e
KK
2128{
2129 struct page *page, *next;
2130
2131 list_for_each_entry_safe(page, next, list, lru) {
b413d48a 2132 trace_mm_page_free_batched(page, cold);
cc59850e
KK
2133 free_hot_cold_page(page, cold);
2134 }
2135}
2136
8dfcc9ba
NP
2137/*
2138 * split_page takes a non-compound higher-order page, and splits it into
2139 * n (1<<order) sub-pages: page[0..n-1]
2140 * Each sub-page must be freed individually.
2141 *
2142 * Note: this is probably too low level an operation for use in drivers.
2143 * Please consult with lkml before using this in your driver.
2144 */
2145void split_page(struct page *page, unsigned int order)
2146{
2147 int i;
e2cfc911 2148 gfp_t gfp_mask;
8dfcc9ba 2149
309381fe
SL
2150 VM_BUG_ON_PAGE(PageCompound(page), page);
2151 VM_BUG_ON_PAGE(!page_count(page), page);
b1eeab67
VN
2152
2153#ifdef CONFIG_KMEMCHECK
2154 /*
2155 * Split shadow pages too, because free(page[0]) would
2156 * otherwise free the whole shadow.
2157 */
2158 if (kmemcheck_page_is_tracked(page))
2159 split_page(virt_to_page(page[0].shadow), order);
2160#endif
2161
e2cfc911
JK
2162 gfp_mask = get_page_owner_gfp(page);
2163 set_page_owner(page, 0, gfp_mask);
48c96a36 2164 for (i = 1; i < (1 << order); i++) {
7835e98b 2165 set_page_refcounted(page + i);
e2cfc911 2166 set_page_owner(page + i, 0, gfp_mask);
48c96a36 2167 }
8dfcc9ba 2168}
5853ff23 2169EXPORT_SYMBOL_GPL(split_page);
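/*
 * Minimal usage sketch (editor's addition, not part of the kernel source):
 * split an order-2 allocation into four independently refcounted pages.
 * The function name is hypothetical and error handling is minimal; each
 * sub-page must later be freed on its own.
 */
static struct page *example_split_order2(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;

	split_page(page, 2);		/* page[0..3] now each hold one reference */
	__free_page(page + 1);		/* sub-pages can be freed individually */
	__free_page(page + 2);
	__free_page(page + 3);
	return page;			/* caller keeps page[0] only */
}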
8dfcc9ba 2170
3c605096 2171int __isolate_free_page(struct page *page, unsigned int order)
748446bb 2172{
748446bb
MG
2173 unsigned long watermark;
2174 struct zone *zone;
2139cbe6 2175 int mt;
748446bb
MG
2176
2177 BUG_ON(!PageBuddy(page));
2178
2179 zone = page_zone(page);
2e30abd1 2180 mt = get_pageblock_migratetype(page);
748446bb 2181
194159fb 2182 if (!is_migrate_isolate(mt)) {
2e30abd1
MS
2183 /* Obey watermarks as if the page was being allocated */
2184 watermark = low_wmark_pages(zone) + (1 << order);
2185 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
2186 return 0;
2187
8fb74b9f 2188 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2e30abd1 2189 }
748446bb
MG
2190
2191 /* Remove page from free list */
2192 list_del(&page->lru);
2193 zone->free_area[order].nr_free--;
2194 rmv_page_order(page);
2139cbe6 2195
e2cfc911 2196 set_page_owner(page, order, __GFP_MOVABLE);
f3a14ced 2197
8fb74b9f 2198 /* Set the pageblock if the isolated page is at least a pageblock */
748446bb
MG
2199 if (order >= pageblock_order - 1) {
2200 struct page *endpage = page + (1 << order) - 1;
47118af0
MN
2201 for (; page < endpage; page += pageblock_nr_pages) {
2202 int mt = get_pageblock_migratetype(page);
194159fb 2203 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
47118af0
MN
2204 set_pageblock_migratetype(page,
2205 MIGRATE_MOVABLE);
2206 }
748446bb
MG
2207 }
2208
f3a14ced 2209
8fb74b9f 2210 return 1UL << order;
1fb3f8ca
MG
2211}
2212
2213/*
2214 * Similar to split_page except the page is already free. As this is only
2215 * being used for migration, the migratetype of the block also changes.
2216 * As this is called with interrupts disabled, the caller is responsible
2217 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
2218 * are enabled.
2219 *
2220 * Note: this is probably too low level an operation for use in drivers.
2221 * Please consult with lkml before using this in your driver.
2222 */
2223int split_free_page(struct page *page)
2224{
2225 unsigned int order;
2226 int nr_pages;
2227
1fb3f8ca
MG
2228 order = page_order(page);
2229
8fb74b9f 2230 nr_pages = __isolate_free_page(page, order);
1fb3f8ca
MG
2231 if (!nr_pages)
2232 return 0;
2233
2234 /* Split into individual pages */
2235 set_page_refcounted(page);
2236 split_page(page, order);
2237 return nr_pages;
748446bb
MG
2238}
2239
1da177e4 2240/*
75379191 2241 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
1da177e4 2242 */
0a15c3e9
MG
2243static inline
2244struct page *buffered_rmqueue(struct zone *preferred_zone,
7aeb09f9 2245 struct zone *zone, unsigned int order,
0aaa29a5 2246 gfp_t gfp_flags, int alloc_flags, int migratetype)
1da177e4
LT
2247{
2248 unsigned long flags;
689bcebf 2249 struct page *page;
b745bc85 2250 bool cold = ((gfp_flags & __GFP_COLD) != 0);
1da177e4 2251
48db57f8 2252 if (likely(order == 0)) {
1da177e4 2253 struct per_cpu_pages *pcp;
5f8dcc21 2254 struct list_head *list;
1da177e4 2255
1da177e4 2256 local_irq_save(flags);
99dcc3e5
CL
2257 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2258 list = &pcp->lists[migratetype];
5f8dcc21 2259 if (list_empty(list)) {
535131e6 2260 pcp->count += rmqueue_bulk(zone, 0,
5f8dcc21 2261 pcp->batch, list,
e084b2d9 2262 migratetype, cold);
5f8dcc21 2263 if (unlikely(list_empty(list)))
6fb332fa 2264 goto failed;
535131e6 2265 }
b92a6edd 2266
5f8dcc21 2267 if (cold)
a16601c5 2268 page = list_last_entry(list, struct page, lru);
5f8dcc21 2269 else
a16601c5 2270 page = list_first_entry(list, struct page, lru);
5f8dcc21 2271
b92a6edd
MG
2272 list_del(&page->lru);
2273 pcp->count--;
7fb1d9fc 2274 } else {
dab48dab
AM
2275 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
2276 /*
2277 * __GFP_NOFAIL is not to be used in new code.
2278 *
2279 * All __GFP_NOFAIL callers should be fixed so that they
2280 * properly detect and handle allocation failures.
2281 *
2282 * We most definitely don't want callers attempting to
4923abf9 2283 * allocate greater than order-1 page units with
dab48dab
AM
2284 * __GFP_NOFAIL.
2285 */
4923abf9 2286 WARN_ON_ONCE(order > 1);
dab48dab 2287 }
1da177e4 2288 spin_lock_irqsave(&zone->lock, flags);
0aaa29a5
MG
2289
2290 page = NULL;
2291 if (alloc_flags & ALLOC_HARDER) {
2292 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2293 if (page)
2294 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2295 }
2296 if (!page)
6ac0206b 2297 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
2298 spin_unlock(&zone->lock);
2299 if (!page)
2300 goto failed;
d1ce749a 2301 __mod_zone_freepage_state(zone, -(1 << order),
bb14c2c7 2302 get_pcppage_migratetype(page));
1da177e4
LT
2303 }
2304
3a025760 2305 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
abe5f972 2306 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
57054651
JW
2307 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
2308 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
27329369 2309
f8891e5e 2310 __count_zone_vm_events(PGALLOC, zone, 1 << order);
78afd561 2311 zone_statistics(preferred_zone, zone, gfp_flags);
a74609fa 2312 local_irq_restore(flags);
1da177e4 2313
309381fe 2314 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1da177e4 2315 return page;
a74609fa
NP
2316
2317failed:
2318 local_irq_restore(flags);
a74609fa 2319 return NULL;
1da177e4
LT
2320}
2321
933e312e
AM
2322#ifdef CONFIG_FAIL_PAGE_ALLOC
2323
b2588c4b 2324static struct {
933e312e
AM
2325 struct fault_attr attr;
2326
621a5f7a 2327 bool ignore_gfp_highmem;
71baba4b 2328 bool ignore_gfp_reclaim;
54114994 2329 u32 min_order;
933e312e
AM
2330} fail_page_alloc = {
2331 .attr = FAULT_ATTR_INITIALIZER,
71baba4b 2332 .ignore_gfp_reclaim = true,
621a5f7a 2333 .ignore_gfp_highmem = true,
54114994 2334 .min_order = 1,
933e312e
AM
2335};
2336
2337static int __init setup_fail_page_alloc(char *str)
2338{
2339 return setup_fault_attr(&fail_page_alloc.attr, str);
2340}
2341__setup("fail_page_alloc=", setup_fail_page_alloc);
2342
deaf386e 2343static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 2344{
54114994 2345 if (order < fail_page_alloc.min_order)
deaf386e 2346 return false;
933e312e 2347 if (gfp_mask & __GFP_NOFAIL)
deaf386e 2348 return false;
933e312e 2349 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
deaf386e 2350 return false;
71baba4b
MG
2351 if (fail_page_alloc.ignore_gfp_reclaim &&
2352 (gfp_mask & __GFP_DIRECT_RECLAIM))
deaf386e 2353 return false;
933e312e
AM
2354
2355 return should_fail(&fail_page_alloc.attr, 1 << order);
2356}
2357
2358#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2359
2360static int __init fail_page_alloc_debugfs(void)
2361{
f4ae40a6 2362 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
933e312e 2363 struct dentry *dir;
933e312e 2364
dd48c085
AM
2365 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2366 &fail_page_alloc.attr);
2367 if (IS_ERR(dir))
2368 return PTR_ERR(dir);
933e312e 2369
b2588c4b 2370 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
71baba4b 2371 &fail_page_alloc.ignore_gfp_reclaim))
b2588c4b
AM
2372 goto fail;
2373 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2374 &fail_page_alloc.ignore_gfp_highmem))
2375 goto fail;
2376 if (!debugfs_create_u32("min-order", mode, dir,
2377 &fail_page_alloc.min_order))
2378 goto fail;
2379
2380 return 0;
2381fail:
dd48c085 2382 debugfs_remove_recursive(dir);
933e312e 2383
b2588c4b 2384 return -ENOMEM;
933e312e
AM
2385}
2386
2387late_initcall(fail_page_alloc_debugfs);
2388
2389#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2390
2391#else /* CONFIG_FAIL_PAGE_ALLOC */
2392
deaf386e 2393static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 2394{
deaf386e 2395 return false;
933e312e
AM
2396}
2397
2398#endif /* CONFIG_FAIL_PAGE_ALLOC */
2399
1da177e4 2400/*
97a16fc8
MG
2401 * Return true if free base pages are above 'mark'. For high-order checks it
2402 * will return true if the order-0 watermark is reached and there is at least
2403 * one free page of a suitable size. Checking now avoids taking the zone lock
2404 * to check in the allocation paths if no pages are free.
1da177e4 2405 */
7aeb09f9
MG
2406static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2407 unsigned long mark, int classzone_idx, int alloc_flags,
2408 long free_pages)
1da177e4 2409{
d23ad423 2410 long min = mark;
1da177e4 2411 int o;
97a16fc8 2412 const int alloc_harder = (alloc_flags & ALLOC_HARDER);
1da177e4 2413
0aaa29a5 2414 /* free_pages may go negative - that's OK */
df0a6daa 2415 free_pages -= (1 << order) - 1;
0aaa29a5 2416
7fb1d9fc 2417 if (alloc_flags & ALLOC_HIGH)
1da177e4 2418 min -= min / 2;
0aaa29a5
MG
2419
2420 /*
2421 * If the caller does not have rights to ALLOC_HARDER then subtract
2422 * the high-atomic reserves. This will over-estimate the size of the
2423 * atomic reserve but it avoids a search.
2424 */
97a16fc8 2425 if (likely(!alloc_harder))
0aaa29a5
MG
2426 free_pages -= z->nr_reserved_highatomic;
2427 else
1da177e4 2428 min -= min / 4;
e2b19197 2429
d95ea5d1
BZ
2430#ifdef CONFIG_CMA
2431 /* If allocation can't use CMA areas don't use free CMA pages */
2432 if (!(alloc_flags & ALLOC_CMA))
97a16fc8 2433 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
d95ea5d1 2434#endif
026b0814 2435
97a16fc8
MG
2436 /*
2437 * Check watermarks for an order-0 allocation request. If these
2438 * are not met, then a high-order request also cannot go ahead
2439 * even if a suitable page happened to be free.
2440 */
2441 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
88f5acf8 2442 return false;
1da177e4 2443
97a16fc8
MG
2444 /* If this is an order-0 request then the watermark is fine */
2445 if (!order)
2446 return true;
2447
2448 /* For a high-order request, check at least one suitable page is free */
2449 for (o = order; o < MAX_ORDER; o++) {
2450 struct free_area *area = &z->free_area[o];
2451 int mt;
2452
2453 if (!area->nr_free)
2454 continue;
2455
2456 if (alloc_harder)
2457 return true;
1da177e4 2458
97a16fc8
MG
2459 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2460 if (!list_empty(&area->free_list[mt]))
2461 return true;
2462 }
2463
2464#ifdef CONFIG_CMA
2465 if ((alloc_flags & ALLOC_CMA) &&
2466 !list_empty(&area->free_list[MIGRATE_CMA])) {
2467 return true;
2468 }
2469#endif
1da177e4 2470 }
97a16fc8 2471 return false;
88f5acf8
MG
2472}
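/*
 * Worked example (editor's sketch, not part of the kernel source): an
 * order-3 request with ALLOC_HIGH and ALLOC_HARDER set, against a zone
 * whose mark is 1024 pages and whose lowmem_reserve entry is 0:
 *
 *   free_pages -= (1 << 3) - 1 = 7
 *   min = 1024; min -= min / 2  -> 512   (ALLOC_HIGH)
 *               min -= min / 4  -> 384   (ALLOC_HARDER)
 *
 * The order-0 gate then requires free_pages > 384, and on top of that at
 * least one free area of order 3..MAX_ORDER-1 must hold a page (of any
 * migratetype, since alloc_harder is set) for the check to pass.
 */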
2473
7aeb09f9 2474bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
88f5acf8
MG
2475 int classzone_idx, int alloc_flags)
2476{
2477 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2478 zone_page_state(z, NR_FREE_PAGES));
2479}
2480
7aeb09f9 2481bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
e2b19197 2482 unsigned long mark, int classzone_idx)
88f5acf8
MG
2483{
2484 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2485
2486 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2487 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2488
e2b19197 2489 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
88f5acf8 2490 free_pages);
1da177e4
LT
2491}
2492
9276b1bc 2493#ifdef CONFIG_NUMA
81c0a2bb
JW
2494static bool zone_local(struct zone *local_zone, struct zone *zone)
2495{
fff4068c 2496 return local_zone->node == zone->node;
81c0a2bb
JW
2497}
2498
957f822a
DR
2499static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2500{
5f7a75ac
MG
2501 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
2502 RECLAIM_DISTANCE;
957f822a 2503}
9276b1bc 2504#else /* CONFIG_NUMA */
81c0a2bb
JW
2505static bool zone_local(struct zone *local_zone, struct zone *zone)
2506{
2507 return true;
2508}
2509
957f822a
DR
2510static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2511{
2512 return true;
2513}
9276b1bc
PJ
2514#endif /* CONFIG_NUMA */
2515
4ffeaf35
MG
2516static void reset_alloc_batches(struct zone *preferred_zone)
2517{
2518 struct zone *zone = preferred_zone->zone_pgdat->node_zones;
2519
2520 do {
2521 mod_zone_page_state(zone, NR_ALLOC_BATCH,
2522 high_wmark_pages(zone) - low_wmark_pages(zone) -
2523 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
57054651 2524 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
4ffeaf35
MG
2525 } while (zone++ != preferred_zone);
2526}
2527
7fb1d9fc 2528/*
0798e519 2529 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
2530 * a page.
2531 */
2532static struct page *
a9263751
VB
2533get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2534 const struct alloc_context *ac)
753ee728 2535{
a9263751 2536 struct zonelist *zonelist = ac->zonelist;
dd1a239f 2537 struct zoneref *z;
7fb1d9fc 2538 struct page *page = NULL;
5117f45d 2539 struct zone *zone;
4ffeaf35
MG
2540 int nr_fair_skipped = 0;
2541 bool zonelist_rescan;
54a6eb5c 2542
9276b1bc 2543zonelist_scan:
4ffeaf35
MG
2544 zonelist_rescan = false;
2545
7fb1d9fc 2546 /*
9276b1bc 2547 * Scan zonelist, looking for a zone with enough free.
344736f2 2548 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
7fb1d9fc 2549 */
a9263751
VB
2550 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2551 ac->nodemask) {
e085dbc5
JW
2552 unsigned long mark;
2553
664eedde
MG
2554 if (cpusets_enabled() &&
2555 (alloc_flags & ALLOC_CPUSET) &&
344736f2 2556 !cpuset_zone_allowed(zone, gfp_mask))
cd38b115 2557 continue;
81c0a2bb
JW
2558 /*
2559 * Distribute pages in proportion to the individual
2560 * zone size to ensure fair page aging. The zone a
2561 * page was allocated in should have no effect on the
2562 * time the page has in memory before being reclaimed.
81c0a2bb 2563 */
3a025760 2564 if (alloc_flags & ALLOC_FAIR) {
a9263751 2565 if (!zone_local(ac->preferred_zone, zone))
f7b5d647 2566 break;
57054651 2567 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
4ffeaf35 2568 nr_fair_skipped++;
3a025760 2569 continue;
4ffeaf35 2570 }
81c0a2bb 2571 }
a756cf59
JW
2572 /*
2573 * When allocating a page cache page for writing, we
2574 * want to get it from a zone that is within its dirty
2575 * limit, such that no single zone holds more than its
2576 * proportional share of globally allowed dirty pages.
2577 * The dirty limits take into account the zone's
2578 * lowmem reserves and high watermark so that kswapd
2579 * should be able to balance it without having to
2580 * write pages from its LRU list.
2581 *
2582 * This may look like it could increase pressure on
2583 * lower zones by failing allocations in higher zones
2584 * before they are full. But the pages that do spill
2585 * over are limited as the lower zones are protected
2586 * by this very same mechanism. It should not become
2587 * a practical burden to them.
2588 *
2589 * XXX: For now, allow allocations to potentially
2590 * exceed the per-zone dirty limit in the slowpath
c9ab0c4f 2591 * (spread_dirty_pages unset) before going into reclaim,
a756cf59
JW
2592 * which is important when on a NUMA setup the allowed
2593 * zones are together not big enough to reach the
2594 * global limit. The proper fix for these situations
2595 * will require awareness of zones in the
2596 * dirty-throttling and the flusher threads.
2597 */
c9ab0c4f 2598 if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
800a1e75 2599 continue;
7fb1d9fc 2600
e085dbc5
JW
2601 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2602 if (!zone_watermark_ok(zone, order, mark,
a9263751 2603 ac->classzone_idx, alloc_flags)) {
fa5e084e
MG
2604 int ret;
2605
5dab2911
MG
2606 /* Checked here to keep the fast path fast */
2607 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2608 if (alloc_flags & ALLOC_NO_WATERMARKS)
2609 goto try_this_zone;
2610
957f822a 2611 if (zone_reclaim_mode == 0 ||
a9263751 2612 !zone_allows_reclaim(ac->preferred_zone, zone))
cd38b115
MG
2613 continue;
2614
fa5e084e
MG
2615 ret = zone_reclaim(zone, gfp_mask, order);
2616 switch (ret) {
2617 case ZONE_RECLAIM_NOSCAN:
2618 /* did not scan */
cd38b115 2619 continue;
fa5e084e
MG
2620 case ZONE_RECLAIM_FULL:
2621 /* scanned but unreclaimable */
cd38b115 2622 continue;
fa5e084e
MG
2623 default:
2624 /* did we reclaim enough */
fed2719e 2625 if (zone_watermark_ok(zone, order, mark,
a9263751 2626 ac->classzone_idx, alloc_flags))
fed2719e
MG
2627 goto try_this_zone;
2628
fed2719e 2629 continue;
0798e519 2630 }
7fb1d9fc
RS
2631 }
2632
fa5e084e 2633try_this_zone:
a9263751 2634 page = buffered_rmqueue(ac->preferred_zone, zone, order,
0aaa29a5 2635 gfp_mask, alloc_flags, ac->migratetype);
75379191
VB
2636 if (page) {
2637 if (prep_new_page(page, order, gfp_mask, alloc_flags))
2638 goto try_this_zone;
0aaa29a5
MG
2639
2640 /*
2641 * If this is a high-order atomic allocation then check
2642 * if the pageblock should be reserved for the future
2643 */
2644 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
2645 reserve_highatomic_pageblock(page, zone, order);
2646
75379191
VB
2647 return page;
2648 }
54a6eb5c 2649 }
9276b1bc 2650
4ffeaf35
MG
2651 /*
2652 * The first pass makes sure allocations are spread fairly within the
2653 * local node. However, the local node might have free pages left
2654 * after the fairness batches are exhausted, and remote zones haven't
2655 * even been considered yet. Try once more without fairness, and
2656 * include remote zones now, before entering the slowpath and waking
2657 * kswapd: prefer spilling to a remote zone over swapping locally.
2658 */
2659 if (alloc_flags & ALLOC_FAIR) {
2660 alloc_flags &= ~ALLOC_FAIR;
2661 if (nr_fair_skipped) {
2662 zonelist_rescan = true;
a9263751 2663 reset_alloc_batches(ac->preferred_zone);
4ffeaf35
MG
2664 }
2665 if (nr_online_nodes > 1)
2666 zonelist_rescan = true;
2667 }
2668
4ffeaf35
MG
2669 if (zonelist_rescan)
2670 goto zonelist_scan;
2671
2672 return NULL;
753ee728
MH
2673}
2674
29423e77
DR
2675/*
2676 * Large machines with many possible nodes should not always dump per-node
2677 * meminfo in irq context.
2678 */
2679static inline bool should_suppress_show_mem(void)
2680{
2681 bool ret = false;
2682
2683#if NODES_SHIFT > 8
2684 ret = in_interrupt();
2685#endif
2686 return ret;
2687}
2688
a238ab5b
DH
2689static DEFINE_RATELIMIT_STATE(nopage_rs,
2690 DEFAULT_RATELIMIT_INTERVAL,
2691 DEFAULT_RATELIMIT_BURST);
2692
d00181b9 2693void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
a238ab5b 2694{
a238ab5b
DH
2695 unsigned int filter = SHOW_MEM_FILTER_NODES;
2696
c0a32fc5
SG
2697 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2698 debug_guardpage_minorder() > 0)
a238ab5b
DH
2699 return;
2700
2701 /*
2702 * This documents exceptions given to allocations in certain
2703 * contexts that are allowed to allocate outside current's set
2704 * of allowed nodes.
2705 */
2706 if (!(gfp_mask & __GFP_NOMEMALLOC))
2707 if (test_thread_flag(TIF_MEMDIE) ||
2708 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2709 filter &= ~SHOW_MEM_FILTER_NODES;
d0164adc 2710 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
a238ab5b
DH
2711 filter &= ~SHOW_MEM_FILTER_NODES;
2712
2713 if (fmt) {
3ee9a4f0
JP
2714 struct va_format vaf;
2715 va_list args;
2716
a238ab5b 2717 va_start(args, fmt);
3ee9a4f0
JP
2718
2719 vaf.fmt = fmt;
2720 vaf.va = &args;
2721
2722 pr_warn("%pV", &vaf);
2723
a238ab5b
DH
2724 va_end(args);
2725 }
2726
c5c990e8
VB
2727 pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n",
2728 current->comm, order, gfp_mask, &gfp_mask);
a238ab5b
DH
2729 dump_stack();
2730 if (!should_suppress_show_mem())
2731 show_mem(filter);
2732}
2733
11e33f6a
MG
2734static inline struct page *
2735__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
a9263751 2736 const struct alloc_context *ac, unsigned long *did_some_progress)
11e33f6a 2737{
6e0fc46d
DR
2738 struct oom_control oc = {
2739 .zonelist = ac->zonelist,
2740 .nodemask = ac->nodemask,
2741 .gfp_mask = gfp_mask,
2742 .order = order,
6e0fc46d 2743 };
11e33f6a
MG
2744 struct page *page;
2745
9879de73
JW
2746 *did_some_progress = 0;
2747
9879de73 2748 /*
dc56401f
JW
2749 * Acquire the oom lock. If that fails, somebody else is
2750 * making progress for us.
9879de73 2751 */
dc56401f 2752 if (!mutex_trylock(&oom_lock)) {
9879de73 2753 *did_some_progress = 1;
11e33f6a 2754 schedule_timeout_uninterruptible(1);
1da177e4
LT
2755 return NULL;
2756 }
6b1de916 2757
11e33f6a
MG
2758 /*
2759 * Go through the zonelist one more time, keeping a very high watermark
2760 * here. This is only to catch a parallel oom killing; we must fail if
2761 * we're still under heavy pressure.
2762 */
a9263751
VB
2763 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
2764 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
7fb1d9fc 2765 if (page)
11e33f6a
MG
2766 goto out;
2767
4365a567 2768 if (!(gfp_mask & __GFP_NOFAIL)) {
9879de73
JW
2769 /* Coredumps can quickly deplete all memory reserves */
2770 if (current->flags & PF_DUMPCORE)
2771 goto out;
4365a567
KH
2772 /* The OOM killer will not help higher order allocs */
2773 if (order > PAGE_ALLOC_COSTLY_ORDER)
2774 goto out;
03668b3c 2775 /* The OOM killer does not needlessly kill tasks for lowmem */
a9263751 2776 if (ac->high_zoneidx < ZONE_NORMAL)
03668b3c 2777 goto out;
9083905a 2778 /* The OOM killer does not compensate for IO-less reclaim */
cc873177
JW
2779 if (!(gfp_mask & __GFP_FS)) {
2780 /*
2781 * XXX: Page reclaim didn't yield anything,
2782 * and the OOM killer can't be invoked, but
9083905a 2783 * keep looping as per tradition.
cc873177
JW
2784 */
2785 *did_some_progress = 1;
9879de73 2786 goto out;
cc873177 2787 }
9083905a
JW
2788 if (pm_suspended_storage())
2789 goto out;
4167e9b2 2790 /* The OOM killer may not free memory on a specific node */
4365a567
KH
2791 if (gfp_mask & __GFP_THISNODE)
2792 goto out;
2793 }
11e33f6a 2794 /* Exhausted what can be done so it's blamo time */
5020e285 2795 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
c32b3cbe 2796 *did_some_progress = 1;
5020e285
MH
2797
2798 if (gfp_mask & __GFP_NOFAIL) {
2799 page = get_page_from_freelist(gfp_mask, order,
2800 ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
2801 /*
2802 * fallback to ignore cpuset restriction if our nodes
2803 * are depleted
2804 */
2805 if (!page)
2806 page = get_page_from_freelist(gfp_mask, order,
2807 ALLOC_NO_WATERMARKS, ac);
2808 }
2809 }
11e33f6a 2810out:
dc56401f 2811 mutex_unlock(&oom_lock);
11e33f6a
MG
2812 return page;
2813}
2814
56de7263
MG
2815#ifdef CONFIG_COMPACTION
2816/* Try memory compaction for high-order allocations before reclaim */
2817static struct page *
2818__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
a9263751
VB
2819 int alloc_flags, const struct alloc_context *ac,
2820 enum migrate_mode mode, int *contended_compaction,
2821 bool *deferred_compaction)
56de7263 2822{
53853e2d 2823 unsigned long compact_result;
98dd3b48 2824 struct page *page;
53853e2d
VB
2825
2826 if (!order)
66199712 2827 return NULL;
66199712 2828
c06b1fca 2829 current->flags |= PF_MEMALLOC;
1a6d53a1
VB
2830 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2831 mode, contended_compaction);
c06b1fca 2832 current->flags &= ~PF_MEMALLOC;
56de7263 2833
98dd3b48
VB
2834 switch (compact_result) {
2835 case COMPACT_DEFERRED:
53853e2d 2836 *deferred_compaction = true;
98dd3b48
VB
2837 /* fall-through */
2838 case COMPACT_SKIPPED:
2839 return NULL;
2840 default:
2841 break;
2842 }
53853e2d 2843
98dd3b48
VB
2844 /*
2845 * At least in one zone compaction wasn't deferred or skipped, so let's
2846 * count a compaction stall
2847 */
2848 count_vm_event(COMPACTSTALL);
8fb74b9f 2849
a9263751
VB
2850 page = get_page_from_freelist(gfp_mask, order,
2851 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
53853e2d 2852
98dd3b48
VB
2853 if (page) {
2854 struct zone *zone = page_zone(page);
53853e2d 2855
98dd3b48
VB
2856 zone->compact_blockskip_flush = false;
2857 compaction_defer_reset(zone, order, true);
2858 count_vm_event(COMPACTSUCCESS);
2859 return page;
2860 }
56de7263 2861
98dd3b48
VB
2862 /*
2863 * It's bad if a compaction run occurs and fails. The most likely reason
2864 * is that pages exist, but not enough to satisfy watermarks.
2865 */
2866 count_vm_event(COMPACTFAIL);
66199712 2867
98dd3b48 2868 cond_resched();
56de7263
MG
2869
2870 return NULL;
2871}
2872#else
2873static inline struct page *
2874__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
a9263751
VB
2875 int alloc_flags, const struct alloc_context *ac,
2876 enum migrate_mode mode, int *contended_compaction,
2877 bool *deferred_compaction)
56de7263
MG
2878{
2879 return NULL;
2880}
2881#endif /* CONFIG_COMPACTION */
2882
bba90710
MS
2883/* Perform direct synchronous page reclaim */
2884static int
a9263751
VB
2885__perform_reclaim(gfp_t gfp_mask, unsigned int order,
2886 const struct alloc_context *ac)
11e33f6a 2887{
11e33f6a 2888 struct reclaim_state reclaim_state;
bba90710 2889 int progress;
11e33f6a
MG
2890
2891 cond_resched();
2892
2893 /* We now go into synchronous reclaim */
2894 cpuset_memory_pressure_bump();
c06b1fca 2895 current->flags |= PF_MEMALLOC;
11e33f6a
MG
2896 lockdep_set_current_reclaim_state(gfp_mask);
2897 reclaim_state.reclaimed_slab = 0;
c06b1fca 2898 current->reclaim_state = &reclaim_state;
11e33f6a 2899
a9263751
VB
2900 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
2901 ac->nodemask);
11e33f6a 2902
c06b1fca 2903 current->reclaim_state = NULL;
11e33f6a 2904 lockdep_clear_current_reclaim_state();
c06b1fca 2905 current->flags &= ~PF_MEMALLOC;
11e33f6a
MG
2906
2907 cond_resched();
2908
bba90710
MS
2909 return progress;
2910}
2911
2912/* The really slow allocator path where we enter direct reclaim */
2913static inline struct page *
2914__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
a9263751
VB
2915 int alloc_flags, const struct alloc_context *ac,
2916 unsigned long *did_some_progress)
bba90710
MS
2917{
2918 struct page *page = NULL;
2919 bool drained = false;
2920
a9263751 2921 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
9ee493ce
MG
2922 if (unlikely(!(*did_some_progress)))
2923 return NULL;
11e33f6a 2924
9ee493ce 2925retry:
a9263751
VB
2926 page = get_page_from_freelist(gfp_mask, order,
2927 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
9ee493ce
MG
2928
2929 /*
2930 * If an allocation failed after direct reclaim, it could be because
0aaa29a5
MG
2931 * pages are pinned on the per-cpu lists or in high alloc reserves.
2932 * Shrink them and try again
9ee493ce
MG
2933 */
2934 if (!page && !drained) {
0aaa29a5 2935 unreserve_highatomic_pageblock(ac);
93481ff0 2936 drain_all_pages(NULL);
9ee493ce
MG
2937 drained = true;
2938 goto retry;
2939 }
2940
11e33f6a
MG
2941 return page;
2942}
2943
a9263751 2944static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3a025760
JW
2945{
2946 struct zoneref *z;
2947 struct zone *zone;
2948
a9263751
VB
2949 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2950 ac->high_zoneidx, ac->nodemask)
2951 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
3a025760
JW
2952}
2953
341ce06f
PZ
2954static inline int
2955gfp_to_alloc_flags(gfp_t gfp_mask)
2956{
341ce06f 2957 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1da177e4 2958
a56f57ff 2959 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
e6223a3b 2960 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
933e312e 2961
341ce06f
PZ
2962 /*
2963 * The caller may dip into page reserves a bit more if the caller
2964 * cannot run direct reclaim, or if the caller has realtime scheduling
2965 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
d0164adc 2966 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
341ce06f 2967 */
e6223a3b 2968 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1da177e4 2969
d0164adc 2970 if (gfp_mask & __GFP_ATOMIC) {
5c3240d9 2971 /*
b104a35d
DR
2972 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2973 * if it can't schedule.
5c3240d9 2974 */
b104a35d 2975 if (!(gfp_mask & __GFP_NOMEMALLOC))
5c3240d9 2976 alloc_flags |= ALLOC_HARDER;
523b9458 2977 /*
b104a35d 2978 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
344736f2 2979 * comment for __cpuset_node_allowed().
523b9458 2980 */
341ce06f 2981 alloc_flags &= ~ALLOC_CPUSET;
c06b1fca 2982 } else if (unlikely(rt_task(current)) && !in_interrupt())
341ce06f
PZ
2983 alloc_flags |= ALLOC_HARDER;
2984
b37f1dd0
MG
2985 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2986 if (gfp_mask & __GFP_MEMALLOC)
2987 alloc_flags |= ALLOC_NO_WATERMARKS;
907aed48
MG
2988 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2989 alloc_flags |= ALLOC_NO_WATERMARKS;
2990 else if (!in_interrupt() &&
2991 ((current->flags & PF_MEMALLOC) ||
2992 unlikely(test_thread_flag(TIF_MEMDIE))))
341ce06f 2993 alloc_flags |= ALLOC_NO_WATERMARKS;
1da177e4 2994 }
d95ea5d1 2995#ifdef CONFIG_CMA
43e7a34d 2996 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
d95ea5d1
BZ
2997 alloc_flags |= ALLOC_CMA;
2998#endif
341ce06f
PZ
2999 return alloc_flags;
3000}
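/*
 * Worked example (editor's sketch, not part of the kernel source),
 * assuming GFP_ATOMIC expands to __GFP_HIGH | __GFP_ATOMIC |
 * __GFP_KSWAPD_RECLAIM as defined in include/linux/gfp.h for this kernel:
 *
 *   start:        ALLOC_WMARK_MIN | ALLOC_CPUSET
 *   __GFP_HIGH:   + ALLOC_HIGH
 *   __GFP_ATOMIC: + ALLOC_HARDER, - ALLOC_CPUSET (cpuset limits ignored)
 *
 * so a plain GFP_ATOMIC caller ends up with
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 */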
3001
072bb0aa
MG
3002bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3003{
b37f1dd0 3004 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
072bb0aa
MG
3005}
3006
d0164adc
MG
3007static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
3008{
3009 return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
3010}
3011
11e33f6a
MG
3012static inline struct page *
3013__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
a9263751 3014 struct alloc_context *ac)
11e33f6a 3015{
d0164adc 3016 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
11e33f6a
MG
3017 struct page *page = NULL;
3018 int alloc_flags;
3019 unsigned long pages_reclaimed = 0;
3020 unsigned long did_some_progress;
e0b9daeb 3021 enum migrate_mode migration_mode = MIGRATE_ASYNC;
66199712 3022 bool deferred_compaction = false;
1f9efdef 3023 int contended_compaction = COMPACT_CONTENDED_NONE;
1da177e4 3024
72807a74
MG
3025 /*
3026 * In the slowpath, we sanity check order to avoid ever trying to
3027 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3028 * be using allocators in order of preference for an area that is
3029 * too large.
3030 */
1fc28b70
MG
3031 if (order >= MAX_ORDER) {
3032 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 3033 return NULL;
1fc28b70 3034 }
1da177e4 3035
d0164adc
MG
3036 /*
3037 * We also sanity check to catch abuse of atomic reserves being used by
3038 * callers that are not in atomic context.
3039 */
3040 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3041 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3042 gfp_mask &= ~__GFP_ATOMIC;
3043
952f3b51 3044 /*
4167e9b2
DR
3045 * If this allocation cannot block and it is for a specific node, then
3046 * fail early. There's no need to wakeup kswapd or retry for a
3047 * speculative node-specific allocation.
952f3b51 3048 */
d0164adc 3049 if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim)
952f3b51
CL
3050 goto nopage;
3051
9879de73 3052retry:
d0164adc 3053 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
a9263751 3054 wake_all_kswapds(order, ac);
1da177e4 3055
9bf2229f 3056 /*
7fb1d9fc
RS
3057 * OK, we're below the kswapd watermark and have kicked background
3058 * reclaim. Now things get more complex, so set up alloc_flags according
3059 * to how we want to proceed.
9bf2229f 3060 */
341ce06f 3061 alloc_flags = gfp_to_alloc_flags(gfp_mask);
1da177e4 3062
f33261d7
DR
3063 /*
3064 * Find the true preferred zone if the allocation is unconstrained by
3065 * cpusets.
3066 */
a9263751 3067 if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
d8846374 3068 struct zoneref *preferred_zoneref;
a9263751
VB
3069 preferred_zoneref = first_zones_zonelist(ac->zonelist,
3070 ac->high_zoneidx, NULL, &ac->preferred_zone);
3071 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
d8846374 3072 }
f33261d7 3073
341ce06f 3074 /* This is the last chance, in general, before the goto nopage. */
a9263751
VB
3075 page = get_page_from_freelist(gfp_mask, order,
3076 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
7fb1d9fc
RS
3077 if (page)
3078 goto got_pg;
1da177e4 3079
11e33f6a 3080 /* Allocate without watermarks if the context allows */
341ce06f 3081 if (alloc_flags & ALLOC_NO_WATERMARKS) {
183f6371
MG
3082 /*
3083 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
3084 * the allocation is high priority and these types of
3085 * allocations are system rather than user oriented.
3086 */
a9263751 3087 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
33d53103
MH
3088 page = get_page_from_freelist(gfp_mask, order,
3089 ALLOC_NO_WATERMARKS, ac);
3090 if (page)
3091 goto got_pg;
1da177e4
LT
3092 }
3093
d0164adc
MG
3094 /* Caller is not willing to reclaim, we can't balance anything */
3095 if (!can_direct_reclaim) {
aed0a0e3 3096 /*
33d53103
MH
3097 * All existing users of __GFP_NOFAIL are blockable, so warn
3098 * of any new users that actually allow this type of allocation
3099 * to fail.
aed0a0e3
DR
3100 */
3101 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
1da177e4 3102 goto nopage;
aed0a0e3 3103 }
1da177e4 3104
341ce06f 3105 /* Avoid recursion of direct reclaim */
33d53103
MH
3106 if (current->flags & PF_MEMALLOC) {
3107 /*
3108 * __GFP_NOFAIL request from this context is rather bizarre
3109 * because we cannot reclaim anything and can only loop waiting
3110 * for somebody to do the work for us.
3111 */
3112 if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3113 cond_resched();
3114 goto retry;
3115 }
341ce06f 3116 goto nopage;
33d53103 3117 }
341ce06f 3118
6583bb64
DR
3119 /* Avoid allocations with no watermarks from looping endlessly */
3120 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
3121 goto nopage;
3122
77f1fe6b
MG
3123 /*
3124 * Try direct compaction. The first pass is asynchronous. Subsequent
3125 * attempts after direct reclaim are synchronous
3126 */
a9263751
VB
3127 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3128 migration_mode,
3129 &contended_compaction,
53853e2d 3130 &deferred_compaction);
56de7263
MG
3131 if (page)
3132 goto got_pg;
75f30861 3133
1f9efdef 3134 /* Checks for THP-specific high-order allocations */
d0164adc 3135 if (is_thp_gfp_mask(gfp_mask)) {
1f9efdef
VB
3136 /*
3137 * If compaction is deferred for high-order allocations, it is
3138 * because sync compaction recently failed. If this is the case
3139 * and the caller requested a THP allocation, we do not want
3140 * to heavily disrupt the system, so we fail the allocation
3141 * instead of entering direct reclaim.
3142 */
3143 if (deferred_compaction)
3144 goto nopage;
3145
3146 /*
3147 * In all zones where compaction was attempted (and not
3148 * deferred or skipped), lock contention has been detected.
3149 * For THP allocation we do not want to disrupt the others
3150 * so we fallback to base pages instead.
3151 */
3152 if (contended_compaction == COMPACT_CONTENDED_LOCK)
3153 goto nopage;
3154
3155 /*
3156 * If compaction was aborted due to need_resched(), we do not
3157 * want to further increase allocation latency, unless it is
3158 * khugepaged trying to collapse.
3159 */
3160 if (contended_compaction == COMPACT_CONTENDED_SCHED
3161 && !(current->flags & PF_KTHREAD))
3162 goto nopage;
3163 }
66199712 3164
8fe78048
DR
3165 /*
3166 * It can become very expensive to allocate transparent hugepages at
3167 * fault, so use asynchronous memory compaction for THP unless it is
3168 * khugepaged trying to collapse.
3169 */
d0164adc 3170 if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD))
8fe78048
DR
3171 migration_mode = MIGRATE_SYNC_LIGHT;
3172
11e33f6a 3173 /* Try direct reclaim and then allocating */
a9263751
VB
3174 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3175 &did_some_progress);
11e33f6a
MG
3176 if (page)
3177 goto got_pg;
1da177e4 3178
9083905a
JW
3179 /* Do not loop if specifically requested */
3180 if (gfp_mask & __GFP_NORETRY)
3181 goto noretry;
3182
3183 /* Keep reclaiming pages as long as there is reasonable progress */
a41f24ea 3184 pages_reclaimed += did_some_progress;
9083905a
JW
3185 if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
3186 ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
11e33f6a 3187 /* Wait for some write requests to complete then retry */
a9263751 3188 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
9879de73 3189 goto retry;
1da177e4
LT
3190 }
3191
9083905a
JW
3192 /* Reclaim has failed us, start killing things */
3193 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3194 if (page)
3195 goto got_pg;
3196
3197 /* Retry as long as the OOM killer is making progress */
3198 if (did_some_progress)
3199 goto retry;
3200
3201noretry:
3202 /*
3203 * High-order allocations do not necessarily loop after
3204 * direct reclaim, and reclaim/compaction depends on compaction
3205 * being called after reclaim, so call it directly if necessary.
3206 */
3207 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
3208 ac, migration_mode,
3209 &contended_compaction,
3210 &deferred_compaction);
3211 if (page)
3212 goto got_pg;
1da177e4 3213nopage:
a238ab5b 3214 warn_alloc_failed(gfp_mask, order, NULL);
1da177e4 3215got_pg:
072bb0aa 3216 return page;
1da177e4 3217}
11e33f6a
MG
3218
3219/*
3220 * This is the 'heart' of the zoned buddy allocator.
3221 */
3222struct page *
3223__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3224 struct zonelist *zonelist, nodemask_t *nodemask)
3225{
d8846374 3226 struct zoneref *preferred_zoneref;
cc9a6c87 3227 struct page *page = NULL;
cc9a6c87 3228 unsigned int cpuset_mems_cookie;
3a025760 3229 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
91fbdc0f 3230 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
a9263751
VB
3231 struct alloc_context ac = {
3232 .high_zoneidx = gfp_zone(gfp_mask),
3233 .nodemask = nodemask,
3234 .migratetype = gfpflags_to_migratetype(gfp_mask),
3235 };
11e33f6a 3236
dcce284a
BH
3237 gfp_mask &= gfp_allowed_mask;
3238
11e33f6a
MG
3239 lockdep_trace_alloc(gfp_mask);
3240
d0164adc 3241 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
11e33f6a
MG
3242
3243 if (should_fail_alloc_page(gfp_mask, order))
3244 return NULL;
3245
3246 /*
3247 * Check the zones suitable for the gfp_mask contain at least one
3248 * valid zone. It's possible to have an empty zonelist as a result
4167e9b2 3249 * of __GFP_THISNODE and a memoryless node
11e33f6a
MG
3250 */
3251 if (unlikely(!zonelist->_zonerefs->zone))
3252 return NULL;
3253
a9263751 3254 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
21bb9bd1
VB
3255 alloc_flags |= ALLOC_CMA;
3256
cc9a6c87 3257retry_cpuset:
d26914d1 3258 cpuset_mems_cookie = read_mems_allowed_begin();
cc9a6c87 3259
a9263751
VB
3260 /* We set it here, as __alloc_pages_slowpath might have changed it */
3261 ac.zonelist = zonelist;
c9ab0c4f
MG
3262
3263 /* Dirty zone balancing only done in the fast path */
3264 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3265
5117f45d 3266 /* The preferred zone is used for statistics later */
a9263751
VB
3267 preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
3268 ac.nodemask ? : &cpuset_current_mems_allowed,
3269 &ac.preferred_zone);
3270 if (!ac.preferred_zone)
cc9a6c87 3271 goto out;
a9263751 3272 ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
5117f45d
MG
3273
3274 /* First allocation attempt */
91fbdc0f 3275 alloc_mask = gfp_mask|__GFP_HARDWALL;
a9263751 3276 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
21caf2fc
ML
3277 if (unlikely(!page)) {
3278 /*
3279 * Runtime PM, block IO and its error handling path
3280 * can deadlock because I/O on the device might not
3281 * complete.
3282 */
91fbdc0f 3283 alloc_mask = memalloc_noio_flags(gfp_mask);
c9ab0c4f 3284 ac.spread_dirty_pages = false;
91fbdc0f 3285
a9263751 3286 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
21caf2fc 3287 }
11e33f6a 3288
23f086f9
XQ
3289 if (kmemcheck_enabled && page)
3290 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3291
a9263751 3292 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
cc9a6c87
MG
3293
3294out:
3295 /*
3296 * When updating a task's mems_allowed, it is possible to race with
3297 * parallel threads in such a way that an allocation can fail while
3298 * the mask is being updated. If a page allocation is about to fail,
3299 * check if the cpuset changed during allocation and if so, retry.
3300 */
d26914d1 3301 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
cc9a6c87
MG
3302 goto retry_cpuset;
3303
11e33f6a 3304 return page;
1da177e4 3305}
d239171e 3306EXPORT_SYMBOL(__alloc_pages_nodemask);
1da177e4
LT
3307
3308/*
3309 * Common helper functions.
3310 */
920c7a5d 3311unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 3312{
945a1113
AM
3313 struct page *page;
3314
3315 /*
 3316 * __get_free_pages() returns a directly-mapped kernel virtual address,
 3317 * which cannot represent a highmem page.
3318 */
3319 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3320
1da177e4
LT
3321 page = alloc_pages(gfp_mask, order);
3322 if (!page)
3323 return 0;
3324 return (unsigned long) page_address(page);
3325}
1da177e4
LT
3326EXPORT_SYMBOL(__get_free_pages);
3327
920c7a5d 3328unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 3329{
945a1113 3330 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 3331}
1da177e4
LT
3332EXPORT_SYMBOL(get_zeroed_page);
3333
920c7a5d 3334void __free_pages(struct page *page, unsigned int order)
1da177e4 3335{
b5810039 3336 if (put_page_testzero(page)) {
1da177e4 3337 if (order == 0)
b745bc85 3338 free_hot_cold_page(page, false);
1da177e4
LT
3339 else
3340 __free_pages_ok(page, order);
3341 }
3342}
3343
3344EXPORT_SYMBOL(__free_pages);
3345
920c7a5d 3346void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
3347{
3348 if (addr != 0) {
725d704e 3349 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
3350 __free_pages(virt_to_page((void *)addr), order);
3351 }
3352}
3353
3354EXPORT_SYMBOL(free_pages);
3355
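/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * typical use of the helpers above.  The variable name and the order value
 * are hypothetical.
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
 *	if (!buf)
 *		return -ENOMEM;
 *	...use the four contiguous pages at (void *)buf...
 *	free_pages(buf, 2);
 *
 * get_zeroed_page(GFP_KERNEL) is the order-0, zero-filled variant and is
 * paired with free_page().
 */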
b63ae8ca
AD
3356/*
3357 * Page Fragment:
3358 * An arbitrary-length arbitrary-offset area of memory which resides
3359 * within a 0 or higher order page. Multiple fragments within that page
3360 * are individually refcounted, in the page's reference counter.
3361 *
3362 * The page_frag functions below provide a simple allocation framework for
3363 * page fragments. This is used by the network stack and network device
3364 * drivers to provide a backing region of memory for use as either an
3365 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
3366 */
3367static struct page *__page_frag_refill(struct page_frag_cache *nc,
3368 gfp_t gfp_mask)
3369{
3370 struct page *page = NULL;
3371 gfp_t gfp = gfp_mask;
3372
3373#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3374 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
3375 __GFP_NOMEMALLOC;
3376 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
3377 PAGE_FRAG_CACHE_MAX_ORDER);
3378 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
3379#endif
3380 if (unlikely(!page))
3381 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
3382
3383 nc->va = page ? page_address(page) : NULL;
3384
3385 return page;
3386}
3387
3388void *__alloc_page_frag(struct page_frag_cache *nc,
3389 unsigned int fragsz, gfp_t gfp_mask)
3390{
3391 unsigned int size = PAGE_SIZE;
3392 struct page *page;
3393 int offset;
3394
3395 if (unlikely(!nc->va)) {
3396refill:
3397 page = __page_frag_refill(nc, gfp_mask);
3398 if (!page)
3399 return NULL;
3400
3401#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3402 /* if size can vary use size else just use PAGE_SIZE */
3403 size = nc->size;
3404#endif
3405 /* Even if we own the page, we do not use atomic_set().
3406 * This would break get_page_unless_zero() users.
3407 */
3408 atomic_add(size - 1, &page->_count);
3409
3410 /* reset page count bias and offset to start of new frag */
2f064f34 3411 nc->pfmemalloc = page_is_pfmemalloc(page);
b63ae8ca
AD
3412 nc->pagecnt_bias = size;
3413 nc->offset = size;
3414 }
3415
3416 offset = nc->offset - fragsz;
3417 if (unlikely(offset < 0)) {
3418 page = virt_to_page(nc->va);
3419
3420 if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
3421 goto refill;
3422
3423#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
3424 /* if size can vary use size else just use PAGE_SIZE */
3425 size = nc->size;
3426#endif
3427 /* OK, page count is 0, we can safely set it */
3428 atomic_set(&page->_count, size);
3429
3430 /* reset page count bias and offset to start of new frag */
3431 nc->pagecnt_bias = size;
3432 offset = size - fragsz;
3433 }
3434
3435 nc->pagecnt_bias--;
3436 nc->offset = offset;
3437
3438 return nc->va + offset;
3439}
3440EXPORT_SYMBOL(__alloc_page_frag);
3441
3442/*
3443 * Frees a page fragment allocated out of either a compound or order 0 page.
3444 */
3445void __free_page_frag(void *addr)
3446{
3447 struct page *page = virt_to_head_page(addr);
3448
3449 if (unlikely(put_page_testzero(page)))
3450 __free_pages_ok(page, compound_order(page));
3451}
3452EXPORT_SYMBOL(__free_page_frag);
3453
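/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * how a networking-style caller might use the page-fragment allocator
 * above.  The cache and size names are hypothetical; the caller owns the
 * zero-initialised struct page_frag_cache.
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *data = __alloc_page_frag(&frag_cache, fragsz, GFP_ATOMIC);
 *	if (!data)
 *		return NULL;
 *	...fill the fragment, attach it to an skb, etc...
 *	__free_page_frag(data);
 *
 * Each fragment holds one reference on the backing page; the page is only
 * returned to the buddy allocator when the last fragment is freed.
 */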
6a1a0d3b 3454/*
52383431 3455 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
a9bb7e62
VD
 3456 * of the current memory cgroup if __GFP_ACCOUNT is set; other than that it
 3457 * is equivalent to alloc_pages().
6a1a0d3b 3458 *
52383431
VD
3459 * It should be used when the caller would like to use kmalloc, but since the
3460 * allocation is large, it has to fall back to the page allocator.
3461 */
3462struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
3463{
3464 struct page *page;
52383431 3465
52383431 3466 page = alloc_pages(gfp_mask, order);
d05e83a6
VD
3467 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
3468 __free_pages(page, order);
3469 page = NULL;
3470 }
52383431
VD
3471 return page;
3472}
3473
3474struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3475{
3476 struct page *page;
52383431 3477
52383431 3478 page = alloc_pages_node(nid, gfp_mask, order);
d05e83a6
VD
3479 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
3480 __free_pages(page, order);
3481 page = NULL;
3482 }
52383431
VD
3483 return page;
3484}
3485
3486/*
3487 * __free_kmem_pages and free_kmem_pages will free pages allocated with
3488 * alloc_kmem_pages.
6a1a0d3b 3489 */
52383431 3490void __free_kmem_pages(struct page *page, unsigned int order)
6a1a0d3b 3491{
d05e83a6 3492 memcg_kmem_uncharge(page, order);
6a1a0d3b
GC
3493 __free_pages(page, order);
3494}
3495
52383431 3496void free_kmem_pages(unsigned long addr, unsigned int order)
6a1a0d3b
GC
3497{
3498 if (addr != 0) {
3499 VM_BUG_ON(!virt_addr_valid((void *)addr));
52383431 3500 __free_kmem_pages(virt_to_page((void *)addr), order);
6a1a0d3b
GC
3501 }
3502}
3503
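/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * a large, cgroup-charged allocation using the helpers above.  The order
 * is hypothetical.
 *
 *	struct page *page = alloc_kmem_pages(GFP_KERNEL | __GFP_ACCOUNT, 4);
 *	if (!page)
 *		return -ENOMEM;
 *	...use page_address(page)...
 *	__free_kmem_pages(page, 4);
 *
 * With __GFP_ACCOUNT set, the pages are charged to (and later uncharged
 * from) the current memory cgroup's kmem counter, as described above.
 */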
d00181b9
KS
3504static void *make_alloc_exact(unsigned long addr, unsigned int order,
3505 size_t size)
ee85c2e1
AK
3506{
3507 if (addr) {
3508 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3509 unsigned long used = addr + PAGE_ALIGN(size);
3510
3511 split_page(virt_to_page((void *)addr), order);
3512 while (used < alloc_end) {
3513 free_page(used);
3514 used += PAGE_SIZE;
3515 }
3516 }
3517 return (void *)addr;
3518}
3519
2be0ffe2
TT
3520/**
 3521 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3522 * @size: the number of bytes to allocate
3523 * @gfp_mask: GFP flags for the allocation
3524 *
3525 * This function is similar to alloc_pages(), except that it allocates the
3526 * minimum number of pages to satisfy the request. alloc_pages() can only
3527 * allocate memory in power-of-two pages.
3528 *
3529 * This function is also limited by MAX_ORDER.
3530 *
3531 * Memory allocated by this function must be released by free_pages_exact().
3532 */
3533void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3534{
3535 unsigned int order = get_order(size);
3536 unsigned long addr;
3537
3538 addr = __get_free_pages(gfp_mask, order);
ee85c2e1 3539 return make_alloc_exact(addr, order, size);
2be0ffe2
TT
3540}
3541EXPORT_SYMBOL(alloc_pages_exact);
3542
ee85c2e1
AK
3543/**
3544 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3545 * pages on a node.
b5e6ab58 3546 * @nid: the preferred node ID where memory should be allocated
ee85c2e1
AK
3547 * @size: the number of bytes to allocate
3548 * @gfp_mask: GFP flags for the allocation
3549 *
3550 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3551 * back.
ee85c2e1 3552 */
e1931811 3553void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
ee85c2e1 3554{
d00181b9 3555 unsigned int order = get_order(size);
ee85c2e1
AK
3556 struct page *p = alloc_pages_node(nid, gfp_mask, order);
3557 if (!p)
3558 return NULL;
3559 return make_alloc_exact((unsigned long)page_address(p), order, size);
3560}
ee85c2e1 3561
2be0ffe2
TT
3562/**
3563 * free_pages_exact - release memory allocated via alloc_pages_exact()
3564 * @virt: the value returned by alloc_pages_exact.
3565 * @size: size of allocation, same value as passed to alloc_pages_exact().
3566 *
3567 * Release the memory allocated by a previous call to alloc_pages_exact.
3568 */
3569void free_pages_exact(void *virt, size_t size)
3570{
3571 unsigned long addr = (unsigned long)virt;
3572 unsigned long end = addr + PAGE_ALIGN(size);
3573
3574 while (addr < end) {
3575 free_page(addr);
3576 addr += PAGE_SIZE;
3577 }
3578}
3579EXPORT_SYMBOL(free_pages_exact);
3580
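/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * the alloc_pages_exact()/free_pages_exact() pair.  The size is
 * hypothetical; both calls must use the same size.
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 5 * PAGE_SIZE);
 *
 * Internally an order-3 (eight page) block is allocated, split_page() is
 * applied, and the three trailing pages are freed back immediately.
 */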
e0fb5815
ZY
3581/**
3582 * nr_free_zone_pages - count number of pages beyond high watermark
3583 * @offset: The zone index of the highest zone
3584 *
 3585 * nr_free_zone_pages() counts the number of pages which are beyond the
3586 * high watermark within all zones at or below a given zone index. For each
3587 * zone, the number of pages is calculated as:
834405c3 3588 * managed_pages - high_pages
e0fb5815 3589 */
ebec3862 3590static unsigned long nr_free_zone_pages(int offset)
1da177e4 3591{
dd1a239f 3592 struct zoneref *z;
54a6eb5c
MG
3593 struct zone *zone;
3594
e310fd43 3595 /* Just pick one node, since fallback list is circular */
ebec3862 3596 unsigned long sum = 0;
1da177e4 3597
0e88460d 3598 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 3599
54a6eb5c 3600 for_each_zone_zonelist(zone, z, zonelist, offset) {
b40da049 3601 unsigned long size = zone->managed_pages;
41858966 3602 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
3603 if (size > high)
3604 sum += size - high;
1da177e4
LT
3605 }
3606
3607 return sum;
3608}
3609
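/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * with two hypothetical zones at or below @offset, one with 1000 managed
 * pages and a high watermark of 50 and one with 200 managed pages and a
 * high watermark of 20, nr_free_zone_pages() returns
 * (1000 - 50) + (200 - 20) = 1130.
 */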
e0fb5815
ZY
3610/**
3611 * nr_free_buffer_pages - count number of pages beyond high watermark
3612 *
3613 * nr_free_buffer_pages() counts the number of pages which are beyond the high
3614 * watermark within ZONE_DMA and ZONE_NORMAL.
1da177e4 3615 */
ebec3862 3616unsigned long nr_free_buffer_pages(void)
1da177e4 3617{
af4ca457 3618 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 3619}
c2f1a551 3620EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4 3621
e0fb5815
ZY
3622/**
3623 * nr_free_pagecache_pages - count number of pages beyond high watermark
3624 *
3625 * nr_free_pagecache_pages() counts the number of pages which are beyond the
3626 * high watermark within all zones.
1da177e4 3627 */
ebec3862 3628unsigned long nr_free_pagecache_pages(void)
1da177e4 3629{
2a1e274a 3630 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 3631}
08e0f6a9
CL
3632
3633static inline void show_node(struct zone *zone)
1da177e4 3634{
e5adfffc 3635 if (IS_ENABLED(CONFIG_NUMA))
25ba77c1 3636 printk("Node %d ", zone_to_nid(zone));
1da177e4 3637}
1da177e4 3638
1da177e4
LT
3639void si_meminfo(struct sysinfo *val)
3640{
3641 val->totalram = totalram_pages;
cc7452b6 3642 val->sharedram = global_page_state(NR_SHMEM);
d23ad423 3643 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 3644 val->bufferram = nr_blockdev_pages();
1da177e4
LT
3645 val->totalhigh = totalhigh_pages;
3646 val->freehigh = nr_free_highpages();
1da177e4
LT
3647 val->mem_unit = PAGE_SIZE;
3648}
3649
3650EXPORT_SYMBOL(si_meminfo);
3651
3652#ifdef CONFIG_NUMA
3653void si_meminfo_node(struct sysinfo *val, int nid)
3654{
cdd91a77
JL
3655 int zone_type; /* needs to be signed */
3656 unsigned long managed_pages = 0;
1da177e4
LT
3657 pg_data_t *pgdat = NODE_DATA(nid);
3658
cdd91a77
JL
3659 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3660 managed_pages += pgdat->node_zones[zone_type].managed_pages;
3661 val->totalram = managed_pages;
cc7452b6 3662 val->sharedram = node_page_state(nid, NR_SHMEM);
d23ad423 3663 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 3664#ifdef CONFIG_HIGHMEM
b40da049 3665 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
d23ad423
CL
3666 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3667 NR_FREE_PAGES);
98d2b0eb
CL
3668#else
3669 val->totalhigh = 0;
3670 val->freehigh = 0;
3671#endif
1da177e4
LT
3672 val->mem_unit = PAGE_SIZE;
3673}
3674#endif
3675
ddd588b5 3676/*
7bf02ea2
DR
3677 * Determine whether the node should be displayed or not, depending on whether
3678 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
ddd588b5 3679 */
7bf02ea2 3680bool skip_free_areas_node(unsigned int flags, int nid)
ddd588b5
DR
3681{
3682 bool ret = false;
cc9a6c87 3683 unsigned int cpuset_mems_cookie;
ddd588b5
DR
3684
3685 if (!(flags & SHOW_MEM_FILTER_NODES))
3686 goto out;
3687
cc9a6c87 3688 do {
d26914d1 3689 cpuset_mems_cookie = read_mems_allowed_begin();
cc9a6c87 3690 ret = !node_isset(nid, cpuset_current_mems_allowed);
d26914d1 3691 } while (read_mems_allowed_retry(cpuset_mems_cookie));
ddd588b5
DR
3692out:
3693 return ret;
3694}
3695
1da177e4
LT
3696#define K(x) ((x) << (PAGE_SHIFT-10))
3697
377e4f16
RV
3698static void show_migration_types(unsigned char type)
3699{
3700 static const char types[MIGRATE_TYPES] = {
3701 [MIGRATE_UNMOVABLE] = 'U',
377e4f16 3702 [MIGRATE_MOVABLE] = 'M',
475a2f90
VB
3703 [MIGRATE_RECLAIMABLE] = 'E',
3704 [MIGRATE_HIGHATOMIC] = 'H',
377e4f16
RV
3705#ifdef CONFIG_CMA
3706 [MIGRATE_CMA] = 'C',
3707#endif
194159fb 3708#ifdef CONFIG_MEMORY_ISOLATION
377e4f16 3709 [MIGRATE_ISOLATE] = 'I',
194159fb 3710#endif
377e4f16
RV
3711 };
3712 char tmp[MIGRATE_TYPES + 1];
3713 char *p = tmp;
3714 int i;
3715
3716 for (i = 0; i < MIGRATE_TYPES; i++) {
3717 if (type & (1 << i))
3718 *p++ = types[i];
3719 }
3720
3721 *p = '\0';
3722 printk("(%s) ", tmp);
3723}
3724
1da177e4
LT
3725/*
3726 * Show free area list (used inside shift_scroll-lock stuff)
3727 * We also calculate the percentage fragmentation. We do this by counting the
3728 * memory on each free list with the exception of the first item on the list.
d1bfcdb8
KK
3729 *
3730 * Bits in @filter:
3731 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
3732 * cpuset.
1da177e4 3733 */
7bf02ea2 3734void show_free_areas(unsigned int filter)
1da177e4 3735{
d1bfcdb8 3736 unsigned long free_pcp = 0;
c7241913 3737 int cpu;
1da177e4
LT
3738 struct zone *zone;
3739
ee99c71c 3740 for_each_populated_zone(zone) {
7bf02ea2 3741 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 3742 continue;
d1bfcdb8 3743
761b0677
KK
3744 for_each_online_cpu(cpu)
3745 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
1da177e4
LT
3746 }
3747
a731286d
KM
3748 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3749 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
d1bfcdb8
KK
3750 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
3751 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
d1ce749a 3752 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
d1bfcdb8 3753 " free:%lu free_pcp:%lu free_cma:%lu\n",
4f98a2fe 3754 global_page_state(NR_ACTIVE_ANON),
4f98a2fe 3755 global_page_state(NR_INACTIVE_ANON),
a731286d
KM
3756 global_page_state(NR_ISOLATED_ANON),
3757 global_page_state(NR_ACTIVE_FILE),
4f98a2fe 3758 global_page_state(NR_INACTIVE_FILE),
a731286d 3759 global_page_state(NR_ISOLATED_FILE),
7b854121 3760 global_page_state(NR_UNEVICTABLE),
b1e7a8fd 3761 global_page_state(NR_FILE_DIRTY),
ce866b34 3762 global_page_state(NR_WRITEBACK),
fd39fc85 3763 global_page_state(NR_UNSTABLE_NFS),
3701b033
KM
3764 global_page_state(NR_SLAB_RECLAIMABLE),
3765 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 3766 global_page_state(NR_FILE_MAPPED),
4b02108a 3767 global_page_state(NR_SHMEM),
a25700a5 3768 global_page_state(NR_PAGETABLE),
d1ce749a 3769 global_page_state(NR_BOUNCE),
d1bfcdb8
KK
3770 global_page_state(NR_FREE_PAGES),
3771 free_pcp,
d1ce749a 3772 global_page_state(NR_FREE_CMA_PAGES));
1da177e4 3773
ee99c71c 3774 for_each_populated_zone(zone) {
1da177e4
LT
3775 int i;
3776
7bf02ea2 3777 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 3778 continue;
d1bfcdb8
KK
3779
3780 free_pcp = 0;
3781 for_each_online_cpu(cpu)
3782 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
3783
1da177e4
LT
3784 show_node(zone);
3785 printk("%s"
3786 " free:%lukB"
3787 " min:%lukB"
3788 " low:%lukB"
3789 " high:%lukB"
4f98a2fe
RR
3790 " active_anon:%lukB"
3791 " inactive_anon:%lukB"
3792 " active_file:%lukB"
3793 " inactive_file:%lukB"
7b854121 3794 " unevictable:%lukB"
a731286d
KM
3795 " isolated(anon):%lukB"
3796 " isolated(file):%lukB"
1da177e4 3797 " present:%lukB"
9feedc9d 3798 " managed:%lukB"
4a0aa73f
KM
3799 " mlocked:%lukB"
3800 " dirty:%lukB"
3801 " writeback:%lukB"
3802 " mapped:%lukB"
4b02108a 3803 " shmem:%lukB"
4a0aa73f
KM
3804 " slab_reclaimable:%lukB"
3805 " slab_unreclaimable:%lukB"
c6a7f572 3806 " kernel_stack:%lukB"
4a0aa73f
KM
3807 " pagetables:%lukB"
3808 " unstable:%lukB"
3809 " bounce:%lukB"
d1bfcdb8
KK
3810 " free_pcp:%lukB"
3811 " local_pcp:%ukB"
d1ce749a 3812 " free_cma:%lukB"
4a0aa73f 3813 " writeback_tmp:%lukB"
1da177e4
LT
3814 " pages_scanned:%lu"
3815 " all_unreclaimable? %s"
3816 "\n",
3817 zone->name,
88f5acf8 3818 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
3819 K(min_wmark_pages(zone)),
3820 K(low_wmark_pages(zone)),
3821 K(high_wmark_pages(zone)),
4f98a2fe
RR
3822 K(zone_page_state(zone, NR_ACTIVE_ANON)),
3823 K(zone_page_state(zone, NR_INACTIVE_ANON)),
3824 K(zone_page_state(zone, NR_ACTIVE_FILE)),
3825 K(zone_page_state(zone, NR_INACTIVE_FILE)),
7b854121 3826 K(zone_page_state(zone, NR_UNEVICTABLE)),
a731286d
KM
3827 K(zone_page_state(zone, NR_ISOLATED_ANON)),
3828 K(zone_page_state(zone, NR_ISOLATED_FILE)),
1da177e4 3829 K(zone->present_pages),
9feedc9d 3830 K(zone->managed_pages),
4a0aa73f
KM
3831 K(zone_page_state(zone, NR_MLOCK)),
3832 K(zone_page_state(zone, NR_FILE_DIRTY)),
3833 K(zone_page_state(zone, NR_WRITEBACK)),
3834 K(zone_page_state(zone, NR_FILE_MAPPED)),
4b02108a 3835 K(zone_page_state(zone, NR_SHMEM)),
4a0aa73f
KM
3836 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3837 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
c6a7f572
KM
3838 zone_page_state(zone, NR_KERNEL_STACK) *
3839 THREAD_SIZE / 1024,
4a0aa73f
KM
3840 K(zone_page_state(zone, NR_PAGETABLE)),
3841 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3842 K(zone_page_state(zone, NR_BOUNCE)),
d1bfcdb8
KK
3843 K(free_pcp),
3844 K(this_cpu_read(zone->pageset->pcp.count)),
d1ce749a 3845 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
4a0aa73f 3846 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
0d5d823a 3847 K(zone_page_state(zone, NR_PAGES_SCANNED)),
6e543d57 3848 (!zone_reclaimable(zone) ? "yes" : "no")
1da177e4
LT
3849 );
3850 printk("lowmem_reserve[]:");
3851 for (i = 0; i < MAX_NR_ZONES; i++)
3484b2de 3852 printk(" %ld", zone->lowmem_reserve[i]);
1da177e4
LT
3853 printk("\n");
3854 }
3855
ee99c71c 3856 for_each_populated_zone(zone) {
d00181b9
KS
3857 unsigned int order;
3858 unsigned long nr[MAX_ORDER], flags, total = 0;
377e4f16 3859 unsigned char types[MAX_ORDER];
1da177e4 3860
7bf02ea2 3861 if (skip_free_areas_node(filter, zone_to_nid(zone)))
ddd588b5 3862 continue;
1da177e4
LT
3863 show_node(zone);
3864 printk("%s: ", zone->name);
1da177e4
LT
3865
3866 spin_lock_irqsave(&zone->lock, flags);
3867 for (order = 0; order < MAX_ORDER; order++) {
377e4f16
RV
3868 struct free_area *area = &zone->free_area[order];
3869 int type;
3870
3871 nr[order] = area->nr_free;
8f9de51a 3872 total += nr[order] << order;
377e4f16
RV
3873
3874 types[order] = 0;
3875 for (type = 0; type < MIGRATE_TYPES; type++) {
3876 if (!list_empty(&area->free_list[type]))
3877 types[order] |= 1 << type;
3878 }
1da177e4
LT
3879 }
3880 spin_unlock_irqrestore(&zone->lock, flags);
377e4f16 3881 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a 3882 printk("%lu*%lukB ", nr[order], K(1UL) << order);
377e4f16
RV
3883 if (nr[order])
3884 show_migration_types(types[order]);
3885 }
1da177e4
LT
3886 printk("= %lukB\n", K(total));
3887 }
3888
949f7ec5
DR
3889 hugetlb_show_meminfo();
3890
e6f3602d
LW
3891 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3892
1da177e4
LT
3893 show_swap_cache_info();
3894}
3895
19770b32
MG
3896static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3897{
3898 zoneref->zone = zone;
3899 zoneref->zone_idx = zone_idx(zone);
3900}
3901
1da177e4
LT
3902/*
3903 * Builds allocation fallback zone lists.
1a93205b
CL
3904 *
3905 * Add all populated zones of a node to the zonelist.
1da177e4 3906 */
f0c0b2b8 3907static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
bc732f1d 3908 int nr_zones)
1da177e4 3909{
1a93205b 3910 struct zone *zone;
bc732f1d 3911 enum zone_type zone_type = MAX_NR_ZONES;
02a68a5e
CL
3912
3913 do {
2f6726e5 3914 zone_type--;
070f8032 3915 zone = pgdat->node_zones + zone_type;
1a93205b 3916 if (populated_zone(zone)) {
dd1a239f
MG
3917 zoneref_set_zone(zone,
3918 &zonelist->_zonerefs[nr_zones++]);
070f8032 3919 check_highest_zone(zone_type);
1da177e4 3920 }
2f6726e5 3921 } while (zone_type);
bc732f1d 3922
070f8032 3923 return nr_zones;
1da177e4
LT
3924}
3925
f0c0b2b8
KH
3926
3927/*
3928 * zonelist_order:
3929 * 0 = automatic detection of better ordering.
3930 * 1 = order by ([node] distance, -zonetype)
3931 * 2 = order by (-zonetype, [node] distance)
3932 *
3933 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3934 * the same zonelist. So only NUMA can configure this param.
3935 */
3936#define ZONELIST_ORDER_DEFAULT 0
3937#define ZONELIST_ORDER_NODE 1
3938#define ZONELIST_ORDER_ZONE 2
3939
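/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * for a hypothetical two-node machine where node 0 has DMA32 and Normal
 * zones and node 1 has only a Normal zone, node 0's fallback zonelist is:
 *
 *	Node ordering:	N0-Normal, N0-DMA32, N1-Normal
 *	Zone ordering:	N0-Normal, N1-Normal, N0-DMA32
 *
 * Node ordering keeps allocations local at the risk of exhausting the low
 * zones; zone ordering preserves the low zones but spills to the remote
 * node sooner.
 */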
3940/* zonelist order in the kernel.
3941 * set_zonelist_order() will set this to NODE or ZONE.
3942 */
3943static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3944static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3945
3946
1da177e4 3947#ifdef CONFIG_NUMA
f0c0b2b8
KH
3948/* The value user specified ....changed by config */
3949static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3950/* string for sysctl */
3951#define NUMA_ZONELIST_ORDER_LEN 16
3952char numa_zonelist_order[16] = "default";
3953
3954/*
 3955 * interface for configuring zonelist ordering.
3956 * command line option "numa_zonelist_order"
3957 * = "[dD]efault - default, automatic configuration.
3958 * = "[nN]ode - order by node locality, then by zone within node
3959 * = "[zZ]one - order by zone, then by locality within zone
3960 */
3961
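/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * the ordering can be selected at boot via the early parameter parsed
 * below, e.g. "numa_zonelist_order=Zone", and, assuming the usual sysctl
 * wiring of the handler further down, at run time with something like
 * "echo Node > /proc/sys/vm/numa_zonelist_order".  Only the first
 * character (d/n/z, case-insensitive) is significant.
 */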
3962static int __parse_numa_zonelist_order(char *s)
3963{
3964 if (*s == 'd' || *s == 'D') {
3965 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3966 } else if (*s == 'n' || *s == 'N') {
3967 user_zonelist_order = ZONELIST_ORDER_NODE;
3968 } else if (*s == 'z' || *s == 'Z') {
3969 user_zonelist_order = ZONELIST_ORDER_ZONE;
3970 } else {
3971 printk(KERN_WARNING
3972 "Ignoring invalid numa_zonelist_order value: "
3973 "%s\n", s);
3974 return -EINVAL;
3975 }
3976 return 0;
3977}
3978
3979static __init int setup_numa_zonelist_order(char *s)
3980{
ecb256f8
VL
3981 int ret;
3982
3983 if (!s)
3984 return 0;
3985
3986 ret = __parse_numa_zonelist_order(s);
3987 if (ret == 0)
3988 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3989
3990 return ret;
f0c0b2b8
KH
3991}
3992early_param("numa_zonelist_order", setup_numa_zonelist_order);
3993
3994/*
3995 * sysctl handler for numa_zonelist_order
3996 */
cccad5b9 3997int numa_zonelist_order_handler(struct ctl_table *table, int write,
8d65af78 3998 void __user *buffer, size_t *length,
f0c0b2b8
KH
3999 loff_t *ppos)
4000{
4001 char saved_string[NUMA_ZONELIST_ORDER_LEN];
4002 int ret;
443c6f14 4003 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 4004
443c6f14 4005 mutex_lock(&zl_order_mutex);
dacbde09
CG
4006 if (write) {
4007 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4008 ret = -EINVAL;
4009 goto out;
4010 }
4011 strcpy(saved_string, (char *)table->data);
4012 }
8d65af78 4013 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 4014 if (ret)
443c6f14 4015 goto out;
f0c0b2b8
KH
4016 if (write) {
4017 int oldval = user_zonelist_order;
dacbde09
CG
4018
4019 ret = __parse_numa_zonelist_order((char *)table->data);
4020 if (ret) {
f0c0b2b8
KH
4021 /*
4022 * bogus value. restore saved string
4023 */
dacbde09 4024 strncpy((char *)table->data, saved_string,
f0c0b2b8
KH
4025 NUMA_ZONELIST_ORDER_LEN);
4026 user_zonelist_order = oldval;
4eaf3f64
HL
4027 } else if (oldval != user_zonelist_order) {
4028 mutex_lock(&zonelists_mutex);
9adb62a5 4029 build_all_zonelists(NULL, NULL);
4eaf3f64
HL
4030 mutex_unlock(&zonelists_mutex);
4031 }
f0c0b2b8 4032 }
443c6f14
AK
4033out:
4034 mutex_unlock(&zl_order_mutex);
4035 return ret;
f0c0b2b8
KH
4036}
4037
4038
62bc62a8 4039#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
4040static int node_load[MAX_NUMNODES];
4041
1da177e4 4042/**
4dc3b16b 4043 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
4044 * @node: node whose fallback list we're appending
4045 * @used_node_mask: nodemask_t of already used nodes
4046 *
4047 * We use a number of factors to determine which is the next node that should
4048 * appear on a given node's fallback list. The node should not have appeared
4049 * already in @node's fallback list, and it should be the next closest node
4050 * according to the distance array (which contains arbitrary distance values
4051 * from each node to each node in the system), and should also prefer nodes
4052 * with no CPUs, since presumably they'll have very little allocation pressure
4053 * on them otherwise.
4054 * It returns -1 if no node is found.
4055 */
f0c0b2b8 4056static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 4057{
4cf808eb 4058 int n, val;
1da177e4 4059 int min_val = INT_MAX;
00ef2d2f 4060 int best_node = NUMA_NO_NODE;
a70f7302 4061 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 4062
4cf808eb
LT
4063 /* Use the local node if we haven't already */
4064 if (!node_isset(node, *used_node_mask)) {
4065 node_set(node, *used_node_mask);
4066 return node;
4067 }
1da177e4 4068
4b0ef1fe 4069 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
4070
4071 /* Don't want a node to appear more than once */
4072 if (node_isset(n, *used_node_mask))
4073 continue;
4074
1da177e4
LT
4075 /* Use the distance array to find the distance */
4076 val = node_distance(node, n);
4077
4cf808eb
LT
4078 /* Penalize nodes under us ("prefer the next node") */
4079 val += (n < node);
4080
1da177e4 4081 /* Give preference to headless and unused nodes */
a70f7302
RR
4082 tmp = cpumask_of_node(n);
4083 if (!cpumask_empty(tmp))
1da177e4
LT
4084 val += PENALTY_FOR_NODE_WITH_CPUS;
4085
4086 /* Slight preference for less loaded node */
4087 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4088 val += node_load[n];
4089
4090 if (val < min_val) {
4091 min_val = val;
4092 best_node = n;
4093 }
4094 }
4095
4096 if (best_node >= 0)
4097 node_set(best_node, *used_node_mask);
4098
4099 return best_node;
4100}
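/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * from node 0, with hypothetical candidates node 1 (distance 20, has
 * CPUs) and node 2 (distance 20, memory-only), the scores are:
 *
 *	node 1: (20 + 0 + PENALTY_FOR_NODE_WITH_CPUS)
 *			* MAX_NODE_LOAD * MAX_NUMNODES + node_load[1]
 *	node 2: (20 + 0) * MAX_NODE_LOAD * MAX_NUMNODES + node_load[2]
 *
 * With equal node_load values node 2 has the lower score and is picked,
 * matching the stated preference for headless nodes.
 */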
4101
f0c0b2b8
KH
4102
4103/*
4104 * Build zonelists ordered by node and zones within node.
4105 * This results in maximum locality--normal zone overflows into local
4106 * DMA zone, if any--but risks exhausting DMA zone.
4107 */
4108static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 4109{
f0c0b2b8 4110 int j;
1da177e4 4111 struct zonelist *zonelist;
f0c0b2b8 4112
54a6eb5c 4113 zonelist = &pgdat->node_zonelists[0];
dd1a239f 4114 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c 4115 ;
bc732f1d 4116 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
dd1a239f
MG
4117 zonelist->_zonerefs[j].zone = NULL;
4118 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
4119}
4120
523b9458
CL
4121/*
4122 * Build gfp_thisnode zonelists
4123 */
4124static void build_thisnode_zonelists(pg_data_t *pgdat)
4125{
523b9458
CL
4126 int j;
4127 struct zonelist *zonelist;
4128
54a6eb5c 4129 zonelist = &pgdat->node_zonelists[1];
bc732f1d 4130 j = build_zonelists_node(pgdat, zonelist, 0);
dd1a239f
MG
4131 zonelist->_zonerefs[j].zone = NULL;
4132 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
4133}
4134
f0c0b2b8
KH
4135/*
4136 * Build zonelists ordered by zone and nodes within zones.
4137 * This results in conserving DMA zone[s] until all Normal memory is
4138 * exhausted, but results in overflowing to remote node while memory
4139 * may still exist in local DMA zone.
4140 */
4141static int node_order[MAX_NUMNODES];
4142
4143static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4144{
f0c0b2b8
KH
4145 int pos, j, node;
4146 int zone_type; /* needs to be signed */
4147 struct zone *z;
4148 struct zonelist *zonelist;
4149
54a6eb5c
MG
4150 zonelist = &pgdat->node_zonelists[0];
4151 pos = 0;
4152 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4153 for (j = 0; j < nr_nodes; j++) {
4154 node = node_order[j];
4155 z = &NODE_DATA(node)->node_zones[zone_type];
4156 if (populated_zone(z)) {
dd1a239f
MG
4157 zoneref_set_zone(z,
4158 &zonelist->_zonerefs[pos++]);
54a6eb5c 4159 check_highest_zone(zone_type);
f0c0b2b8
KH
4160 }
4161 }
f0c0b2b8 4162 }
dd1a239f
MG
4163 zonelist->_zonerefs[pos].zone = NULL;
4164 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
4165}
4166
3193913c
MG
4167#if defined(CONFIG_64BIT)
4168/*
4169 * Devices that require DMA32/DMA are relatively rare and do not justify a
4170 * penalty to every machine in case the specialised case applies. Default
4171 * to Node-ordering on 64-bit NUMA machines
4172 */
4173static int default_zonelist_order(void)
4174{
4175 return ZONELIST_ORDER_NODE;
4176}
4177#else
4178/*
4179 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4180 * by the kernel. If processes running on node 0 deplete the low memory zone
 4181 * then reclaim will occur more frequently, increasing stalls and potentially
 4182 * making it easier to OOM if a large percentage of the zone is under writeback or
4183 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
4184 * Hence, default to zone ordering on 32-bit.
4185 */
f0c0b2b8
KH
4186static int default_zonelist_order(void)
4187{
f0c0b2b8
KH
4188 return ZONELIST_ORDER_ZONE;
4189}
3193913c 4190#endif /* CONFIG_64BIT */
f0c0b2b8
KH
4191
4192static void set_zonelist_order(void)
4193{
4194 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4195 current_zonelist_order = default_zonelist_order();
4196 else
4197 current_zonelist_order = user_zonelist_order;
4198}
4199
4200static void build_zonelists(pg_data_t *pgdat)
4201{
c00eb15a 4202 int i, node, load;
1da177e4 4203 nodemask_t used_mask;
f0c0b2b8
KH
4204 int local_node, prev_node;
4205 struct zonelist *zonelist;
d00181b9 4206 unsigned int order = current_zonelist_order;
1da177e4
LT
4207
4208 /* initialize zonelists */
523b9458 4209 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 4210 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
4211 zonelist->_zonerefs[0].zone = NULL;
4212 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
4213 }
4214
4215 /* NUMA-aware ordering of nodes */
4216 local_node = pgdat->node_id;
62bc62a8 4217 load = nr_online_nodes;
1da177e4
LT
4218 prev_node = local_node;
4219 nodes_clear(used_mask);
f0c0b2b8 4220
f0c0b2b8 4221 memset(node_order, 0, sizeof(node_order));
c00eb15a 4222 i = 0;
f0c0b2b8 4223
1da177e4
LT
4224 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4225 /*
4226 * We don't want to pressure a particular node.
4227 * So adding penalty to the first node in same
4228 * distance group to make it round-robin.
4229 */
957f822a
DR
4230 if (node_distance(local_node, node) !=
4231 node_distance(local_node, prev_node))
f0c0b2b8
KH
4232 node_load[node] = load;
4233
1da177e4
LT
4234 prev_node = node;
4235 load--;
f0c0b2b8
KH
4236 if (order == ZONELIST_ORDER_NODE)
4237 build_zonelists_in_node_order(pgdat, node);
4238 else
c00eb15a 4239 node_order[i++] = node; /* remember order */
f0c0b2b8 4240 }
1da177e4 4241
f0c0b2b8
KH
4242 if (order == ZONELIST_ORDER_ZONE) {
4243 /* calculate node order -- i.e., DMA last! */
c00eb15a 4244 build_zonelists_in_zone_order(pgdat, i);
1da177e4 4245 }
523b9458
CL
4246
4247 build_thisnode_zonelists(pgdat);
1da177e4
LT
4248}
4249
7aac7898
LS
4250#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4251/*
4252 * Return node id of node used for "local" allocations.
4253 * I.e., first node id of first zone in arg node's generic zonelist.
4254 * Used for initializing percpu 'numa_mem', which is used primarily
4255 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4256 */
4257int local_memory_node(int node)
4258{
4259 struct zone *zone;
4260
4261 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
4262 gfp_zone(GFP_KERNEL),
4263 NULL,
4264 &zone);
4265 return zone->node;
4266}
4267#endif
f0c0b2b8 4268
1da177e4
LT
4269#else /* CONFIG_NUMA */
4270
f0c0b2b8
KH
4271static void set_zonelist_order(void)
4272{
4273 current_zonelist_order = ZONELIST_ORDER_ZONE;
4274}
4275
4276static void build_zonelists(pg_data_t *pgdat)
1da177e4 4277{
19655d34 4278 int node, local_node;
54a6eb5c
MG
4279 enum zone_type j;
4280 struct zonelist *zonelist;
1da177e4
LT
4281
4282 local_node = pgdat->node_id;
1da177e4 4283
54a6eb5c 4284 zonelist = &pgdat->node_zonelists[0];
bc732f1d 4285 j = build_zonelists_node(pgdat, zonelist, 0);
1da177e4 4286
54a6eb5c
MG
4287 /*
4288 * Now we build the zonelist so that it contains the zones
4289 * of all the other nodes.
4290 * We don't want to pressure a particular node, so when
4291 * building the zones for node N, we make sure that the
4292 * zones coming right after the local ones are those from
4293 * node N+1 (modulo N)
4294 */
4295 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
4296 if (!node_online(node))
4297 continue;
bc732f1d 4298 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
1da177e4 4299 }
54a6eb5c
MG
4300 for (node = 0; node < local_node; node++) {
4301 if (!node_online(node))
4302 continue;
bc732f1d 4303 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
54a6eb5c
MG
4304 }
4305
dd1a239f
MG
4306 zonelist->_zonerefs[j].zone = NULL;
4307 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
4308}
4309
4310#endif /* CONFIG_NUMA */
4311
99dcc3e5
CL
4312/*
4313 * Boot pageset table. One per cpu which is going to be used for all
4314 * zones and all nodes. The parameters will be set in such a way
4315 * that an item put on a list will immediately be handed over to
4316 * the buddy list. This is safe since pageset manipulation is done
4317 * with interrupts disabled.
4318 *
4319 * The boot_pagesets must be kept even after bootup is complete for
4320 * unused processors and/or zones. They do play a role for bootstrapping
4321 * hotplugged processors.
4322 *
4323 * zoneinfo_show() and maybe other functions do
4324 * not check if the processor is online before following the pageset pointer.
4325 * Other parts of the kernel may not check if the zone is available.
4326 */
4327static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4328static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 4329static void setup_zone_pageset(struct zone *zone);
99dcc3e5 4330
4eaf3f64
HL
4331/*
4332 * Global mutex to protect against size modification of zonelists
4333 * as well as to serialize pageset setup for the new populated zone.
4334 */
4335DEFINE_MUTEX(zonelists_mutex);
4336
9b1a4d38 4337/* return values int ....just for stop_machine() */
4ed7e022 4338static int __build_all_zonelists(void *data)
1da177e4 4339{
6811378e 4340 int nid;
99dcc3e5 4341 int cpu;
9adb62a5 4342 pg_data_t *self = data;
9276b1bc 4343
7f9cfb31
BL
4344#ifdef CONFIG_NUMA
4345 memset(node_load, 0, sizeof(node_load));
4346#endif
9adb62a5
JL
4347
4348 if (self && !node_online(self->node_id)) {
4349 build_zonelists(self);
9adb62a5
JL
4350 }
4351
9276b1bc 4352 for_each_online_node(nid) {
7ea1530a
CL
4353 pg_data_t *pgdat = NODE_DATA(nid);
4354
4355 build_zonelists(pgdat);
9276b1bc 4356 }
99dcc3e5
CL
4357
4358 /*
4359 * Initialize the boot_pagesets that are going to be used
4360 * for bootstrapping processors. The real pagesets for
4361 * each zone will be allocated later when the per cpu
4362 * allocator is available.
4363 *
4364 * boot_pagesets are used also for bootstrapping offline
4365 * cpus if the system is already booted because the pagesets
4366 * are needed to initialize allocators on a specific cpu too.
4367 * F.e. the percpu allocator needs the page allocator which
4368 * needs the percpu allocator in order to allocate its pagesets
4369 * (a chicken-egg dilemma).
4370 */
7aac7898 4371 for_each_possible_cpu(cpu) {
99dcc3e5
CL
4372 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
4373
7aac7898
LS
4374#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4375 /*
4376 * We now know the "local memory node" for each node--
4377 * i.e., the node of the first zone in the generic zonelist.
4378 * Set up numa_mem percpu variable for on-line cpus. During
4379 * boot, only the boot cpu should be on-line; we'll init the
4380 * secondary cpus' numa_mem as they come on-line. During
4381 * node/memory hotplug, we'll fixup all on-line cpus.
4382 */
4383 if (cpu_online(cpu))
4384 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
4385#endif
4386 }
4387
6811378e
YG
4388 return 0;
4389}
4390
061f67bc
RV
4391static noinline void __init
4392build_all_zonelists_init(void)
4393{
4394 __build_all_zonelists(NULL);
4395 mminit_verify_zonelist();
4396 cpuset_init_current_mems_allowed();
4397}
4398
4eaf3f64
HL
4399/*
 4400 * Called with zonelists_mutex always held,
4401 * unless system_state == SYSTEM_BOOTING.
061f67bc
RV
4402 *
4403 * __ref due to (1) call of __meminit annotated setup_zone_pageset
4404 * [we're only called with non-NULL zone through __meminit paths] and
4405 * (2) call of __init annotated helper build_all_zonelists_init
4406 * [protected by SYSTEM_BOOTING].
4eaf3f64 4407 */
9adb62a5 4408void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
6811378e 4409{
f0c0b2b8
KH
4410 set_zonelist_order();
4411
6811378e 4412 if (system_state == SYSTEM_BOOTING) {
061f67bc 4413 build_all_zonelists_init();
6811378e 4414 } else {
e9959f0f 4415#ifdef CONFIG_MEMORY_HOTPLUG
9adb62a5
JL
4416 if (zone)
4417 setup_zone_pageset(zone);
e9959f0f 4418#endif
dd1895e2
CS
 4419 /* we have to stop all cpus to guarantee there is no user
 4420 * of zonelist */
9adb62a5 4421 stop_machine(__build_all_zonelists, pgdat, NULL);
6811378e
YG
4422 /* cpuset refresh routine should be here */
4423 }
bd1e22b8 4424 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
4425 /*
4426 * Disable grouping by mobility if the number of pages in the
4427 * system is too low to allow the mechanism to work. It would be
4428 * more accurate, but expensive to check per-zone. This check is
4429 * made on memory-hotadd so a system can start with mobility
4430 * disabled and enable it later
4431 */
d9c23400 4432 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
4433 page_group_by_mobility_disabled = 1;
4434 else
4435 page_group_by_mobility_disabled = 0;
4436
f88dfff5 4437 pr_info("Built %i zonelists in %s order, mobility grouping %s. "
9ef9acb0 4438 "Total pages: %ld\n",
62bc62a8 4439 nr_online_nodes,
f0c0b2b8 4440 zonelist_order_name[current_zonelist_order],
9ef9acb0 4441 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
4442 vm_total_pages);
4443#ifdef CONFIG_NUMA
f88dfff5 4444 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
f0c0b2b8 4445#endif
1da177e4
LT
4446}
4447
4448/*
4449 * Helper functions to size the waitqueue hash table.
4450 * Essentially these want to choose hash table sizes sufficiently
4451 * large so that collisions trying to wait on pages are rare.
4452 * But in fact, the number of active page waitqueues on typical
4453 * systems is ridiculously low, less than 200. So this is even
4454 * conservative, even though it seems large.
4455 *
4456 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4457 * waitqueues, i.e. the size of the waitq table given the number of pages.
4458 */
4459#define PAGES_PER_WAITQUEUE 256
4460
cca448fe 4461#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 4462static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
4463{
4464 unsigned long size = 1;
4465
4466 pages /= PAGES_PER_WAITQUEUE;
4467
4468 while (size < pages)
4469 size <<= 1;
4470
4471 /*
4472 * Once we have dozens or even hundreds of threads sleeping
4473 * on IO we've got bigger problems than wait queue collision.
4474 * Limit the size of the wait table to a reasonable size.
4475 */
4476 size = min(size, 4096UL);
4477
4478 return max(size, 4UL);
4479}
cca448fe
YG
4480#else
4481/*
4482 * A zone's size might be changed by hot-add, so it is not possible to determine
4483 * a suitable size for its wait_table. So we use the maximum size now.
4484 *
4485 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
4486 *
4487 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
4488 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
4489 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
4490 *
4491 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
4492 * or more by the traditional way. (See above). It equals:
4493 *
4494 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
4495 * ia64(16K page size) : = ( 8G + 4M)byte.
4496 * powerpc (64K page size) : = (32G +16M)byte.
4497 */
4498static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4499{
4500 return 4096UL;
4501}
4502#endif
1da177e4
LT
4503
4504/*
4505 * This is an integer logarithm so that shifts can be used later
4506 * to extract the more random high bits from the multiplicative
4507 * hash function before the remainder is taken.
4508 */
4509static inline unsigned long wait_table_bits(unsigned long size)
4510{
4511 return ffz(~size);
4512}
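/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * for a table of 4096 entries, only bit 12 of ~4096 is clear, so
 * ffz(~4096) and therefore wait_table_bits() return 12, i.e. log2 of the
 * (power-of-two) table size.
 */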
4513
1da177e4
LT
4514/*
4515 * Initially all pages are reserved - free ones are freed
4516 * up by free_all_bootmem() once the early boot process is
4517 * done. Non-atomic initialization, single-pass.
4518 */
c09b4240 4519void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 4520 unsigned long start_pfn, enum memmap_context context)
1da177e4 4521{
4b94ffdc 4522 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
29751f69 4523 unsigned long end_pfn = start_pfn + size;
4b94ffdc 4524 pg_data_t *pgdat = NODE_DATA(nid);
29751f69 4525 unsigned long pfn;
3a80a7fa 4526 unsigned long nr_initialised = 0;
342332e6
TI
4527#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4528 struct memblock_region *r = NULL, *tmp;
4529#endif
1da177e4 4530
22b31eec
HD
4531 if (highest_memmap_pfn < end_pfn - 1)
4532 highest_memmap_pfn = end_pfn - 1;
4533
4b94ffdc
DW
4534 /*
4535 * Honor reservation requested by the driver for this ZONE_DEVICE
4536 * memory
4537 */
4538 if (altmap && start_pfn == altmap->base_pfn)
4539 start_pfn += altmap->reserve;
4540
cbe8dd4a 4541 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02 4542 /*
b72d0ffb
AM
4543 * There can be holes in boot-time mem_map[]s handed to this
4544 * function. They do not exist on hotplugged memory.
a2f3aa02 4545 */
b72d0ffb
AM
4546 if (context != MEMMAP_EARLY)
4547 goto not_early;
4548
4549 if (!early_pfn_valid(pfn))
4550 continue;
4551 if (!early_pfn_in_nid(pfn, nid))
4552 continue;
4553 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
4554 break;
342332e6
TI
4555
4556#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
b72d0ffb
AM
4557 /*
 4558 * If not mirrored_kernelcore and ZONE_MOVABLE exists, the range
 4559 * from zone_movable_pfn[nid] to the end of each node should be
 4560 * ZONE_MOVABLE, not ZONE_NORMAL; skip it.
4561 */
4562 if (!mirrored_kernelcore && zone_movable_pfn[nid])
4563 if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
4564 continue;
342332e6 4565
b72d0ffb
AM
4566 /*
 4567 * Check the memblock attribute given by firmware, which can affect
 4568 * the kernel memory layout. If zone==ZONE_MOVABLE but the memory is
 4569 * mirrored, it's an overlapped memmap init; skip it.
4570 */
4571 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
4572 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
4573 for_each_memblock(memory, tmp)
4574 if (pfn < memblock_region_memory_end_pfn(tmp))
4575 break;
4576 r = tmp;
4577 }
4578 if (pfn >= memblock_region_memory_base_pfn(r) &&
4579 memblock_is_mirror(r)) {
4580 /* already initialized as NORMAL */
4581 pfn = memblock_region_memory_end_pfn(r);
4582 continue;
342332e6 4583 }
a2f3aa02 4584 }
b72d0ffb 4585#endif
ac5d2539 4586
b72d0ffb 4587not_early:
ac5d2539
MG
4588 /*
4589 * Mark the block movable so that blocks are reserved for
4590 * movable at startup. This will force kernel allocations
4591 * to reserve their blocks rather than leaking throughout
4592 * the address space during boot when many long-lived
974a786e 4593 * kernel allocations are made.
ac5d2539
MG
4594 *
 4595 * The bitmap is created for the zone's valid pfn range, but the memmap
 4596 * can be created for invalid pages (for alignment), so
 4597 * check here not to call set_pageblock_migratetype() against
 4598 * a pfn outside the zone.
4599 */
4600 if (!(pfn & (pageblock_nr_pages - 1))) {
4601 struct page *page = pfn_to_page(pfn);
4602
4603 __init_single_page(page, pfn, zone, nid);
4604 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4605 } else {
4606 __init_single_pfn(pfn, zone, nid);
4607 }
1da177e4
LT
4608 }
4609}
4610
1e548deb 4611static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 4612{
7aeb09f9 4613 unsigned int order, t;
b2a0ac88
MG
4614 for_each_migratetype_order(order, t) {
4615 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
4616 zone->free_area[order].nr_free = 0;
4617 }
4618}
4619
4620#ifndef __HAVE_ARCH_MEMMAP_INIT
4621#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 4622 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
4623#endif
4624
7cd2b0a3 4625static int zone_batchsize(struct zone *zone)
e7c8d5c9 4626{
3a6be87f 4627#ifdef CONFIG_MMU
e7c8d5c9
CL
4628 int batch;
4629
4630 /*
 4631 * The per-cpu-pages pools are set to around 1/1000th of the
ba56e91c 4632 * size of the zone, but no more than 1/2 of a meg.
e7c8d5c9
CL
4633 *
4634 * OK, so we don't know how big the cache is. So guess.
4635 */
b40da049 4636 batch = zone->managed_pages / 1024;
ba56e91c
SR
4637 if (batch * PAGE_SIZE > 512 * 1024)
4638 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
4639 batch /= 4; /* We effectively *= 4 below */
4640 if (batch < 1)
4641 batch = 1;
4642
4643 /*
0ceaacc9
NP
4644 * Clamp the batch to a 2^n - 1 value. Having a power
4645 * of 2 value was found to be more likely to have
4646 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 4647 *
0ceaacc9
NP
4648 * For example if 2 tasks are alternately allocating
4649 * batches of pages, one task can end up with a lot
4650 * of pages of one half of the possible page colors
4651 * and the other with pages of the other colors.
e7c8d5c9 4652 */
9155203a 4653 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 4654
e7c8d5c9 4655 return batch;
3a6be87f
DH
4656
4657#else
4658 /* The deferral and batching of frees should be suppressed under NOMMU
4659 * conditions.
4660 *
4661 * The problem is that NOMMU needs to be able to allocate large chunks
4662 * of contiguous memory as there's no hardware page translation to
4663 * assemble apparent contiguous memory from discontiguous pages.
4664 *
4665 * Queueing large contiguous runs of pages for batching, however,
4666 * causes the pages to actually be freed in smaller chunks. As there
4667 * can be a significant delay between the individual batches being
4668 * recycled, this leads to the once large chunks of space being
4669 * fragmented and becoming unavailable for high-order allocations.
4670 */
4671 return 0;
4672#endif
e7c8d5c9
CL
4673}
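/*
 * Illustrative example (an editorial sketch, not part of page_alloc.c):
 * for a hypothetical zone of 1048576 managed 4KiB pages (4GiB):
 *
 *	batch = 1048576 / 1024 = 1024	(worth 4MiB, above the 512KiB cap)
 *	batch = (512 * 1024) / 4096 = 128
 *	batch /= 4			-> 32
 *	rounddown_pow_of_two(32 + 16) - 1 -> 31
 *
 * so such a zone ends up with a per-cpu batch of 31 pages.
 */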
4674
8d7a8fa9
CS
4675/*
4676 * pcp->high and pcp->batch values are related and dependent on one another:
 4677 * ->batch must never be higher than ->high.
4678 * The following function updates them in a safe manner without read side
4679 * locking.
4680 *
4681 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 4682 * those fields changing asynchronously (according to the above rule).
4683 *
4684 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
4685 * outside of boot time (or some other assurance that no concurrent updaters
4686 * exist).
4687 */
4688static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4689 unsigned long batch)
4690{
4691 /* start with a fail safe value for batch */
4692 pcp->batch = 1;
4693 smp_wmb();
4694
4695 /* Update high, then batch, in order */
4696 pcp->high = high;
4697 smp_wmb();
4698
4699 pcp->batch = batch;
4700}
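/*
 * Editorial note (not part of page_alloc.c): the fail-safe ordering above
 * matters because readers take no lock.  Dropping ->high from, say, 186 to
 * 6 while ->batch were still 31 would let a reader briefly observe
 * batch > high; writing batch = 1 first, then high, then the real batch
 * keeps the "batch never higher than ->high" rule true at every
 * intermediate step.
 */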
4701
3664033c 4702/* a companion to pageset_set_high() */
4008bab7
CS
4703static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4704{
8d7a8fa9 4705 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4008bab7
CS
4706}
4707
88c90dbc 4708static void pageset_init(struct per_cpu_pageset *p)
2caaad41
CL
4709{
4710 struct per_cpu_pages *pcp;
5f8dcc21 4711 int migratetype;
2caaad41 4712
1c6fe946
MD
4713 memset(p, 0, sizeof(*p));
4714
3dfa5721 4715 pcp = &p->pcp;
2caaad41 4716 pcp->count = 0;
5f8dcc21
MG
4717 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4718 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
4719}
4720
88c90dbc
CS
4721static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4722{
4723 pageset_init(p);
4724 pageset_set_batch(p, batch);
4725}
4726
8ad4b1fb 4727/*
3664033c 4728 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
8ad4b1fb
RS
4729 * to the value high for the pageset p.
4730 */
3664033c 4731static void pageset_set_high(struct per_cpu_pageset *p,
8ad4b1fb
RS
4732 unsigned long high)
4733{
8d7a8fa9
CS
4734 unsigned long batch = max(1UL, high / 4);
4735 if ((high / 4) > (PAGE_SHIFT * 8))
4736 batch = PAGE_SHIFT * 8;
8ad4b1fb 4737
8d7a8fa9 4738 pageset_update(&p->pcp, high, batch);
8ad4b1fb
RS
4739}
4740
7cd2b0a3
DR
4741static void pageset_set_high_and_batch(struct zone *zone,
4742 struct per_cpu_pageset *pcp)
56cef2b8 4743{
56cef2b8 4744 if (percpu_pagelist_fraction)
3664033c 4745 pageset_set_high(pcp,
56cef2b8
CS
4746 (zone->managed_pages /
4747 percpu_pagelist_fraction));
4748 else
4749 pageset_set_batch(pcp, zone_batchsize(zone));
4750}
4751
169f6c19
CS
4752static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4753{
4754 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4755
4756 pageset_init(pcp);
4757 pageset_set_high_and_batch(zone, pcp);
4758}
4759
4ed7e022 4760static void __meminit setup_zone_pageset(struct zone *zone)
319774e2
WF
4761{
4762 int cpu;
319774e2 4763 zone->pageset = alloc_percpu(struct per_cpu_pageset);
56cef2b8
CS
4764 for_each_possible_cpu(cpu)
4765 zone_pageset_init(zone, cpu);
319774e2
WF
4766}
4767
2caaad41 4768/*
99dcc3e5
CL
4769 * Allocate per cpu pagesets and initialize them.
4770 * Before this call only boot pagesets were available.
e7c8d5c9 4771 */
99dcc3e5 4772void __init setup_per_cpu_pageset(void)
e7c8d5c9 4773{
99dcc3e5 4774 struct zone *zone;
e7c8d5c9 4775
319774e2
WF
4776 for_each_populated_zone(zone)
4777 setup_zone_pageset(zone);
e7c8d5c9
CL
4778}
4779
577a32f6 4780static noinline __init_refok
cca448fe 4781int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
4782{
4783 int i;
cca448fe 4784 size_t alloc_size;
ed8ece2e
DH
4785
4786 /*
4787 * The per-page waitqueue mechanism uses hashed waitqueues
4788 * per zone.
4789 */
02b694de
YG
4790 zone->wait_table_hash_nr_entries =
4791 wait_table_hash_nr_entries(zone_size_pages);
4792 zone->wait_table_bits =
4793 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
4794 alloc_size = zone->wait_table_hash_nr_entries
4795 * sizeof(wait_queue_head_t);
4796
cd94b9db 4797 if (!slab_is_available()) {
cca448fe 4798 zone->wait_table = (wait_queue_head_t *)
6782832e
SS
4799 memblock_virt_alloc_node_nopanic(
4800 alloc_size, zone->zone_pgdat->node_id);
cca448fe
YG
4801 } else {
4802 /*
4803 * This case means that a zone whose size was 0 gets new memory
4804 * via memory hot-add.
4805 * But it may be the case that a new node was hot-added. In
4806 * this case vmalloc() will not be able to use this new node's
4807 * memory - this wait_table must be initialized to use this new
4808 * node itself as well.
4809 * To use this new node's memory, further consideration will be
4810 * necessary.
4811 */
8691f3a7 4812 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
4813 }
4814 if (!zone->wait_table)
4815 return -ENOMEM;
ed8ece2e 4816
b8af2941 4817 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 4818 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
4819
4820 return 0;
ed8ece2e
DH
4821}
4822
c09b4240 4823static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 4824{
99dcc3e5
CL
4825 /*
4826 * per cpu subsystem is not up at this point. The following code
4827 * relies on the ability of the linker to provide the
4828 * offset of a (static) per cpu variable into the per cpu area.
4829 */
4830 zone->pageset = &boot_pageset;
ed8ece2e 4831
b38a8725 4832 if (populated_zone(zone))
99dcc3e5
CL
4833 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
4834 zone->name, zone->present_pages,
4835 zone_batchsize(zone));
ed8ece2e
DH
4836}
4837
4ed7e022 4838int __meminit init_currently_empty_zone(struct zone *zone,
718127cc 4839 unsigned long zone_start_pfn,
b171e409 4840 unsigned long size)
ed8ece2e
DH
4841{
4842 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
4843 int ret;
4844 ret = zone_wait_table_init(zone, size);
4845 if (ret)
4846 return ret;
ed8ece2e
DH
4847 pgdat->nr_zones = zone_idx(zone) + 1;
4848
ed8ece2e
DH
4849 zone->zone_start_pfn = zone_start_pfn;
4850
708614e6
MG
4851 mminit_dprintk(MMINIT_TRACE, "memmap_init",
4852 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4853 pgdat->node_id,
4854 (unsigned long)zone_idx(zone),
4855 zone_start_pfn, (zone_start_pfn + size));
4856
1e548deb 4857 zone_init_free_lists(zone);
718127cc
YG
4858
4859 return 0;
ed8ece2e
DH
4860}
4861
0ee332c1 4862#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d 4863#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
8a942fde 4864
c713216d
MG
4865/*
4866 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
c713216d 4867 */
8a942fde
MG
4868int __meminit __early_pfn_to_nid(unsigned long pfn,
4869 struct mminit_pfnnid_cache *state)
c713216d 4870{
c13291a5 4871 unsigned long start_pfn, end_pfn;
e76b63f8 4872 int nid;
7c243c71 4873
8a942fde
MG
4874 if (state->last_start <= pfn && pfn < state->last_end)
4875 return state->last_nid;
c713216d 4876
e76b63f8
YL
4877 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
4878 if (nid != -1) {
8a942fde
MG
4879 state->last_start = start_pfn;
4880 state->last_end = end_pfn;
4881 state->last_nid = nid;
e76b63f8
YL
4882 }
4883
4884 return nid;
c713216d
MG
4885}
4886#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4887
c713216d 4888/**
6782832e 4889 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
88ca3b94 4890 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
6782832e 4891 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
c713216d 4892 *
7d018176
ZZ
4893 * If an architecture guarantees that all ranges registered contain no holes
 4894 * and may be freed, this function may be used instead of calling
4895 * memblock_free_early_nid() manually.
c713216d 4896 */
c13291a5 4897void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
cc289894 4898{
c13291a5
TH
4899 unsigned long start_pfn, end_pfn;
4900 int i, this_nid;
edbe7d23 4901
c13291a5
TH
4902 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4903 start_pfn = min(start_pfn, max_low_pfn);
4904 end_pfn = min(end_pfn, max_low_pfn);
edbe7d23 4905
c13291a5 4906 if (start_pfn < end_pfn)
6782832e
SS
4907 memblock_free_early_nid(PFN_PHYS(start_pfn),
4908 (end_pfn - start_pfn) << PAGE_SHIFT,
4909 this_nid);
edbe7d23 4910 }
edbe7d23 4911}
edbe7d23 4912
c713216d
MG
4913/**
4914 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 4915 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d 4916 *
7d018176
ZZ
4917 * If an architecture guarantees that all ranges registered contain no holes and may
4918 * be freed, this function may be used instead of calling memory_present() manually.
c713216d
MG
4919 */
4920void __init sparse_memory_present_with_active_regions(int nid)
4921{
c13291a5
TH
4922 unsigned long start_pfn, end_pfn;
4923 int i, this_nid;
c713216d 4924
c13291a5
TH
4925 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4926 memory_present(this_nid, start_pfn, end_pfn);
c713216d
MG
4927}
4928
4929/**
4930 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
4931 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4932 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4933 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
4934 *
4935 * It returns the start and end page frame of a node based on information
7d018176 4936 * provided by memblock_set_node(). If called for a node
c713216d 4937 * with no available memory, a warning is printed and the start and end
88ca3b94 4938 * PFNs will be 0.
c713216d 4939 */
a3142c8e 4940void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
4941 unsigned long *start_pfn, unsigned long *end_pfn)
4942{
c13291a5 4943 unsigned long this_start_pfn, this_end_pfn;
c713216d 4944 int i;
c13291a5 4945
c713216d
MG
4946 *start_pfn = -1UL;
4947 *end_pfn = 0;
4948
c13291a5
TH
4949 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4950 *start_pfn = min(*start_pfn, this_start_pfn);
4951 *end_pfn = max(*end_pfn, this_end_pfn);
c713216d
MG
4952 }
4953
633c0666 4954 if (*start_pfn == -1UL)
c713216d 4955 *start_pfn = 0;
c713216d
MG
4956}
4957
2a1e274a
MG
4958/*
4959 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 4960 * assumption is made that zones within a node are ordered in monotonically
4961 * increasing memory addresses so that the "highest" populated zone is used
4962 */
b69a7288 4963static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
4964{
4965 int zone_index;
4966 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4967 if (zone_index == ZONE_MOVABLE)
4968 continue;
4969
4970 if (arch_zone_highest_possible_pfn[zone_index] >
4971 arch_zone_lowest_possible_pfn[zone_index])
4972 break;
4973 }
4974
4975 VM_BUG_ON(zone_index == -1);
4976 movable_zone = zone_index;
4977}
4978
4979/*
4980 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
25985edc 4981 * because it is sized independently of the architecture. Unlike the other zones,
2a1e274a
MG
4982 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4983 * in each node depending on the size of each node and how evenly kernelcore
4984 * is distributed. This helper function adjusts the zone ranges
4985 * provided by the architecture for a given node by using the end of the
4986 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 4987 * zones within a node are in order of monotonically increasing memory addresses
4988 */
b69a7288 4989static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
4990 unsigned long zone_type,
4991 unsigned long node_start_pfn,
4992 unsigned long node_end_pfn,
4993 unsigned long *zone_start_pfn,
4994 unsigned long *zone_end_pfn)
4995{
4996 /* Only adjust if ZONE_MOVABLE is on this node */
4997 if (zone_movable_pfn[nid]) {
4998 /* Size ZONE_MOVABLE */
4999 if (zone_type == ZONE_MOVABLE) {
5000 *zone_start_pfn = zone_movable_pfn[nid];
5001 *zone_end_pfn = min(node_end_pfn,
5002 arch_zone_highest_possible_pfn[movable_zone]);
5003
2a1e274a
MG
5004 /* Check if this whole range is within ZONE_MOVABLE */
5005 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5006 *zone_start_pfn = *zone_end_pfn;
5007 }
5008}
5009
c713216d
MG
5010/*
5011 * Return the number of pages a zone spans in a node, including holes
5012 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5013 */
6ea6e688 5014static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d 5015 unsigned long zone_type,
7960aedd
ZY
5016 unsigned long node_start_pfn,
5017 unsigned long node_end_pfn,
d91749c1
TI
5018 unsigned long *zone_start_pfn,
5019 unsigned long *zone_end_pfn,
c713216d
MG
5020 unsigned long *ignored)
5021{
b5685e92 5022 /* When hot-adding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
5023 if (!node_start_pfn && !node_end_pfn)
5024 return 0;
5025
7960aedd 5026 /* Get the start and end of the zone */
d91749c1
TI
5027 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5028 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
5029 adjust_zone_range_for_zone_movable(nid, zone_type,
5030 node_start_pfn, node_end_pfn,
d91749c1 5031 zone_start_pfn, zone_end_pfn);
c713216d
MG
5032
5033 /* Check that this node has pages within the zone's required range */
d91749c1 5034 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
c713216d
MG
5035 return 0;
5036
5037 /* Move the zone boundaries inside the node if necessary */
d91749c1
TI
5038 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5039 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
c713216d
MG
5040
5041 /* Return the spanned pages */
d91749c1 5042 return *zone_end_pfn - *zone_start_pfn;
c713216d
MG
5043}
5044
5045/*
5046 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 5047 * then all holes in the requested range will be accounted for.
c713216d 5048 */
32996250 5049unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
5050 unsigned long range_start_pfn,
5051 unsigned long range_end_pfn)
5052{
96e907d1
TH
5053 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5054 unsigned long start_pfn, end_pfn;
5055 int i;
c713216d 5056
96e907d1
TH
5057 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5058 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5059 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5060 nr_absent -= end_pfn - start_pfn;
c713216d 5061 }
96e907d1 5062 return nr_absent;
c713216d
MG
5063}
5064
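
A brief worked example of the hole accounting above may help; the PFNs are purely illustrative and not taken from any real memory map.

/*
 * Illustrative example: for the request range [0x1000, 0x5000) the walk
 * starts with nr_absent = 0x4000.  If memblock reports memory at
 * [0x0000, 0x2000) and [0x3000, 0x6000), the clamped intersections are
 * [0x1000, 0x2000) and [0x3000, 0x5000), so
 * nr_absent = 0x4000 - 0x1000 - 0x2000 = 0x1000 pages of holes.
 */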
5065/**
5066 * absent_pages_in_range - Return number of page frames in holes within a range
5067 * @start_pfn: The start PFN to start searching for holes
5068 * @end_pfn: The end PFN to stop searching for holes
5069 *
88ca3b94 5070 * It returns the number of page frames in memory holes within a range.
c713216d
MG
5071 */
5072unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5073 unsigned long end_pfn)
5074{
5075 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5076}
5077
5078/* Return the number of page frames in holes in a zone on a node */
6ea6e688 5079static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d 5080 unsigned long zone_type,
7960aedd
ZY
5081 unsigned long node_start_pfn,
5082 unsigned long node_end_pfn,
c713216d
MG
5083 unsigned long *ignored)
5084{
96e907d1
TH
5085 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5086 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
9c7cd687 5087 unsigned long zone_start_pfn, zone_end_pfn;
342332e6 5088 unsigned long nr_absent;
9c7cd687 5089
b5685e92 5090 /* When hot-adding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
5091 if (!node_start_pfn && !node_end_pfn)
5092 return 0;
5093
96e907d1
TH
5094 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5095 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
9c7cd687 5096
2a1e274a
MG
5097 adjust_zone_range_for_zone_movable(nid, zone_type,
5098 node_start_pfn, node_end_pfn,
5099 &zone_start_pfn, &zone_end_pfn);
342332e6
TI
5100 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5101
5102 /*
5103 * ZONE_MOVABLE handling.
5104 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5105 * and vice versa.
5106 */
5107 if (zone_movable_pfn[nid]) {
5108 if (mirrored_kernelcore) {
5109 unsigned long start_pfn, end_pfn;
5110 struct memblock_region *r;
5111
5112 for_each_memblock(memory, r) {
5113 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5114 zone_start_pfn, zone_end_pfn);
5115 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5116 zone_start_pfn, zone_end_pfn);
5117
5118 if (zone_type == ZONE_MOVABLE &&
5119 memblock_is_mirror(r))
5120 nr_absent += end_pfn - start_pfn;
5121
5122 if (zone_type == ZONE_NORMAL &&
5123 !memblock_is_mirror(r))
5124 nr_absent += end_pfn - start_pfn;
5125 }
5126 } else {
5127 if (zone_type == ZONE_NORMAL)
5128 nr_absent += node_end_pfn - zone_movable_pfn[nid];
5129 }
5130 }
5131
5132 return nr_absent;
c713216d 5133}
0e0b864e 5134
0ee332c1 5135#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6ea6e688 5136static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d 5137 unsigned long zone_type,
7960aedd
ZY
5138 unsigned long node_start_pfn,
5139 unsigned long node_end_pfn,
d91749c1
TI
5140 unsigned long *zone_start_pfn,
5141 unsigned long *zone_end_pfn,
c713216d
MG
5142 unsigned long *zones_size)
5143{
d91749c1
TI
5144 unsigned int zone;
5145
5146 *zone_start_pfn = node_start_pfn;
5147 for (zone = 0; zone < zone_type; zone++)
5148 *zone_start_pfn += zones_size[zone];
5149
5150 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5151
c713216d
MG
5152 return zones_size[zone_type];
5153}
5154
6ea6e688 5155static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d 5156 unsigned long zone_type,
7960aedd
ZY
5157 unsigned long node_start_pfn,
5158 unsigned long node_end_pfn,
c713216d
MG
5159 unsigned long *zholes_size)
5160{
5161 if (!zholes_size)
5162 return 0;
5163
5164 return zholes_size[zone_type];
5165}
20e6926d 5166
0ee332c1 5167#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5168
a3142c8e 5169static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
7960aedd
ZY
5170 unsigned long node_start_pfn,
5171 unsigned long node_end_pfn,
5172 unsigned long *zones_size,
5173 unsigned long *zholes_size)
c713216d 5174{
febd5949 5175 unsigned long realtotalpages = 0, totalpages = 0;
c713216d
MG
5176 enum zone_type i;
5177
febd5949
GZ
5178 for (i = 0; i < MAX_NR_ZONES; i++) {
5179 struct zone *zone = pgdat->node_zones + i;
d91749c1 5180 unsigned long zone_start_pfn, zone_end_pfn;
febd5949 5181 unsigned long size, real_size;
c713216d 5182
febd5949
GZ
5183 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5184 node_start_pfn,
5185 node_end_pfn,
d91749c1
TI
5186 &zone_start_pfn,
5187 &zone_end_pfn,
febd5949
GZ
5188 zones_size);
5189 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
7960aedd
ZY
5190 node_start_pfn, node_end_pfn,
5191 zholes_size);
d91749c1
TI
5192 if (size)
5193 zone->zone_start_pfn = zone_start_pfn;
5194 else
5195 zone->zone_start_pfn = 0;
febd5949
GZ
5196 zone->spanned_pages = size;
5197 zone->present_pages = real_size;
5198
5199 totalpages += size;
5200 realtotalpages += real_size;
5201 }
5202
5203 pgdat->node_spanned_pages = totalpages;
c713216d
MG
5204 pgdat->node_present_pages = realtotalpages;
5205 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5206 realtotalpages);
5207}
5208
835c134e
MG
5209#ifndef CONFIG_SPARSEMEM
5210/*
5211 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
5212 * Start by making sure zonesize is a multiple of pageblock_order by rounding
5213 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
5214 * round what is now in bits to nearest long in bits, then return it in
5215 * bytes.
5216 */
7c45512d 5217static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
835c134e
MG
5218{
5219 unsigned long usemapsize;
5220
7c45512d 5221 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
d9c23400
MG
5222 usemapsize = roundup(zonesize, pageblock_nr_pages);
5223 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
5224 usemapsize *= NR_PAGEBLOCK_BITS;
5225 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5226
5227 return usemapsize / 8;
5228}
5229
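
To make the rounding steps above concrete, here is a worked example assuming 4 KiB pages, pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4 (typical x86 defaults); the zone size is hypothetical.

/*
 * Example: a pageblock-aligned zone of 1,048,576 pages.
 *   roundup(1048576, 512) >> 9   = 2048 pageblocks
 *   2048 * NR_PAGEBLOCK_BITS     = 8192 bits
 *   roundup(8192, 64)            = 8192 bits (already long-aligned)
 *   8192 / 8                     = 1024 bytes of pageblock flags
 */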
5230static void __init setup_usemap(struct pglist_data *pgdat,
7c45512d
LT
5231 struct zone *zone,
5232 unsigned long zone_start_pfn,
5233 unsigned long zonesize)
835c134e 5234{
7c45512d 5235 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
835c134e 5236 zone->pageblock_flags = NULL;
58a01a45 5237 if (usemapsize)
6782832e
SS
5238 zone->pageblock_flags =
5239 memblock_virt_alloc_node_nopanic(usemapsize,
5240 pgdat->node_id);
835c134e
MG
5241}
5242#else
7c45512d
LT
5243static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5244 unsigned long zone_start_pfn, unsigned long zonesize) {}
835c134e
MG
5245#endif /* CONFIG_SPARSEMEM */
5246
d9c23400 5247#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c 5248
d9c23400 5249/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
15ca220e 5250void __paginginit set_pageblock_order(void)
d9c23400 5251{
955c1cd7
AM
5252 unsigned int order;
5253
d9c23400
MG
5254 /* Check that pageblock_nr_pages has not already been setup */
5255 if (pageblock_order)
5256 return;
5257
955c1cd7
AM
5258 if (HPAGE_SHIFT > PAGE_SHIFT)
5259 order = HUGETLB_PAGE_ORDER;
5260 else
5261 order = MAX_ORDER - 1;
5262
d9c23400
MG
5263 /*
5264 * Assume the largest contiguous order of interest is a huge page.
955c1cd7
AM
5265 * This value may be variable depending on boot parameters on IA64 and
5266 * powerpc.
d9c23400
MG
5267 */
5268 pageblock_order = order;
5269}
5270#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5271
ba72cb8c
MG
5272/*
5273 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
955c1cd7
AM
5274 * is unused as pageblock_order is set at compile-time. See
5275 * include/linux/pageblock-flags.h for the values of pageblock_order based on
5276 * the kernel config
ba72cb8c 5277 */
15ca220e 5278void __paginginit set_pageblock_order(void)
ba72cb8c 5279{
ba72cb8c 5280}
d9c23400
MG
5281
5282#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5283
01cefaef
JL
5284static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5285 unsigned long present_pages)
5286{
5287 unsigned long pages = spanned_pages;
5288
5289 /*
5290 * Provide a more accurate estimation if there are holes within
5291 * the zone and SPARSEMEM is in use. If there are holes within the
5292 * zone, each populated memory region may cost us one or two extra
5293 * memmap pages due to alignment because memmap pages for each
 5294 * populated regions may not be naturally aligned on page boundaries.
5295 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5296 */
5297 if (spanned_pages > present_pages + (present_pages >> 4) &&
5298 IS_ENABLED(CONFIG_SPARSEMEM))
5299 pages = present_pages;
5300
5301 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5302}
5303
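
The following numbers, assuming sizeof(struct page) == 64 and 4 KiB pages, illustrate the heuristic; they are not measurements from a real system.

/*
 * Example: a zone with spanned_pages == 400,000 but present_pages ==
 * 262,144.  Since 400,000 > 262,144 + (262,144 >> 4) == 278,528 and
 * SPARSEMEM is enabled, the estimate uses present_pages:
 * 262,144 * 64 bytes == 16 MiB of memmap, i.e. 4,096 pages.
 */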
1da177e4
LT
5304/*
5305 * Set up the zone data structures:
5306 * - mark all pages reserved
5307 * - mark all memory queues empty
5308 * - clear the memory bitmaps
6527af5d
MK
5309 *
5310 * NOTE: pgdat should get zeroed by caller.
1da177e4 5311 */
7f3eb55b 5312static void __paginginit free_area_init_core(struct pglist_data *pgdat)
1da177e4 5313{
2f1b6248 5314 enum zone_type j;
ed8ece2e 5315 int nid = pgdat->node_id;
718127cc 5316 int ret;
1da177e4 5317
208d54e5 5318 pgdat_resize_init(pgdat);
8177a420
AA
5319#ifdef CONFIG_NUMA_BALANCING
5320 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5321 pgdat->numabalancing_migrate_nr_pages = 0;
5322 pgdat->numabalancing_migrate_next_window = jiffies;
a3d0a918
KS
5323#endif
5324#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5325 spin_lock_init(&pgdat->split_queue_lock);
5326 INIT_LIST_HEAD(&pgdat->split_queue);
5327 pgdat->split_queue_len = 0;
8177a420 5328#endif
1da177e4 5329 init_waitqueue_head(&pgdat->kswapd_wait);
5515061d 5330 init_waitqueue_head(&pgdat->pfmemalloc_wait);
eefa864b 5331 pgdat_page_ext_init(pgdat);
5f63b720 5332
1da177e4
LT
5333 for (j = 0; j < MAX_NR_ZONES; j++) {
5334 struct zone *zone = pgdat->node_zones + j;
9feedc9d 5335 unsigned long size, realsize, freesize, memmap_pages;
d91749c1 5336 unsigned long zone_start_pfn = zone->zone_start_pfn;
1da177e4 5337
febd5949
GZ
5338 size = zone->spanned_pages;
5339 realsize = freesize = zone->present_pages;
1da177e4 5340
0e0b864e 5341 /*
9feedc9d 5342 * Adjust freesize so that it accounts for how much memory
0e0b864e
MG
5343 * is used by this zone for memmap. This affects the watermark
5344 * and per-cpu initialisations
5345 */
01cefaef 5346 memmap_pages = calc_memmap_size(size, realsize);
ba914f48
ZH
5347 if (!is_highmem_idx(j)) {
5348 if (freesize >= memmap_pages) {
5349 freesize -= memmap_pages;
5350 if (memmap_pages)
5351 printk(KERN_DEBUG
5352 " %s zone: %lu pages used for memmap\n",
5353 zone_names[j], memmap_pages);
5354 } else
5355 printk(KERN_WARNING
5356 " %s zone: %lu pages exceeds freesize %lu\n",
5357 zone_names[j], memmap_pages, freesize);
5358 }
0e0b864e 5359
6267276f 5360 /* Account for reserved pages */
9feedc9d
JL
5361 if (j == 0 && freesize > dma_reserve) {
5362 freesize -= dma_reserve;
d903ef9f 5363 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 5364 zone_names[0], dma_reserve);
0e0b864e
MG
5365 }
5366
98d2b0eb 5367 if (!is_highmem_idx(j))
9feedc9d 5368 nr_kernel_pages += freesize;
01cefaef
JL
5369 /* Charge for highmem memmap if there are enough kernel pages */
5370 else if (nr_kernel_pages > memmap_pages * 2)
5371 nr_kernel_pages -= memmap_pages;
9feedc9d 5372 nr_all_pages += freesize;
1da177e4 5373
9feedc9d
JL
5374 /*
 5375 * Set an approximate value for lowmem here; it will be adjusted
5376 * when the bootmem allocator frees pages into the buddy system.
5377 * And all highmem pages will be managed by the buddy system.
5378 */
5379 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
9614634f 5380#ifdef CONFIG_NUMA
d5f541ed 5381 zone->node = nid;
9feedc9d 5382 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
9614634f 5383 / 100;
9feedc9d 5384 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
9614634f 5385#endif
1da177e4
LT
5386 zone->name = zone_names[j];
5387 spin_lock_init(&zone->lock);
5388 spin_lock_init(&zone->lru_lock);
bdc8cb98 5389 zone_seqlock_init(zone);
1da177e4 5390 zone->zone_pgdat = pgdat;
ed8ece2e 5391 zone_pcp_init(zone);
81c0a2bb
JW
5392
5393 /* For bootup, initialized properly in watermark setup */
5394 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
5395
bea8c150 5396 lruvec_init(&zone->lruvec);
1da177e4
LT
5397 if (!size)
5398 continue;
5399
955c1cd7 5400 set_pageblock_order();
7c45512d 5401 setup_usemap(pgdat, zone, zone_start_pfn, size);
b171e409 5402 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
718127cc 5403 BUG_ON(ret);
76cdd58e 5404 memmap_init(size, nid, j, zone_start_pfn);
1da177e4
LT
5405 }
5406}
5407
577a32f6 5408static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 5409{
b0aeba74 5410 unsigned long __maybe_unused start = 0;
a1c34a3b
LA
5411 unsigned long __maybe_unused offset = 0;
5412
1da177e4
LT
5413 /* Skip empty nodes */
5414 if (!pgdat->node_spanned_pages)
5415 return;
5416
d41dee36 5417#ifdef CONFIG_FLAT_NODE_MEM_MAP
b0aeba74
TL
5418 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5419 offset = pgdat->node_start_pfn - start;
1da177e4
LT
5420 /* ia64 gets its own node_mem_map, before this, without bootmem */
5421 if (!pgdat->node_mem_map) {
b0aeba74 5422 unsigned long size, end;
d41dee36
AW
5423 struct page *map;
5424
e984bb43
BP
5425 /*
5426 * The zone's endpoints aren't required to be MAX_ORDER
 5427 * aligned, but the node_mem_map endpoints must be MAX_ORDER aligned
5428 * for the buddy allocator to function correctly.
5429 */
108bcc96 5430 end = pgdat_end_pfn(pgdat);
e984bb43
BP
5431 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5432 size = (end - start) * sizeof(struct page);
6f167ec7
DH
5433 map = alloc_remap(pgdat->node_id, size);
5434 if (!map)
6782832e
SS
5435 map = memblock_virt_alloc_node_nopanic(size,
5436 pgdat->node_id);
a1c34a3b 5437 pgdat->node_mem_map = map + offset;
1da177e4 5438 }
12d810c1 5439#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
5440 /*
5441 * With no DISCONTIG, the global mem_map is just set as node 0's
5442 */
c713216d 5443 if (pgdat == NODE_DATA(0)) {
1da177e4 5444 mem_map = NODE_DATA(0)->node_mem_map;
a1c34a3b 5445#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
c713216d 5446 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
a1c34a3b 5447 mem_map -= offset;
0ee332c1 5448#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5449 }
1da177e4 5450#endif
d41dee36 5451#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
5452}
5453
9109fb7b
JW
5454void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5455 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 5456{
9109fb7b 5457 pg_data_t *pgdat = NODE_DATA(nid);
7960aedd
ZY
5458 unsigned long start_pfn = 0;
5459 unsigned long end_pfn = 0;
9109fb7b 5460
88fdf75d 5461 /* pg_data_t should be reset to zero when it's allocated */
8783b6e2 5462 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
88fdf75d 5463
3a80a7fa 5464 reset_deferred_meminit(pgdat);
1da177e4
LT
5465 pgdat->node_id = nid;
5466 pgdat->node_start_pfn = node_start_pfn;
7960aedd
ZY
5467#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5468 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8d29e18a 5469 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
4ada0c5a
ZL
5470 (u64)start_pfn << PAGE_SHIFT,
5471 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
d91749c1
TI
5472#else
5473 start_pfn = node_start_pfn;
7960aedd
ZY
5474#endif
5475 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5476 zones_size, zholes_size);
1da177e4
LT
5477
5478 alloc_node_mem_map(pgdat);
e8c27ac9
YL
5479#ifdef CONFIG_FLAT_NODE_MEM_MAP
5480 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5481 nid, (unsigned long)pgdat,
5482 (unsigned long)pgdat->node_mem_map);
5483#endif
1da177e4 5484
7f3eb55b 5485 free_area_init_core(pgdat);
1da177e4
LT
5486}
5487
0ee332c1 5488#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
418508c1
MS
5489
5490#if MAX_NUMNODES > 1
5491/*
5492 * Figure out the number of possible node ids.
5493 */
f9872caf 5494void __init setup_nr_node_ids(void)
418508c1 5495{
904a9553 5496 unsigned int highest;
418508c1 5497
904a9553 5498 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
418508c1
MS
5499 nr_node_ids = highest + 1;
5500}
418508c1
MS
5501#endif
5502
1e01979c
TH
5503/**
5504 * node_map_pfn_alignment - determine the maximum internode alignment
5505 *
5506 * This function should be called after node map is populated and sorted.
5507 * It calculates the maximum power of two alignment which can distinguish
5508 * all the nodes.
5509 *
5510 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5511 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
5512 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
5513 * shifted, 1GiB is enough and this function will indicate so.
5514 *
5515 * This is used to test whether pfn -> nid mapping of the chosen memory
5516 * model has fine enough granularity to avoid incorrect mapping for the
5517 * populated node map.
5518 *
5519 * Returns the determined alignment in pfn's. 0 if there is no alignment
5520 * requirement (single node).
5521 */
5522unsigned long __init node_map_pfn_alignment(void)
5523{
5524 unsigned long accl_mask = 0, last_end = 0;
c13291a5 5525 unsigned long start, end, mask;
1e01979c 5526 int last_nid = -1;
c13291a5 5527 int i, nid;
1e01979c 5528
c13291a5 5529 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1e01979c
TH
5530 if (!start || last_nid < 0 || last_nid == nid) {
5531 last_nid = nid;
5532 last_end = end;
5533 continue;
5534 }
5535
5536 /*
5537 * Start with a mask granular enough to pin-point to the
5538 * start pfn and tick off bits one-by-one until it becomes
5539 * too coarse to separate the current node from the last.
5540 */
5541 mask = ~((1 << __ffs(start)) - 1);
5542 while (mask && last_end <= (start & (mask << 1)))
5543 mask <<= 1;
5544
5545 /* accumulate all internode masks */
5546 accl_mask |= mask;
5547 }
5548
5549 /* convert mask to number of pages */
5550 return ~accl_mask + 1;
5551}
5552
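
A worked instance of the 256MiB case mentioned in the comment above, assuming 4 KiB pages (so 1 GiB == 0x40000 pfns); the node layout is hypothetical.

/*
 * Example: node 0 covers [256 MiB, 1.25 GiB) and node 1 begins at
 * 1.25 GiB.  For node 1, start == 0x50000 and last_end == 0x50000.
 * The initial mask is ~((1 << __ffs(0x50000)) - 1) == ~0xffff; widening
 * it once would round the start down to 0x40000, below node 0's end,
 * so the loop stops and ~accl_mask + 1 == 0x10000 pfns == 256 MiB.
 */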
a6af2bc3 5553/* Find the lowest pfn for a node */
b69a7288 5554static unsigned long __init find_min_pfn_for_node(int nid)
c713216d 5555{
a6af2bc3 5556 unsigned long min_pfn = ULONG_MAX;
c13291a5
TH
5557 unsigned long start_pfn;
5558 int i;
1abbfb41 5559
c13291a5
TH
5560 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5561 min_pfn = min(min_pfn, start_pfn);
c713216d 5562
a6af2bc3
MG
5563 if (min_pfn == ULONG_MAX) {
5564 printk(KERN_WARNING
2bc0d261 5565 "Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
5566 return 0;
5567 }
5568
5569 return min_pfn;
c713216d
MG
5570}
5571
5572/**
5573 * find_min_pfn_with_active_regions - Find the minimum PFN registered
5574 *
5575 * It returns the minimum PFN based on information provided via
7d018176 5576 * memblock_set_node().
c713216d
MG
5577 */
5578unsigned long __init find_min_pfn_with_active_regions(void)
5579{
5580 return find_min_pfn_for_node(MAX_NUMNODES);
5581}
5582
37b07e41
LS
5583/*
5584 * early_calculate_totalpages()
5585 * Sum pages in active regions for movable zone.
4b0ef1fe 5586 * Populate N_MEMORY for calculating usable_nodes.
37b07e41 5587 */
484f51f8 5588static unsigned long __init early_calculate_totalpages(void)
7e63efef 5589{
7e63efef 5590 unsigned long totalpages = 0;
c13291a5
TH
5591 unsigned long start_pfn, end_pfn;
5592 int i, nid;
5593
5594 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5595 unsigned long pages = end_pfn - start_pfn;
7e63efef 5596
37b07e41
LS
5597 totalpages += pages;
5598 if (pages)
4b0ef1fe 5599 node_set_state(nid, N_MEMORY);
37b07e41 5600 }
b8af2941 5601 return totalpages;
7e63efef
MG
5602}
5603
2a1e274a
MG
5604/*
5605 * Find the PFN the Movable zone begins in each node. Kernel memory
5606 * is spread evenly between nodes as long as the nodes have enough
5607 * memory. When they don't, some nodes will have more kernelcore than
5608 * others
5609 */
b224ef85 5610static void __init find_zone_movable_pfns_for_nodes(void)
2a1e274a
MG
5611{
5612 int i, nid;
5613 unsigned long usable_startpfn;
5614 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd 5615 /* save the state before borrow the nodemask */
4b0ef1fe 5616 nodemask_t saved_node_state = node_states[N_MEMORY];
37b07e41 5617 unsigned long totalpages = early_calculate_totalpages();
4b0ef1fe 5618 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
136199f0 5619 struct memblock_region *r;
b2f3eebe
TC
5620
5621 /* Need to find movable_zone earlier when movable_node is specified. */
5622 find_usable_zone_for_movable();
5623
5624 /*
5625 * If movable_node is specified, ignore kernelcore and movablecore
5626 * options.
5627 */
5628 if (movable_node_is_enabled()) {
136199f0
EM
5629 for_each_memblock(memory, r) {
5630 if (!memblock_is_hotpluggable(r))
b2f3eebe
TC
5631 continue;
5632
136199f0 5633 nid = r->nid;
b2f3eebe 5634
136199f0 5635 usable_startpfn = PFN_DOWN(r->base);
b2f3eebe
TC
5636 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5637 min(usable_startpfn, zone_movable_pfn[nid]) :
5638 usable_startpfn;
5639 }
5640
5641 goto out2;
5642 }
2a1e274a 5643
342332e6
TI
5644 /*
5645 * If kernelcore=mirror is specified, ignore movablecore option
5646 */
5647 if (mirrored_kernelcore) {
5648 bool mem_below_4gb_not_mirrored = false;
5649
5650 for_each_memblock(memory, r) {
5651 if (memblock_is_mirror(r))
5652 continue;
5653
5654 nid = r->nid;
5655
5656 usable_startpfn = memblock_region_memory_base_pfn(r);
5657
5658 if (usable_startpfn < 0x100000) {
5659 mem_below_4gb_not_mirrored = true;
5660 continue;
5661 }
5662
5663 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5664 min(usable_startpfn, zone_movable_pfn[nid]) :
5665 usable_startpfn;
5666 }
5667
5668 if (mem_below_4gb_not_mirrored)
 5669 pr_warn("This configuration results in unmirrored kernel memory.\n");
5670
5671 goto out2;
5672 }
5673
7e63efef 5674 /*
b2f3eebe 5675 * If movablecore=nn[KMG] was specified, calculate what size of
7e63efef
MG
 5676 * kernelcore it corresponds to, so that memory usable for
5677 * any allocation type is evenly spread. If both kernelcore
5678 * and movablecore are specified, then the value of kernelcore
5679 * will be used for required_kernelcore if it's greater than
5680 * what movablecore would have allowed.
5681 */
5682 if (required_movablecore) {
7e63efef
MG
5683 unsigned long corepages;
5684
5685 /*
5686 * Round-up so that ZONE_MOVABLE is at least as large as what
5687 * was requested by the user
5688 */
5689 required_movablecore =
5690 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
9fd745d4 5691 required_movablecore = min(totalpages, required_movablecore);
7e63efef
MG
5692 corepages = totalpages - required_movablecore;
5693
5694 required_kernelcore = max(required_kernelcore, corepages);
5695 }
5696
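	/*
	 * Illustrative numbers (4 KiB pages assumed): on an 8 GiB machine
	 * (totalpages == 2,097,152), movablecore=2G gives
	 * required_movablecore == 524,288 pages, so corepages == 1,572,864
	 * and required_kernelcore ends up covering 6 GiB unless an explicit
	 * kernelcore= value was even larger.
	 */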
bde304bd
XQ
5697 /*
5698 * If kernelcore was not specified or kernelcore size is larger
5699 * than totalpages, there is no ZONE_MOVABLE.
5700 */
5701 if (!required_kernelcore || required_kernelcore >= totalpages)
66918dcd 5702 goto out;
2a1e274a
MG
5703
5704 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
2a1e274a
MG
5705 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5706
5707restart:
5708 /* Spread kernelcore memory as evenly as possible throughout nodes */
5709 kernelcore_node = required_kernelcore / usable_nodes;
4b0ef1fe 5710 for_each_node_state(nid, N_MEMORY) {
c13291a5
TH
5711 unsigned long start_pfn, end_pfn;
5712
2a1e274a
MG
5713 /*
5714 * Recalculate kernelcore_node if the division per node
5715 * now exceeds what is necessary to satisfy the requested
5716 * amount of memory for the kernel
5717 */
5718 if (required_kernelcore < kernelcore_node)
5719 kernelcore_node = required_kernelcore / usable_nodes;
5720
5721 /*
5722 * As the map is walked, we track how much memory is usable
5723 * by the kernel using kernelcore_remaining. When it is
5724 * 0, the rest of the node is usable by ZONE_MOVABLE
5725 */
5726 kernelcore_remaining = kernelcore_node;
5727
5728 /* Go through each range of PFNs within this node */
c13291a5 5729 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2a1e274a
MG
5730 unsigned long size_pages;
5731
c13291a5 5732 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
2a1e274a
MG
5733 if (start_pfn >= end_pfn)
5734 continue;
5735
5736 /* Account for what is only usable for kernelcore */
5737 if (start_pfn < usable_startpfn) {
5738 unsigned long kernel_pages;
5739 kernel_pages = min(end_pfn, usable_startpfn)
5740 - start_pfn;
5741
5742 kernelcore_remaining -= min(kernel_pages,
5743 kernelcore_remaining);
5744 required_kernelcore -= min(kernel_pages,
5745 required_kernelcore);
5746
5747 /* Continue if range is now fully accounted */
5748 if (end_pfn <= usable_startpfn) {
5749
5750 /*
5751 * Push zone_movable_pfn to the end so
5752 * that if we have to rebalance
5753 * kernelcore across nodes, we will
5754 * not double account here
5755 */
5756 zone_movable_pfn[nid] = end_pfn;
5757 continue;
5758 }
5759 start_pfn = usable_startpfn;
5760 }
5761
5762 /*
5763 * The usable PFN range for ZONE_MOVABLE is from
5764 * start_pfn->end_pfn. Calculate size_pages as the
5765 * number of pages used as kernelcore
5766 */
5767 size_pages = end_pfn - start_pfn;
5768 if (size_pages > kernelcore_remaining)
5769 size_pages = kernelcore_remaining;
5770 zone_movable_pfn[nid] = start_pfn + size_pages;
5771
5772 /*
5773 * Some kernelcore has been met, update counts and
5774 * break if the kernelcore for this node has been
b8af2941 5775 * satisfied
2a1e274a
MG
5776 */
5777 required_kernelcore -= min(required_kernelcore,
5778 size_pages);
5779 kernelcore_remaining -= size_pages;
5780 if (!kernelcore_remaining)
5781 break;
5782 }
5783 }
5784
5785 /*
5786 * If there is still required_kernelcore, we do another pass with one
5787 * less node in the count. This will push zone_movable_pfn[nid] further
5788 * along on the nodes that still have memory until kernelcore is
b8af2941 5789 * satisfied
2a1e274a
MG
5790 */
5791 usable_nodes--;
5792 if (usable_nodes && required_kernelcore > usable_nodes)
5793 goto restart;
5794
b2f3eebe 5795out2:
2a1e274a
MG
5796 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5797 for (nid = 0; nid < MAX_NUMNODES; nid++)
5798 zone_movable_pfn[nid] =
5799 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd 5800
20e6926d 5801out:
66918dcd 5802 /* restore the node_state */
4b0ef1fe 5803 node_states[N_MEMORY] = saved_node_state;
2a1e274a
MG
5804}
5805
4b0ef1fe
LJ
 5806/* Any regular or high memory on that node? */
5807static void check_for_memory(pg_data_t *pgdat, int nid)
37b07e41 5808{
37b07e41
LS
5809 enum zone_type zone_type;
5810
4b0ef1fe
LJ
5811 if (N_MEMORY == N_NORMAL_MEMORY)
5812 return;
5813
5814 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
37b07e41 5815 struct zone *zone = &pgdat->node_zones[zone_type];
b38a8725 5816 if (populated_zone(zone)) {
4b0ef1fe
LJ
5817 node_set_state(nid, N_HIGH_MEMORY);
5818 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5819 zone_type <= ZONE_NORMAL)
5820 node_set_state(nid, N_NORMAL_MEMORY);
d0048b0e
BL
5821 break;
5822 }
37b07e41 5823 }
37b07e41
LS
5824}
5825
c713216d
MG
5826/**
5827 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 5828 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
5829 *
5830 * This will call free_area_init_node() for each active node in the system.
7d018176 5831 * Using the page ranges provided by memblock_set_node(), the size of each
c713216d
MG
5832 * zone in each node and their holes is calculated. If the maximum PFN
 5833 * between two adjacent zones matches, it is assumed that the zone is empty.
5834 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5835 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5836 * starts where the previous one ended. For example, ZONE_DMA32 starts
5837 * at arch_max_dma_pfn.
5838 */
5839void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5840{
c13291a5
TH
5841 unsigned long start_pfn, end_pfn;
5842 int i, nid;
a6af2bc3 5843
c713216d
MG
5844 /* Record where the zone boundaries are */
5845 memset(arch_zone_lowest_possible_pfn, 0,
5846 sizeof(arch_zone_lowest_possible_pfn));
5847 memset(arch_zone_highest_possible_pfn, 0,
5848 sizeof(arch_zone_highest_possible_pfn));
5849 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5850 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5851 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
5852 if (i == ZONE_MOVABLE)
5853 continue;
c713216d
MG
5854 arch_zone_lowest_possible_pfn[i] =
5855 arch_zone_highest_possible_pfn[i-1];
5856 arch_zone_highest_possible_pfn[i] =
5857 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5858 }
2a1e274a
MG
5859 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5860 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5861
5862 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
5863 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
b224ef85 5864 find_zone_movable_pfns_for_nodes();
c713216d 5865
c713216d 5866 /* Print out the zone ranges */
f88dfff5 5867 pr_info("Zone ranges:\n");
2a1e274a
MG
5868 for (i = 0; i < MAX_NR_ZONES; i++) {
5869 if (i == ZONE_MOVABLE)
5870 continue;
f88dfff5 5871 pr_info(" %-8s ", zone_names[i]);
72f0ba02
DR
5872 if (arch_zone_lowest_possible_pfn[i] ==
5873 arch_zone_highest_possible_pfn[i])
f88dfff5 5874 pr_cont("empty\n");
72f0ba02 5875 else
8d29e18a
JG
5876 pr_cont("[mem %#018Lx-%#018Lx]\n",
5877 (u64)arch_zone_lowest_possible_pfn[i]
5878 << PAGE_SHIFT,
5879 ((u64)arch_zone_highest_possible_pfn[i]
a62e2f4f 5880 << PAGE_SHIFT) - 1);
2a1e274a
MG
5881 }
5882
5883 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
f88dfff5 5884 pr_info("Movable zone start for each node\n");
2a1e274a
MG
5885 for (i = 0; i < MAX_NUMNODES; i++) {
5886 if (zone_movable_pfn[i])
8d29e18a
JG
5887 pr_info(" Node %d: %#018Lx\n", i,
5888 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
2a1e274a 5889 }
c713216d 5890
f2d52fe5 5891 /* Print out the early node map */
f88dfff5 5892 pr_info("Early memory node ranges\n");
c13291a5 5893 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
8d29e18a
JG
5894 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
5895 (u64)start_pfn << PAGE_SHIFT,
5896 ((u64)end_pfn << PAGE_SHIFT) - 1);
c713216d
MG
5897
5898 /* Initialise every node */
708614e6 5899 mminit_verify_pageflags_layout();
8ef82866 5900 setup_nr_node_ids();
c713216d
MG
5901 for_each_online_node(nid) {
5902 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 5903 free_area_init_node(nid, NULL,
c713216d 5904 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
5905
5906 /* Any memory on that node */
5907 if (pgdat->node_present_pages)
4b0ef1fe
LJ
5908 node_set_state(nid, N_MEMORY);
5909 check_for_memory(pgdat, nid);
c713216d
MG
5910 }
5911}
2a1e274a 5912
7e63efef 5913static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
5914{
5915 unsigned long long coremem;
5916 if (!p)
5917 return -EINVAL;
5918
5919 coremem = memparse(p, &p);
7e63efef 5920 *core = coremem >> PAGE_SHIFT;
2a1e274a 5921
7e63efef 5922 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
5923 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5924
5925 return 0;
5926}
ed7ed365 5927
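
As a quick sanity check of the conversion above (hypothetical command line, 4 KiB pages assumed):

/*
 * Example: kernelcore=512M parses to coremem == 536,870,912 bytes, and
 * 536,870,912 >> PAGE_SHIFT == 131,072 pages stored in required_kernelcore.
 */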
7e63efef
MG
5928/*
5929 * kernelcore=size sets the amount of memory for use for allocations that
5930 * cannot be reclaimed or migrated.
5931 */
5932static int __init cmdline_parse_kernelcore(char *p)
5933{
342332e6
TI
5934 /* parse kernelcore=mirror */
5935 if (parse_option_str(p, "mirror")) {
5936 mirrored_kernelcore = true;
5937 return 0;
5938 }
5939
7e63efef
MG
5940 return cmdline_parse_core(p, &required_kernelcore);
5941}
5942
5943/*
5944 * movablecore=size sets the amount of memory for use for allocations that
5945 * can be reclaimed or migrated.
5946 */
5947static int __init cmdline_parse_movablecore(char *p)
5948{
5949 return cmdline_parse_core(p, &required_movablecore);
5950}
5951
ed7ed365 5952early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 5953early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 5954
0ee332c1 5955#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5956
c3d5f5f0
JL
5957void adjust_managed_page_count(struct page *page, long count)
5958{
5959 spin_lock(&managed_page_count_lock);
5960 page_zone(page)->managed_pages += count;
5961 totalram_pages += count;
3dcc0571
JL
5962#ifdef CONFIG_HIGHMEM
5963 if (PageHighMem(page))
5964 totalhigh_pages += count;
5965#endif
c3d5f5f0
JL
5966 spin_unlock(&managed_page_count_lock);
5967}
3dcc0571 5968EXPORT_SYMBOL(adjust_managed_page_count);
c3d5f5f0 5969
11199692 5970unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
69afade7 5971{
11199692
JL
5972 void *pos;
5973 unsigned long pages = 0;
69afade7 5974
11199692
JL
5975 start = (void *)PAGE_ALIGN((unsigned long)start);
5976 end = (void *)((unsigned long)end & PAGE_MASK);
5977 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
dbe67df4 5978 if ((unsigned int)poison <= 0xFF)
11199692
JL
5979 memset(pos, poison, PAGE_SIZE);
5980 free_reserved_page(virt_to_page(pos));
69afade7
JL
5981 }
5982
5983 if (pages && s)
11199692 5984 pr_info("Freeing %s memory: %ldK (%p - %p)\n",
69afade7
JL
5985 s, pages << (PAGE_SHIFT - 10), start, end);
5986
5987 return pages;
5988}
11199692 5989EXPORT_SYMBOL(free_reserved_area);
69afade7 5990
cfa11e08
JL
5991#ifdef CONFIG_HIGHMEM
5992void free_highmem_page(struct page *page)
5993{
5994 __free_reserved_page(page);
5995 totalram_pages++;
7b4b2a0d 5996 page_zone(page)->managed_pages++;
cfa11e08
JL
5997 totalhigh_pages++;
5998}
5999#endif
6000
7ee3d4e8
JL
6001
6002void __init mem_init_print_info(const char *str)
6003{
6004 unsigned long physpages, codesize, datasize, rosize, bss_size;
6005 unsigned long init_code_size, init_data_size;
6006
6007 physpages = get_num_physpages();
6008 codesize = _etext - _stext;
6009 datasize = _edata - _sdata;
6010 rosize = __end_rodata - __start_rodata;
6011 bss_size = __bss_stop - __bss_start;
6012 init_data_size = __init_end - __init_begin;
6013 init_code_size = _einittext - _sinittext;
6014
6015 /*
6016 * Detect special cases and adjust section sizes accordingly:
6017 * 1) .init.* may be embedded into .data sections
6018 * 2) .init.text.* may be out of [__init_begin, __init_end],
6019 * please refer to arch/tile/kernel/vmlinux.lds.S.
6020 * 3) .rodata.* may be embedded into .text or .data sections.
6021 */
6022#define adj_init_size(start, end, size, pos, adj) \
b8af2941
PK
6023 do { \
6024 if (start <= pos && pos < end && size > adj) \
6025 size -= adj; \
6026 } while (0)
7ee3d4e8
JL
6027
6028 adj_init_size(__init_begin, __init_end, init_data_size,
6029 _sinittext, init_code_size);
6030 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6031 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6032 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6033 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6034
6035#undef adj_init_size
6036
f88dfff5 6037 pr_info("Memory: %luK/%luK available "
7ee3d4e8 6038 "(%luK kernel code, %luK rwdata, %luK rodata, "
e48322ab 6039 "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
7ee3d4e8
JL
6040#ifdef CONFIG_HIGHMEM
6041 ", %luK highmem"
6042#endif
6043 "%s%s)\n",
6044 nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
6045 codesize >> 10, datasize >> 10, rosize >> 10,
6046 (init_data_size + init_code_size) >> 10, bss_size >> 10,
e48322ab
PK
6047 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
6048 totalcma_pages << (PAGE_SHIFT-10),
7ee3d4e8
JL
6049#ifdef CONFIG_HIGHMEM
6050 totalhigh_pages << (PAGE_SHIFT-10),
6051#endif
6052 str ? ", " : "", str ? str : "");
6053}
6054
0e0b864e 6055/**
88ca3b94
RD
6056 * set_dma_reserve - set the specified number of pages reserved in the first zone
6057 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e 6058 *
013110a7 6059 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
0e0b864e
MG
6060 * In the DMA zone, a significant percentage may be consumed by kernel image
6061 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
6062 * function may optionally be used to account for unfreeable pages in the
6063 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6064 * smaller per-cpu batchsize.
0e0b864e
MG
6065 */
6066void __init set_dma_reserve(unsigned long new_dma_reserve)
6067{
6068 dma_reserve = new_dma_reserve;
6069}
6070
1da177e4
LT
6071void __init free_area_init(unsigned long *zones_size)
6072{
9109fb7b 6073 free_area_init_node(0, zones_size,
1da177e4
LT
6074 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6075}
1da177e4 6076
1da177e4
LT
6077static int page_alloc_cpu_notify(struct notifier_block *self,
6078 unsigned long action, void *hcpu)
6079{
6080 int cpu = (unsigned long)hcpu;
1da177e4 6081
8bb78442 6082 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
f0cb3c76 6083 lru_add_drain_cpu(cpu);
9f8f2172
CL
6084 drain_pages(cpu);
6085
6086 /*
6087 * Spill the event counters of the dead processor
6088 * into the current processors event counters.
6089 * This artificially elevates the count of the current
6090 * processor.
6091 */
f8891e5e 6092 vm_events_fold_cpu(cpu);
9f8f2172
CL
6093
6094 /*
6095 * Zero the differential counters of the dead processor
6096 * so that the vm statistics are consistent.
6097 *
6098 * This is only okay since the processor is dead and cannot
6099 * race with what we are doing.
6100 */
2bb921e5 6101 cpu_vm_stats_fold(cpu);
1da177e4
LT
6102 }
6103 return NOTIFY_OK;
6104}
1da177e4
LT
6105
6106void __init page_alloc_init(void)
6107{
6108 hotcpu_notifier(page_alloc_cpu_notify, 0);
6109}
6110
cb45b0e9 6111/*
34b10060 6112 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
cb45b0e9
HA
6113 * or min_free_kbytes changes.
6114 */
6115static void calculate_totalreserve_pages(void)
6116{
6117 struct pglist_data *pgdat;
6118 unsigned long reserve_pages = 0;
2f6726e5 6119 enum zone_type i, j;
cb45b0e9
HA
6120
6121 for_each_online_pgdat(pgdat) {
6122 for (i = 0; i < MAX_NR_ZONES; i++) {
6123 struct zone *zone = pgdat->node_zones + i;
3484b2de 6124 long max = 0;
cb45b0e9
HA
6125
6126 /* Find valid and maximum lowmem_reserve in the zone */
6127 for (j = i; j < MAX_NR_ZONES; j++) {
6128 if (zone->lowmem_reserve[j] > max)
6129 max = zone->lowmem_reserve[j];
6130 }
6131
41858966
MG
6132 /* we treat the high watermark as reserved pages. */
6133 max += high_wmark_pages(zone);
cb45b0e9 6134
b40da049
JL
6135 if (max > zone->managed_pages)
6136 max = zone->managed_pages;
a8d01437
JW
6137
6138 zone->totalreserve_pages = max;
6139
cb45b0e9
HA
6140 reserve_pages += max;
6141 }
6142 }
6143 totalreserve_pages = reserve_pages;
6144}
6145
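
A small worked example of the per-zone contribution computed above; the zone size, reserve and watermark are made up for illustration.

/*
 * Example: a zone with managed_pages == 262,144, a largest
 * lowmem_reserve[] entry of 2,048 and a high watermark of 1,024 pages
 * contributes min(2,048 + 1,024, 262,144) == 3,072 pages to
 * totalreserve_pages.
 */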
1da177e4
LT
6146/*
6147 * setup_per_zone_lowmem_reserve - called whenever
34b10060 6148 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
1da177e4
LT
6149 * has a correct pages reserved value, so an adequate number of
6150 * pages are left in the zone after a successful __alloc_pages().
6151 */
6152static void setup_per_zone_lowmem_reserve(void)
6153{
6154 struct pglist_data *pgdat;
2f6726e5 6155 enum zone_type j, idx;
1da177e4 6156
ec936fc5 6157 for_each_online_pgdat(pgdat) {
1da177e4
LT
6158 for (j = 0; j < MAX_NR_ZONES; j++) {
6159 struct zone *zone = pgdat->node_zones + j;
b40da049 6160 unsigned long managed_pages = zone->managed_pages;
1da177e4
LT
6161
6162 zone->lowmem_reserve[j] = 0;
6163
2f6726e5
CL
6164 idx = j;
6165 while (idx) {
1da177e4
LT
6166 struct zone *lower_zone;
6167
2f6726e5
CL
6168 idx--;
6169
1da177e4
LT
6170 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6171 sysctl_lowmem_reserve_ratio[idx] = 1;
6172
6173 lower_zone = pgdat->node_zones + idx;
b40da049 6174 lower_zone->lowmem_reserve[j] = managed_pages /
1da177e4 6175 sysctl_lowmem_reserve_ratio[idx];
b40da049 6176 managed_pages += lower_zone->managed_pages;
1da177e4
LT
6177 }
6178 }
6179 }
cb45b0e9
HA
6180
6181 /* update totalreserve_pages */
6182 calculate_totalreserve_pages();
1da177e4
LT
6183}
6184
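
The following figures sketch the effect of the loop above for a hypothetical zone layout, assuming the default ratios (256 for DMA, 32 for NORMAL) and 4 KiB pages.

/*
 * Example: with a 2 GiB ZONE_NORMAL (524,288 pages) above ZONE_DMA,
 * ZONE_DMA ends up with lowmem_reserve[ZONE_NORMAL] == 524,288 / 256 ==
 * 2,048 pages (8 MiB).  If a 1 GiB highmem zone sits above that,
 * ZONE_NORMAL gets lowmem_reserve[ZONE_HIGHMEM] == 262,144 / 32 == 8,192
 * pages and ZONE_DMA gets (262,144 + 524,288) / 256 == 3,072 pages.
 */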
cfd3da1e 6185static void __setup_per_zone_wmarks(void)
1da177e4
LT
6186{
6187 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6188 unsigned long lowmem_pages = 0;
6189 struct zone *zone;
6190 unsigned long flags;
6191
6192 /* Calculate total number of !ZONE_HIGHMEM pages */
6193 for_each_zone(zone) {
6194 if (!is_highmem(zone))
b40da049 6195 lowmem_pages += zone->managed_pages;
1da177e4
LT
6196 }
6197
6198 for_each_zone(zone) {
ac924c60
AM
6199 u64 tmp;
6200
1125b4e3 6201 spin_lock_irqsave(&zone->lock, flags);
b40da049 6202 tmp = (u64)pages_min * zone->managed_pages;
ac924c60 6203 do_div(tmp, lowmem_pages);
1da177e4
LT
6204 if (is_highmem(zone)) {
6205 /*
669ed175
NP
6206 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6207 * need highmem pages, so cap pages_min to a small
6208 * value here.
6209 *
41858966 6210 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
42ff2703 6211 * deltas control async page reclaim, and so should
669ed175 6212 * not be capped for highmem.
1da177e4 6213 */
90ae8d67 6214 unsigned long min_pages;
1da177e4 6215
b40da049 6216 min_pages = zone->managed_pages / 1024;
90ae8d67 6217 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
41858966 6218 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 6219 } else {
669ed175
NP
6220 /*
6221 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
6222 * proportionate to the zone's size.
6223 */
41858966 6224 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
6225 }
6226
41858966
MG
6227 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
6228 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
49f223a9 6229
81c0a2bb 6230 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
abe5f972
JW
6231 high_wmark_pages(zone) - low_wmark_pages(zone) -
6232 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
81c0a2bb 6233
1125b4e3 6234 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 6235 }
cb45b0e9
HA
6236
6237 /* update totalreserve_pages */
6238 calculate_totalreserve_pages();
1da177e4
LT
6239}
6240
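
To illustrate the watermark spacing above, assume 4 KiB pages, min_free_kbytes == 4096 and a single lowmem zone holding all of lowmem (so tmp equals pages_min); the numbers are hypothetical.

/*
 * Example: pages_min = 4096 >> (PAGE_SHIFT - 10) = 1024 pages, so
 *   WMARK_MIN  = 1024
 *   WMARK_LOW  = 1024 + (1024 >> 2) = 1280
 *   WMARK_HIGH = 1024 + (1024 >> 1) = 1536
 */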
cfd3da1e
MG
6241/**
6242 * setup_per_zone_wmarks - called when min_free_kbytes changes
6243 * or when memory is hot-{added|removed}
6244 *
6245 * Ensures that the watermark[min,low,high] values for each zone are set
6246 * correctly with respect to min_free_kbytes.
6247 */
6248void setup_per_zone_wmarks(void)
6249{
6250 mutex_lock(&zonelists_mutex);
6251 __setup_per_zone_wmarks();
6252 mutex_unlock(&zonelists_mutex);
6253}
6254
55a4462a 6255/*
556adecb
RR
6256 * The inactive anon list should be small enough that the VM never has to
6257 * do too much work, but large enough that each inactive page has a chance
6258 * to be referenced again before it is swapped out.
6259 *
6260 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
6261 * INACTIVE_ANON pages on this zone's LRU, maintained by the
6262 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
6263 * the anonymous pages are kept on the inactive list.
6264 *
6265 * total target max
6266 * memory ratio inactive anon
6267 * -------------------------------------
6268 * 10MB 1 5MB
6269 * 100MB 1 50MB
6270 * 1GB 3 250MB
6271 * 10GB 10 0.9GB
6272 * 100GB 31 3GB
6273 * 1TB 101 10GB
6274 * 10TB 320 32GB
6275 */
1b79acc9 6276static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
556adecb 6277{
96cb4df5 6278 unsigned int gb, ratio;
556adecb 6279
96cb4df5 6280 /* Zone size in gigabytes */
b40da049 6281 gb = zone->managed_pages >> (30 - PAGE_SHIFT);
96cb4df5 6282 if (gb)
556adecb 6283 ratio = int_sqrt(10 * gb);
96cb4df5
MK
6284 else
6285 ratio = 1;
556adecb 6286
96cb4df5
MK
6287 zone->inactive_ratio = ratio;
6288}
556adecb 6289
839a4fcc 6290static void __meminit setup_per_zone_inactive_ratio(void)
96cb4df5
MK
6291{
6292 struct zone *zone;
6293
6294 for_each_zone(zone)
6295 calculate_zone_inactive_ratio(zone);
556adecb
RR
6296}
6297
1da177e4
LT
6298/*
6299 * Initialise min_free_kbytes.
6300 *
6301 * For small machines we want it small (128k min). For large machines
6302 * we want it large (64MB max). But it is not linear, because network
6303 * bandwidth does not increase linearly with machine size. We use
6304 *
b8af2941 6305 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
1da177e4
LT
6306 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6307 *
6308 * which yields
6309 *
6310 * 16MB: 512k
6311 * 32MB: 724k
6312 * 64MB: 1024k
6313 * 128MB: 1448k
6314 * 256MB: 2048k
6315 * 512MB: 2896k
6316 * 1024MB: 4096k
6317 * 2048MB: 5792k
6318 * 4096MB: 8192k
6319 * 8192MB: 11584k
6320 * 16384MB: 16384k
6321 */
1b79acc9 6322int __meminit init_per_zone_wmark_min(void)
1da177e4
LT
6323{
6324 unsigned long lowmem_kbytes;
5f12733e 6325 int new_min_free_kbytes;
1da177e4
LT
6326
6327 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5f12733e
MH
6328 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6329
6330 if (new_min_free_kbytes > user_min_free_kbytes) {
6331 min_free_kbytes = new_min_free_kbytes;
6332 if (min_free_kbytes < 128)
6333 min_free_kbytes = 128;
6334 if (min_free_kbytes > 65536)
6335 min_free_kbytes = 65536;
6336 } else {
6337 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6338 new_min_free_kbytes, user_min_free_kbytes);
6339 }
bc75d33f 6340 setup_per_zone_wmarks();
a6cccdc3 6341 refresh_zone_stat_thresholds();
1da177e4 6342 setup_per_zone_lowmem_reserve();
556adecb 6343 setup_per_zone_inactive_ratio();
1da177e4
LT
6344 return 0;
6345}
bc75d33f 6346module_init(init_per_zone_wmark_min)
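
A minimal user-space sketch of the sizing rule above, using libm's sqrt() in place of the kernel's int_sqrt(); the 4 GiB lowmem figure is only an example and reproduces the 4096MB row of the table.

#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned long lowmem_kbytes = 4UL * 1024 * 1024;	/* 4 GiB of lowmem */
	unsigned long min_free = (unsigned long)sqrt((double)lowmem_kbytes * 16);

	if (min_free < 128)		/* same clamping as the kernel applies */
		min_free = 128;
	if (min_free > 65536)
		min_free = 65536;
	printf("min_free_kbytes = %lu\n", min_free);	/* prints 8192 */
	return 0;
}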
1da177e4
LT
6347
6348/*
b8af2941 6349 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
1da177e4
LT
6350 * that we can call two helper functions whenever min_free_kbytes
6351 * changes.
6352 */
cccad5b9 6353int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6354 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 6355{
da8c757b
HP
6356 int rc;
6357
6358 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6359 if (rc)
6360 return rc;
6361
5f12733e
MH
6362 if (write) {
6363 user_min_free_kbytes = min_free_kbytes;
bc75d33f 6364 setup_per_zone_wmarks();
5f12733e 6365 }
1da177e4
LT
6366 return 0;
6367}
6368
9614634f 6369#ifdef CONFIG_NUMA
cccad5b9 6370int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6371 void __user *buffer, size_t *length, loff_t *ppos)
9614634f
CL
6372{
6373 struct zone *zone;
6374 int rc;
6375
8d65af78 6376 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
9614634f
CL
6377 if (rc)
6378 return rc;
6379
6380 for_each_zone(zone)
b40da049 6381 zone->min_unmapped_pages = (zone->managed_pages *
9614634f
CL
6382 sysctl_min_unmapped_ratio) / 100;
6383 return 0;
6384}
0ff38490 6385
cccad5b9 6386int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6387 void __user *buffer, size_t *length, loff_t *ppos)
0ff38490
CL
6388{
6389 struct zone *zone;
6390 int rc;
6391
8d65af78 6392 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
6393 if (rc)
6394 return rc;
6395
6396 for_each_zone(zone)
b40da049 6397 zone->min_slab_pages = (zone->managed_pages *
0ff38490
CL
6398 sysctl_min_slab_ratio) / 100;
6399 return 0;
6400}
9614634f
CL
6401#endif
6402
1da177e4
LT
6403/*
6404 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6405 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6406 * whenever sysctl_lowmem_reserve_ratio changes.
6407 *
6408 * The reserve ratio obviously has absolutely no relation with the
41858966 6409 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
 6410 * in relation to the boot-time zone sizes.
6411 */
cccad5b9 6412int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6413 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 6414{
8d65af78 6415 proc_dointvec_minmax(table, write, buffer, length, ppos);
1da177e4
LT
6416 setup_per_zone_lowmem_reserve();
6417 return 0;
6418}
6419
8ad4b1fb
RS
6420/*
6421 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
b8af2941
PK
6422 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6423 * pagelist can have before it gets flushed back to buddy allocator.
8ad4b1fb 6424 */
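
For scale, one hedged example of what the handler below ends up doing (zone size hypothetical, 4 KiB pages assumed):

/*
 * Example: with the minimum allowed fraction of 8 and a zone of 262,144
 * managed pages (1 GiB), pageset_set_high_and_batch() sets each CPU's
 * pcp->high to roughly 262,144 / 8 == 32,768 pages.
 */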
cccad5b9 6425int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6426 void __user *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
6427{
6428 struct zone *zone;
7cd2b0a3 6429 int old_percpu_pagelist_fraction;
8ad4b1fb
RS
6430 int ret;
6431
7cd2b0a3
DR
6432 mutex_lock(&pcp_batch_high_lock);
6433 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
6434
8d65af78 6435 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7cd2b0a3
DR
6436 if (!write || ret < 0)
6437 goto out;
6438
6439 /* Sanity checking to avoid pcp imbalance */
6440 if (percpu_pagelist_fraction &&
6441 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
6442 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
6443 ret = -EINVAL;
6444 goto out;
6445 }
6446
6447 /* No change? */
6448 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
6449 goto out;
c8e251fa 6450
364df0eb 6451 for_each_populated_zone(zone) {
7cd2b0a3
DR
6452 unsigned int cpu;
6453
22a7f12b 6454 for_each_possible_cpu(cpu)
7cd2b0a3
DR
6455 pageset_set_high_and_batch(zone,
6456 per_cpu_ptr(zone->pageset, cpu));
8ad4b1fb 6457 }
7cd2b0a3 6458out:
c8e251fa 6459 mutex_unlock(&pcp_batch_high_lock);
7cd2b0a3 6460 return ret;
8ad4b1fb
RS
6461}
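/*
 * Worked example (illustrative numbers; the exact derivation lives in
 * pageset_set_high_and_batch()): with zone->managed_pages = 262144 and
 * percpu_pagelist_fraction = 8, each per-cpu pagelist's pcp->high becomes
 * 262144 / 8 = 32768 pages, and the batch size is then derived from that
 * high value inside the same helper.
 */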
6462
a9919c79 6463#ifdef CONFIG_NUMA
f034b5d4 6464int hashdist = HASHDIST_DEFAULT;
1da177e4 6465
1da177e4
LT
6466static int __init set_hashdist(char *str)
6467{
6468 if (!str)
6469 return 0;
6470 hashdist = simple_strtoul(str, &str, 0);
6471 return 1;
6472}
6473__setup("hashdist=", set_hashdist);
6474#endif
6475
6476/*
6477 * allocate a large system hash table from bootmem
6478 * - it is assumed that the hash table must contain an exact power-of-2
6479 * quantity of entries
6480 * - limit is the number of hash buckets, not the total allocation size
6481 */
6482void *__init alloc_large_system_hash(const char *tablename,
6483 unsigned long bucketsize,
6484 unsigned long numentries,
6485 int scale,
6486 int flags,
6487 unsigned int *_hash_shift,
6488 unsigned int *_hash_mask,
31fe62b9
TB
6489 unsigned long low_limit,
6490 unsigned long high_limit)
1da177e4 6491{
31fe62b9 6492 unsigned long long max = high_limit;
1da177e4
LT
6493 unsigned long log2qty, size;
6494 void *table = NULL;
6495
6496 /* allow the kernel cmdline to have a say */
6497 if (!numentries) {
6498 /* round applicable memory size up to nearest megabyte */
04903664 6499 numentries = nr_kernel_pages;
a7e83318
JZ
6500
6501 /* Rounding up isn't necessary when PAGE_SIZE >= 1MB */
6502 if (PAGE_SHIFT < 20)
6503 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
1da177e4
LT
6504
6505 /* limit to 1 bucket per 2^scale bytes of low memory */
6506 if (scale > PAGE_SHIFT)
6507 numentries >>= (scale - PAGE_SHIFT);
6508 else
6509 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
6510
6511 /* Make sure we've got at least a 0-order allocation. */
2c85f51d
JB
6512 if (unlikely(flags & HASH_SMALL)) {
6513 /* Makes no sense without HASH_EARLY */
6514 WARN_ON(!(flags & HASH_EARLY));
6515 if (!(numentries >> *_hash_shift)) {
6516 numentries = 1UL << *_hash_shift;
6517 BUG_ON(!numentries);
6518 }
6519 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 6520 numentries = PAGE_SIZE / bucketsize;
1da177e4 6521 }
6e692ed3 6522 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
6523
6524 /* limit allocation size to 1/16 total memory by default */
6525 if (max == 0) {
6526 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6527 do_div(max, bucketsize);
6528 }
074b8517 6529 max = min(max, 0x80000000ULL);
1da177e4 6530
31fe62b9
TB
6531 if (numentries < low_limit)
6532 numentries = low_limit;
1da177e4
LT
6533 if (numentries > max)
6534 numentries = max;
6535
f0d1b0b3 6536 log2qty = ilog2(numentries);
1da177e4
LT
6537
6538 do {
6539 size = bucketsize << log2qty;
6540 if (flags & HASH_EARLY)
6782832e 6541 table = memblock_virt_alloc_nopanic(size, 0);
1da177e4
LT
6542 else if (hashdist)
6543 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6544 else {
1037b83b
ED
6545 /*
6546 * If bucketsize is not a power of two, some pages at
a1dd268c
MG
6547 * the end of the hash table may be freed;
6548 * alloc_pages_exact() does this automatically.
1037b83b 6549 */
264ef8a9 6550 if (get_order(size) < MAX_ORDER) {
a1dd268c 6551 table = alloc_pages_exact(size, GFP_ATOMIC);
264ef8a9
CM
6552 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6553 }
1da177e4
LT
6554 }
6555 } while (!table && size > PAGE_SIZE && --log2qty);
6556
6557 if (!table)
6558 panic("Failed to allocate %s hash table\n", tablename);
6559
f241e660 6560 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
1da177e4 6561 tablename,
f241e660 6562 (1UL << log2qty),
f0d1b0b3 6563 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
6564 size);
6565
6566 if (_hash_shift)
6567 *_hash_shift = log2qty;
6568 if (_hash_mask)
6569 *_hash_mask = (1 << log2qty) - 1;
6570
6571 return table;
6572}
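/*
 * Example call (illustrative; names and argument values are assumptions
 * loosely modelled on existing callers such as the dentry and inode hash
 * setup, not copied from them):
 *
 *	static unsigned int example_shift __initdata;
 *	static unsigned int example_mask __initdata;
 *
 *	table = alloc_large_system_hash("Example-cache",
 *					sizeof(struct hlist_head),
 *					0,	(auto-size from memory)
 *					14,	(one bucket per 16KB of low memory)
 *					HASH_EARLY,
 *					&example_shift,
 *					&example_mask,
 *					0, 0);
 *
 * The returned table always holds a power-of-2 number of buckets;
 * example_shift and example_mask can then be used to index it.
 */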
a117e66e 6573
835c134e
MG
6574/* Return a pointer to the bitmap storing bits affecting a block of pages */
6575static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6576 unsigned long pfn)
6577{
6578#ifdef CONFIG_SPARSEMEM
6579 return __pfn_to_section(pfn)->pageblock_flags;
6580#else
6581 return zone->pageblock_flags;
6582#endif /* CONFIG_SPARSEMEM */
6583}
6584
6585static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6586{
6587#ifdef CONFIG_SPARSEMEM
6588 pfn &= (PAGES_PER_SECTION-1);
d9c23400 6589 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e 6590#else
c060f943 6591 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
d9c23400 6592 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
6593#endif /* CONFIG_SPARSEMEM */
6594}
6595
6596/**
1aab4d77 6597 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e 6598 * @page: The page within the block of interest
1aab4d77
RD
6599 * @pfn: The target page frame number
6600 * @end_bitidx: The last bit of interest to retrieve
6601 * @mask: mask of bits that the caller is interested in
6602 *
6603 * Return: pageblock_bits flags
835c134e 6604 */
dc4b0caf 6605unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
e58469ba
MG
6606 unsigned long end_bitidx,
6607 unsigned long mask)
835c134e
MG
6608{
6609 struct zone *zone;
6610 unsigned long *bitmap;
dc4b0caf 6611 unsigned long bitidx, word_bitidx;
e58469ba 6612 unsigned long word;
835c134e
MG
6613
6614 zone = page_zone(page);
835c134e
MG
6615 bitmap = get_pageblock_bitmap(zone, pfn);
6616 bitidx = pfn_to_bitidx(zone, pfn);
e58469ba
MG
6617 word_bitidx = bitidx / BITS_PER_LONG;
6618 bitidx &= (BITS_PER_LONG-1);
835c134e 6619
e58469ba
MG
6620 word = bitmap[word_bitidx];
6621 bitidx += end_bitidx;
6622 return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
835c134e
MG
6623}
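/*
 * Usage sketch (an assumption about how callers wrap this helper; see
 * include/linux/mmzone.h and pageblock-flags.h for the real definitions):
 *
 *	#define example_get_migratetype(page)				\
 *		get_pfnblock_flags_mask(page, page_to_pfn(page),	\
 *					PB_migrate_end, MIGRATETYPE_MASK)
 *
 * i.e. a pageblock's migratetype is just the masked group of bits read out
 * of the pageblock bitmap by the function above.
 */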
6624
6625/**
dc4b0caf 6626 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e 6627 * @page: The page within the block of interest
835c134e 6628 * @flags: The flags to set
1aab4d77
RD
6629 * @pfn: The target page frame number
6630 * @end_bitidx: The last bit of interest
6631 * @mask: mask of bits that the caller is interested in
835c134e 6632 */
dc4b0caf
MG
6633void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6634 unsigned long pfn,
e58469ba
MG
6635 unsigned long end_bitidx,
6636 unsigned long mask)
835c134e
MG
6637{
6638 struct zone *zone;
6639 unsigned long *bitmap;
dc4b0caf 6640 unsigned long bitidx, word_bitidx;
e58469ba
MG
6641 unsigned long old_word, word;
6642
6643 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
835c134e
MG
6644
6645 zone = page_zone(page);
835c134e
MG
6646 bitmap = get_pageblock_bitmap(zone, pfn);
6647 bitidx = pfn_to_bitidx(zone, pfn);
e58469ba
MG
6648 word_bitidx = bitidx / BITS_PER_LONG;
6649 bitidx &= (BITS_PER_LONG-1);
6650
309381fe 6651 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
835c134e 6652
e58469ba
MG
6653 bitidx += end_bitidx;
6654 mask <<= (BITS_PER_LONG - bitidx - 1);
6655 flags <<= (BITS_PER_LONG - bitidx - 1);
6656
4db0c3c2 6657 word = READ_ONCE(bitmap[word_bitidx]);
e58469ba
MG
6658 for (;;) {
6659 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6660 if (word == old_word)
6661 break;
6662 word = old_word;
6663 }
835c134e 6664}
a5d76b54
KH
6665
6666/*
80934513
MK
6667 * This function checks whether the pageblock includes unmovable pages or not.
6668 * If @count is not zero, fewer than @count unmovable pages are tolerated.
6669 *
b8af2941 6670 * A PageLRU check without isolation or lru_lock could race, so a
80934513
MK
6671 * MIGRATE_MOVABLE block might include unmovable pages. This means the
6672 * function cannot be expected to be exact.
a5d76b54 6673 */
b023f468
WC
6674bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6675 bool skip_hwpoisoned_pages)
49ac8255
KH
6676{
6677 unsigned long pfn, iter, found;
47118af0
MN
6678 int mt;
6679
49ac8255
KH
6680 /*
6681 * To avoid noisy data, lru_add_drain_all() should be called first.
80934513 6682 * If the zone is ZONE_MOVABLE, it never contains unmovable pages.
49ac8255
KH
6683 */
6684 if (zone_idx(zone) == ZONE_MOVABLE)
80934513 6685 return false;
47118af0
MN
6686 mt = get_pageblock_migratetype(page);
6687 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
80934513 6688 return false;
49ac8255
KH
6689
6690 pfn = page_to_pfn(page);
6691 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6692 unsigned long check = pfn + iter;
6693
29723fcc 6694 if (!pfn_valid_within(check))
49ac8255 6695 continue;
29723fcc 6696
49ac8255 6697 page = pfn_to_page(check);
c8721bbb
NH
6698
6699 /*
6700 * Hugepages are not in LRU lists, but they're movable.
6701 * We need not scan over tail pages because we don't
6702 * handle each tail page individually in migration.
6703 */
6704 if (PageHuge(page)) {
6705 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6706 continue;
6707 }
6708
97d255c8
MK
6709 /*
6710 * We can't use page_count without pinning the page
6711 * because another CPU can free the compound page.
6712 * This check already skips compound tails of THP
6713 * because their page->_count is zero at all times.
6714 */
6715 if (!atomic_read(&page->_count)) {
49ac8255
KH
6716 if (PageBuddy(page))
6717 iter += (1 << page_order(page)) - 1;
6718 continue;
6719 }
97d255c8 6720
b023f468
WC
6721 /*
6722 * The HWPoisoned page may be not in buddy system, and
6723 * page_count() is not 0.
6724 */
6725 if (skip_hwpoisoned_pages && PageHWPoison(page))
6726 continue;
6727
49ac8255
KH
6728 if (!PageLRU(page))
6729 found++;
6730 /*
6b4f7799
JW
6731 * If there are RECLAIMABLE pages, we need to check
6732 * them. But for now memory offlining itself doesn't call
6733 * shrink_node_slabs(), and that still needs to be fixed.
49ac8255
KH
6734 */
6735 /*
6736 * If the page is not RAM, page_count() should be 0.
6737 * We don't need further checks: this is an in-use, non-movable page.
6738 *
6739 * The problematic thing here is PG_reserved pages. PG_reserved
6740 * is set on both memory hole pages and in-use kernel
6741 * pages at boot.
6742 */
6743 if (found > count)
80934513 6744 return true;
49ac8255 6745 }
80934513 6746 return false;
49ac8255
KH
6747}
6748
6749bool is_pageblock_removable_nolock(struct page *page)
6750{
656a0706
MH
6751 struct zone *zone;
6752 unsigned long pfn;
687875fb
MH
6753
6754 /*
6755 * We have to be careful here because we are iterating over memory
6756 * sections which are not zone aware so we might end up outside of
6757 * the zone but still within the section.
656a0706
MH
6758 * We have to be careful about the node as well: if the node is offline,
6759 * its NODE_DATA will be NULL - see page_zone().
687875fb 6760 */
656a0706
MH
6761 if (!node_online(page_to_nid(page)))
6762 return false;
6763
6764 zone = page_zone(page);
6765 pfn = page_to_pfn(page);
108bcc96 6766 if (!zone_spans_pfn(zone, pfn))
687875fb
MH
6767 return false;
6768
b023f468 6769 return !has_unmovable_pages(zone, page, 0, true);
a5d76b54 6770}
0c0e6195 6771
080fe206 6772#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
041d3a8c
MN
6773
6774static unsigned long pfn_max_align_down(unsigned long pfn)
6775{
6776 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6777 pageblock_nr_pages) - 1);
6778}
6779
6780static unsigned long pfn_max_align_up(unsigned long pfn)
6781{
6782 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6783 pageblock_nr_pages));
6784}
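/*
 * Worked example (illustrative values): assuming MAX_ORDER_NR_PAGES = 1024
 * and pageblock_nr_pages = 512, the larger alignment is 1024 pages, so
 * pfn_max_align_down(5000) = 5000 & ~1023 = 4096 and
 * pfn_max_align_up(5000)   = ALIGN(5000, 1024) = 5120.
 */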
6785
041d3a8c 6786/* [start, end) must belong to a single zone. */
bb13ffeb
MG
6787static int __alloc_contig_migrate_range(struct compact_control *cc,
6788 unsigned long start, unsigned long end)
041d3a8c
MN
6789{
6790 /* This function is based on compact_zone() from compaction.c. */
beb51eaa 6791 unsigned long nr_reclaimed;
041d3a8c
MN
6792 unsigned long pfn = start;
6793 unsigned int tries = 0;
6794 int ret = 0;
6795
be49a6e1 6796 migrate_prep();
041d3a8c 6797
bb13ffeb 6798 while (pfn < end || !list_empty(&cc->migratepages)) {
041d3a8c
MN
6799 if (fatal_signal_pending(current)) {
6800 ret = -EINTR;
6801 break;
6802 }
6803
bb13ffeb
MG
6804 if (list_empty(&cc->migratepages)) {
6805 cc->nr_migratepages = 0;
edc2ca61 6806 pfn = isolate_migratepages_range(cc, pfn, end);
041d3a8c
MN
6807 if (!pfn) {
6808 ret = -EINTR;
6809 break;
6810 }
6811 tries = 0;
6812 } else if (++tries == 5) {
6813 ret = ret < 0 ? ret : -EBUSY;
6814 break;
6815 }
6816
beb51eaa
MK
6817 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6818 &cc->migratepages);
6819 cc->nr_migratepages -= nr_reclaimed;
02c6de8d 6820
9c620e2b 6821 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
e0b9daeb 6822 NULL, 0, cc->mode, MR_CMA);
041d3a8c 6823 }
2a6f5124
SP
6824 if (ret < 0) {
6825 putback_movable_pages(&cc->migratepages);
6826 return ret;
6827 }
6828 return 0;
041d3a8c
MN
6829}
6830
6831/**
6832 * alloc_contig_range() -- tries to allocate given range of pages
6833 * @start: start PFN to allocate
6834 * @end: one-past-the-last PFN to allocate
0815f3d8
MN
6835 * @migratetype: migratetype of the underlying pageblocks (either
6836 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6837 * in range must have the same migratetype and it must
6838 * be either of the two.
041d3a8c
MN
6839 *
6840 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6841 * aligned, however it's the caller's responsibility to guarantee that
6842 * we are the only thread that changes migrate type of pageblocks the
6843 * pages fall in.
6844 *
6845 * The PFN range must belong to a single zone.
6846 *
6847 * Returns zero on success or negative error code. On success all
6848 * pages whose PFN is in [start, end) are allocated for the caller and
6849 * need to be freed with free_contig_range().
6850 */
0815f3d8
MN
6851int alloc_contig_range(unsigned long start, unsigned long end,
6852 unsigned migratetype)
041d3a8c 6853{
041d3a8c 6854 unsigned long outer_start, outer_end;
d00181b9
KS
6855 unsigned int order;
6856 int ret = 0;
041d3a8c 6857
bb13ffeb
MG
6858 struct compact_control cc = {
6859 .nr_migratepages = 0,
6860 .order = -1,
6861 .zone = page_zone(pfn_to_page(start)),
e0b9daeb 6862 .mode = MIGRATE_SYNC,
bb13ffeb
MG
6863 .ignore_skip_hint = true,
6864 };
6865 INIT_LIST_HEAD(&cc.migratepages);
6866
041d3a8c
MN
6867 /*
6868 * What we do here is mark all pageblocks in the range as
6869 * MIGRATE_ISOLATE. Because pageblock and max-order pages may
6870 * have different sizes, and due to the way the page allocator
6871 * works, we align the range to the bigger of the two so
6872 * that the page allocator won't try to merge buddies from
6873 * different pageblocks and change MIGRATE_ISOLATE to some
6874 * other migration type.
6875 *
6876 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6877 * migrate the pages from an unaligned range (ie. pages that
6878 * we are interested in). This will put all the pages in
6879 * range back to page allocator as MIGRATE_ISOLATE.
6880 *
6881 * When this is done, we take the pages in range from page
6882 * allocator removing them from the buddy system. This way
6883 * page allocator will never consider using them.
6884 *
6885 * This lets us mark the pageblocks back as
6886 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6887 * aligned range but not in the unaligned, original range are
6888 * put back to page allocator so that buddy can use them.
6889 */
6890
6891 ret = start_isolate_page_range(pfn_max_align_down(start),
b023f468
WC
6892 pfn_max_align_up(end), migratetype,
6893 false);
041d3a8c 6894 if (ret)
86a595f9 6895 return ret;
041d3a8c 6896
8ef5849f
JK
6897 /*
6898 * In case of -EBUSY, we'd like to know which page causes problem.
6899 * So, just fall through. We will check it in test_pages_isolated().
6900 */
bb13ffeb 6901 ret = __alloc_contig_migrate_range(&cc, start, end);
8ef5849f 6902 if (ret && ret != -EBUSY)
041d3a8c
MN
6903 goto done;
6904
6905 /*
6906 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
6907 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6908 * more, all pages in [start, end) are free in page allocator.
6909 * What we are going to do is to allocate all pages from
6910 * [start, end) (that is remove them from page allocator).
6911 *
6912 * The only problem is that pages at the beginning and at the
6913 * end of the range of interest may not be aligned with pages that
6914 * the page allocator holds, i.e. they can be part of higher order
6915 * pages. Because of this, we reserve the bigger range and
6916 * once this is done free the pages we are not interested in.
6917 *
6918 * We don't have to hold zone->lock here because the pages are
6919 * isolated thus they won't get removed from buddy.
6920 */
6921
6922 lru_add_drain_all();
510f5507 6923 drain_all_pages(cc.zone);
041d3a8c
MN
6924
6925 order = 0;
6926 outer_start = start;
6927 while (!PageBuddy(pfn_to_page(outer_start))) {
6928 if (++order >= MAX_ORDER) {
8ef5849f
JK
6929 outer_start = start;
6930 break;
041d3a8c
MN
6931 }
6932 outer_start &= ~0UL << order;
6933 }
6934
8ef5849f
JK
6935 if (outer_start != start) {
6936 order = page_order(pfn_to_page(outer_start));
6937
6938 /*
6939 * The outer_start page could be a small-order buddy page that
6940 * doesn't include the start page. Adjust outer_start
6941 * in this case so that the failing page is reported properly
6942 * by the tracepoint in test_pages_isolated().
6943 */
6944 if (outer_start + (1UL << order) <= start)
6945 outer_start = start;
6946 }
6947
041d3a8c 6948 /* Make sure the range is really isolated. */
b023f468 6949 if (test_pages_isolated(outer_start, end, false)) {
dae803e1
MN
6950 pr_info("%s: [%lx, %lx) PFNs busy\n",
6951 __func__, outer_start, end);
041d3a8c
MN
6952 ret = -EBUSY;
6953 goto done;
6954 }
6955
49f223a9 6956 /* Grab isolated pages from freelists. */
bb13ffeb 6957 outer_end = isolate_freepages_range(&cc, outer_start, end);
041d3a8c
MN
6958 if (!outer_end) {
6959 ret = -EBUSY;
6960 goto done;
6961 }
6962
6963 /* Free head and tail (if any) */
6964 if (start != outer_start)
6965 free_contig_range(outer_start, start - outer_start);
6966 if (end != outer_end)
6967 free_contig_range(end, outer_end - end);
6968
6969done:
6970 undo_isolate_page_range(pfn_max_align_down(start),
0815f3d8 6971 pfn_max_align_up(end), migratetype);
041d3a8c
MN
6972 return ret;
6973}
6974
6975void free_contig_range(unsigned long pfn, unsigned nr_pages)
6976{
bcc2b02f
MS
6977 unsigned int count = 0;
6978
6979 for (; nr_pages--; pfn++) {
6980 struct page *page = pfn_to_page(pfn);
6981
6982 count += page_count(page) != 1;
6983 __free_page(page);
6984 }
6985 WARN(count != 0, "%d pages are still in use!\n", count);
041d3a8c
MN
6986}
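/*
 * Usage sketch (illustrative, modelled on how a CMA-style caller might use
 * the pair of functions above; the names and the MIGRATE_CMA choice are
 * assumptions for the example):
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 *	if (ret)
 *		return ret;
 *	... use the pages in [pfn, pfn + nr_pages) ...
 *	free_contig_range(pfn, nr_pages);
 *
 * The caller must guarantee that all pageblocks in the range already have
 * the requested migratetype and that nobody else changes it concurrently.
 */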
6987#endif
6988
4ed7e022 6989#ifdef CONFIG_MEMORY_HOTPLUG
0a647f38
CS
6990/*
6991 * The zone indicated has a new number of managed_pages; batch sizes and percpu
6992 * page high values need to be recalculated.
6993 */
4ed7e022
JL
6994void __meminit zone_pcp_update(struct zone *zone)
6995{
0a647f38 6996 unsigned cpu;
c8e251fa 6997 mutex_lock(&pcp_batch_high_lock);
0a647f38 6998 for_each_possible_cpu(cpu)
169f6c19
CS
6999 pageset_set_high_and_batch(zone,
7000 per_cpu_ptr(zone->pageset, cpu));
c8e251fa 7001 mutex_unlock(&pcp_batch_high_lock);
4ed7e022
JL
7002}
7003#endif
7004
340175b7
JL
7005void zone_pcp_reset(struct zone *zone)
7006{
7007 unsigned long flags;
5a883813
MK
7008 int cpu;
7009 struct per_cpu_pageset *pset;
340175b7
JL
7010
7011 /* avoid races with drain_pages() */
7012 local_irq_save(flags);
7013 if (zone->pageset != &boot_pageset) {
5a883813
MK
7014 for_each_online_cpu(cpu) {
7015 pset = per_cpu_ptr(zone->pageset, cpu);
7016 drain_zonestat(zone, pset);
7017 }
340175b7
JL
7018 free_percpu(zone->pageset);
7019 zone->pageset = &boot_pageset;
7020 }
7021 local_irq_restore(flags);
7022}
7023
6dcd73d7 7024#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195
KH
7025/*
7026 * All pages in the range must be isolated before calling this.
7027 */
7028void
7029__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7030{
7031 struct page *page;
7032 struct zone *zone;
7aeb09f9 7033 unsigned int order, i;
0c0e6195
KH
7034 unsigned long pfn;
7035 unsigned long flags;
7036 /* find the first valid pfn */
7037 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7038 if (pfn_valid(pfn))
7039 break;
7040 if (pfn == end_pfn)
7041 return;
7042 zone = page_zone(pfn_to_page(pfn));
7043 spin_lock_irqsave(&zone->lock, flags);
7044 pfn = start_pfn;
7045 while (pfn < end_pfn) {
7046 if (!pfn_valid(pfn)) {
7047 pfn++;
7048 continue;
7049 }
7050 page = pfn_to_page(pfn);
b023f468
WC
7051 /*
7052 * The HWPoisoned page may be not in buddy system, and
7053 * page_count() is not 0.
7054 */
7055 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7056 pfn++;
7057 SetPageReserved(page);
7058 continue;
7059 }
7060
0c0e6195
KH
7061 BUG_ON(page_count(page));
7062 BUG_ON(!PageBuddy(page));
7063 order = page_order(page);
7064#ifdef CONFIG_DEBUG_VM
7065 printk(KERN_INFO "remove from free list %lx %d %lx\n",
7066 pfn, 1 << order, end_pfn);
7067#endif
7068 list_del(&page->lru);
7069 rmv_page_order(page);
7070 zone->free_area[order].nr_free--;
0c0e6195
KH
7071 for (i = 0; i < (1 << order); i++)
7072 SetPageReserved((page+i));
7073 pfn += (1 << order);
7074 }
7075 spin_unlock_irqrestore(&zone->lock, flags);
7076}
7077#endif
8d22ba1b
WF
7078
7079#ifdef CONFIG_MEMORY_FAILURE
7080bool is_free_buddy_page(struct page *page)
7081{
7082 struct zone *zone = page_zone(page);
7083 unsigned long pfn = page_to_pfn(page);
7084 unsigned long flags;
7aeb09f9 7085 unsigned int order;
8d22ba1b
WF
7086
7087 spin_lock_irqsave(&zone->lock, flags);
7088 for (order = 0; order < MAX_ORDER; order++) {
7089 struct page *page_head = page - (pfn & ((1 << order) - 1));
7090
7091 if (PageBuddy(page_head) && page_order(page_head) >= order)
7092 break;
7093 }
7094 spin_unlock_irqrestore(&zone->lock, flags);
7095
7096 return order < MAX_ORDER;
7097}
7098#endif