/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

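/*
 * Usage sketch (illustrative only, not code from this file): the cached value
 * is written when a page is put on a pcplist and read back when the pcplist
 * is drained, so free_pcppages_bulk() below can skip the pageblock bitmap
 * lookup in the common case. Roughly:
 */
#if 0
	/* free-to-pcplist path */
	migratetype = get_pfnblock_migratetype(page, pfn);
	set_pcppage_migratetype(page, migratetype);

	/* drain path, see free_pcppages_bulk() */
	mt = get_pcppage_migratetype(page);
	__free_one_page(page, page_to_pfn(page), zone, 0, mt);
#endif
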
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

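/*
 * Illustrative pairing (an assumption about the caller, not code from this
 * file): the hibernation core is expected to bracket its no-I/O window
 * roughly like this, with pm_mutex held via lock_system_sleep():
 */
#if 0
	lock_system_sleep();
	pm_restrict_gfp_mask();		/* allocations must not start I/O */
	/* ... snapshot or restore the hibernation image ... */
	pm_restore_gfp_mask();		/* back to the saved GFP mask */
	unlock_system_sleep();
#endif
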
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

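/*
 * Worked example (illustrative, not part of the original file): with the
 * default ratio of 256 for ZONE_DMA on the 1G layout above, a NORMAL
 * allocation leaves roughly 784M/256 ~= 3M of ZONE_DMA unavailable to it,
 * i.e. a zone's protection against a higher-zone allocation is the managed
 * pages of the higher zones divided by this ratio; see
 * setup_per_zone_lowmem_reserve().
 */
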
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

0ee332c1
TH
268#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
269static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
270static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
271static unsigned long __initdata required_kernelcore;
272static unsigned long __initdata required_movablecore;
273static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
342332e6 274static bool mirrored_kernelcore;
0ee332c1
TH
275
276/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
277int movable_zone;
278EXPORT_SYMBOL(movable_zone);
279#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 280
418508c1
MS
281#if MAX_NUMNODES > 1
282int nr_node_ids __read_mostly = MAX_NUMNODES;
62bc62a8 283int nr_online_nodes __read_mostly = 1;
418508c1 284EXPORT_SYMBOL(nr_node_ids);
62bc62a8 285EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
286#endif
287
9ef9acb0
MG
288int page_group_by_mobility_disabled __read_mostly;
289
3a80a7fa
MG
290#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
291static inline void reset_deferred_meminit(pg_data_t *pgdat)
292{
864b9a39
MH
293 unsigned long max_initialise;
294 unsigned long reserved_lowmem;
295
296 /*
297 * Initialise at least 2G of a node but also take into account
298 * two large system hashes that can take up 1GB for 0.25TB/node.
299 */
300 max_initialise = max(2UL << (30 - PAGE_SHIFT),
301 (pgdat->node_spanned_pages >> 8));
302
303 /*
304 * Compensate for all the memblock reservations (e.g. crash kernel)
305 * from the initial estimation to make sure we will initialize enough
306 * memory to boot.
307 */
308 reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
309 pgdat->node_start_pfn + max_initialise);
310 max_initialise += reserved_lowmem;
311
312 pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
3a80a7fa
MG
313 pgdat->first_deferred_pfn = ULONG_MAX;
314}
315
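/*
 * Worked example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * for a node spanning 1TB, max(2G, spanned/256) comes to 4G worth of pages,
 * so roughly 4G plus any memblock reservations in that range is initialised
 * early and the rest of the node is deferred to deferred_init_memmap().
 */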
316/* Returns true if the struct page for the pfn is uninitialised */
0e1cc95b 317static inline bool __meminit early_page_uninitialised(unsigned long pfn)
3a80a7fa 318{
ef70b6f4
MG
319 int nid = early_pfn_to_nid(pfn);
320
321 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
3a80a7fa
MG
322 return true;
323
324 return false;
325}
326
327/*
328 * Returns false when the remaining initialisation should be deferred until
329 * later in the boot cycle when it can be parallelised.
330 */
331static inline bool update_defer_init(pg_data_t *pgdat,
332 unsigned long pfn, unsigned long zone_end,
333 unsigned long *nr_initialised)
334{
335 /* Always populate low zones for address-constrained allocations */
336 if (zone_end < pgdat_end_pfn(pgdat))
337 return true;
3a80a7fa 338 (*nr_initialised)++;
864b9a39 339 if ((*nr_initialised > pgdat->static_init_size) &&
3a80a7fa
MG
340 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
341 pgdat->first_deferred_pfn = pfn;
342 return false;
343 }
344
345 return true;
346}
347#else
348static inline void reset_deferred_meminit(pg_data_t *pgdat)
349{
350}
351
352static inline bool early_page_uninitialised(unsigned long pfn)
353{
354 return false;
355}
356
357static inline bool update_defer_init(pg_data_t *pgdat,
358 unsigned long pfn, unsigned long zone_end,
359 unsigned long *nr_initialised)
360{
361 return true;
362}
363#endif
364
0b423ca2
MG
365/* Return a pointer to the bitmap storing bits affecting a block of pages */
366static inline unsigned long *get_pageblock_bitmap(struct page *page,
367 unsigned long pfn)
368{
369#ifdef CONFIG_SPARSEMEM
370 return __pfn_to_section(pfn)->pageblock_flags;
371#else
372 return page_zone(page)->pageblock_flags;
373#endif /* CONFIG_SPARSEMEM */
374}
375
376static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
377{
378#ifdef CONFIG_SPARSEMEM
379 pfn &= (PAGES_PER_SECTION-1);
380 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
381#else
382 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
383 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
384#endif /* CONFIG_SPARSEMEM */
385}
386
387/**
388 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
389 * @page: The page within the block of interest
390 * @pfn: The target page frame number
391 * @end_bitidx: The last bit of interest to retrieve
392 * @mask: mask of bits that the caller is interested in
393 *
394 * Return: pageblock_bits flags
395 */
396static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
397 unsigned long pfn,
398 unsigned long end_bitidx,
399 unsigned long mask)
400{
401 unsigned long *bitmap;
402 unsigned long bitidx, word_bitidx;
403 unsigned long word;
404
405 bitmap = get_pageblock_bitmap(page, pfn);
406 bitidx = pfn_to_bitidx(page, pfn);
407 word_bitidx = bitidx / BITS_PER_LONG;
408 bitidx &= (BITS_PER_LONG-1);
409
410 word = bitmap[word_bitidx];
411 bitidx += end_bitidx;
412 return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
413}
414
415unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
416 unsigned long end_bitidx,
417 unsigned long mask)
418{
419 return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
420}
421
422static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
423{
424 return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
425}
426
427/**
428 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
429 * @page: The page within the block of interest
430 * @flags: The flags to set
431 * @pfn: The target page frame number
432 * @end_bitidx: The last bit of interest
433 * @mask: mask of bits that the caller is interested in
434 */
435void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
436 unsigned long pfn,
437 unsigned long end_bitidx,
438 unsigned long mask)
439{
440 unsigned long *bitmap;
441 unsigned long bitidx, word_bitidx;
442 unsigned long old_word, word;
443
444 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
445
446 bitmap = get_pageblock_bitmap(page, pfn);
447 bitidx = pfn_to_bitidx(page, pfn);
448 word_bitidx = bitidx / BITS_PER_LONG;
449 bitidx &= (BITS_PER_LONG-1);
450
451 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
452
453 bitidx += end_bitidx;
454 mask <<= (BITS_PER_LONG - bitidx - 1);
455 flags <<= (BITS_PER_LONG - bitidx - 1);
456
457 word = READ_ONCE(bitmap[word_bitidx]);
458 for (;;) {
459 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
460 if (word == old_word)
461 break;
462 word = old_word;
463 }
464}
3a80a7fa 465
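/*
 * Worked example (illustrative, assuming 64-bit longs, NR_PAGEBLOCK_BITS == 4
 * and 3 migratetype bits): the first pageblock of a section has bitidx 0, so
 * after bitidx += end_bitidx the migratetype is read from the top three bits
 * of bitmap[0] (shift = BITS_PER_LONG - 3); the second pageblock starts at
 * bitidx 4 and uses bits 57..59, and so on, 16 pageblocks per word.
 */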
ee6f509c 466void set_pageblock_migratetype(struct page *page, int migratetype)
b2a0ac88 467{
5d0f3f72
KM
468 if (unlikely(page_group_by_mobility_disabled &&
469 migratetype < MIGRATE_PCPTYPES))
49255c61
MG
470 migratetype = MIGRATE_UNMOVABLE;
471
b2a0ac88
MG
472 set_pageblock_flags_group(page, (unsigned long)migratetype,
473 PB_migrate, PB_migrate_end);
474}
475
13e7444b 476#ifdef CONFIG_DEBUG_VM
c6a57e19 477static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 478{
bdc8cb98
DH
479 int ret = 0;
480 unsigned seq;
481 unsigned long pfn = page_to_pfn(page);
b5e6a5a2 482 unsigned long sp, start_pfn;
c6a57e19 483
bdc8cb98
DH
484 do {
485 seq = zone_span_seqbegin(zone);
b5e6a5a2
CS
486 start_pfn = zone->zone_start_pfn;
487 sp = zone->spanned_pages;
108bcc96 488 if (!zone_spans_pfn(zone, pfn))
bdc8cb98
DH
489 ret = 1;
490 } while (zone_span_seqretry(zone, seq));
491
b5e6a5a2 492 if (ret)
613813e8
DH
493 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
494 pfn, zone_to_nid(zone), zone->name,
495 start_pfn, start_pfn + sp);
b5e6a5a2 496
bdc8cb98 497 return ret;
c6a57e19
DH
498}
499
500static int page_is_consistent(struct zone *zone, struct page *page)
501{
14e07298 502 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 503 return 0;
1da177e4 504 if (zone != page_zone(page))
c6a57e19
DH
505 return 0;
506
507 return 1;
508}
509/*
510 * Temporary debugging check for pages not lying within a given zone.
511 */
d73d3c9f 512static int __maybe_unused bad_range(struct zone *zone, struct page *page)
c6a57e19
DH
513{
514 if (page_outside_zone_boundaries(zone, page))
1da177e4 515 return 1;
c6a57e19
DH
516 if (!page_is_consistent(zone, page))
517 return 1;
518
1da177e4
LT
519 return 0;
520}
13e7444b 521#else
d73d3c9f 522static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
13e7444b
NP
523{
524 return 0;
525}
526#endif
527
d230dec1
KS
528static void bad_page(struct page *page, const char *reason,
529 unsigned long bad_flags)
1da177e4 530{
d936cf9b
HD
531 static unsigned long resume;
532 static unsigned long nr_shown;
533 static unsigned long nr_unshown;
534
535 /*
536 * Allow a burst of 60 reports, then keep quiet for that minute;
537 * or allow a steady drip of one report per second.
538 */
539 if (nr_shown == 60) {
540 if (time_before(jiffies, resume)) {
541 nr_unshown++;
542 goto out;
543 }
544 if (nr_unshown) {
ff8e8116 545 pr_alert(
1e9e6365 546 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
547 nr_unshown);
548 nr_unshown = 0;
549 }
550 nr_shown = 0;
551 }
552 if (nr_shown++ == 0)
553 resume = jiffies + 60 * HZ;
554
ff8e8116 555 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 556 current->comm, page_to_pfn(page));
ff8e8116
VB
557 __dump_page(page, reason);
558 bad_flags &= page->flags;
559 if (bad_flags)
560 pr_alert("bad because of flags: %#lx(%pGp)\n",
561 bad_flags, &bad_flags);
4e462112 562 dump_page_owner(page);
3dc14741 563
4f31888c 564 print_modules();
1da177e4 565 dump_stack();
d936cf9b 566out:
8cc3b392 567 /* Leave bad fields for debug, except PageBuddy could make trouble */
22b751c3 568 page_mapcount_reset(page); /* remove PageBuddy */
373d4d09 569 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1da177e4
LT
570}
571
/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits is a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
d98c7a09 586
9a982250 587void free_compound_page(struct page *page)
d98c7a09 588{
d85f3385 589 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
590}
591
d00181b9 592void prep_compound_page(struct page *page, unsigned int order)
18229df5
AW
593{
594 int i;
595 int nr_pages = 1 << order;
596
f1e61557 597 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
18229df5
AW
598 set_compound_order(page, order);
599 __SetPageHead(page);
600 for (i = 1; i < nr_pages; i++) {
601 struct page *p = page + i;
58a84aa9 602 set_page_count(p, 0);
1c290f64 603 p->mapping = TAIL_MAPPING;
1d798ca3 604 set_compound_head(p, page);
18229df5 605 }
53f9263b 606 atomic_set(compound_mapcount_ptr(page), -1);
18229df5
AW
607}
608
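/*
 * Illustrative layout (not part of the original file) of an order-2 compound
 * page as built by prep_compound_page(): page[0] has PG_head set; page[1],
 * the first tail, carries ->compound_dtor, ->compound_order == 2 and
 * ->compound_mapcount == -1; every tail page (page[1..3]) has refcount 0,
 * ->mapping == TAIL_MAPPING and ->compound_head pointing at page[0] with
 * bit 0 set to mark it as a tail.
 */
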
c0a32fc5
SG
609#ifdef CONFIG_DEBUG_PAGEALLOC
610unsigned int _debug_guardpage_minorder;
ea6eabb0
CB
611bool _debug_pagealloc_enabled __read_mostly
612 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
505f6d22 613EXPORT_SYMBOL(_debug_pagealloc_enabled);
e30825f1
JK
614bool _debug_guardpage_enabled __read_mostly;
615
031bc574
JK
616static int __init early_debug_pagealloc(char *buf)
617{
618 if (!buf)
619 return -EINVAL;
2a138dc7 620 return kstrtobool(buf, &_debug_pagealloc_enabled);
031bc574
JK
621}
622early_param("debug_pagealloc", early_debug_pagealloc);
623
e30825f1
JK
624static bool need_debug_guardpage(void)
625{
031bc574
JK
626 /* If we don't use debug_pagealloc, we don't need guard page */
627 if (!debug_pagealloc_enabled())
628 return false;
629
f1c1e9f7
JK
630 if (!debug_guardpage_minorder())
631 return false;
632
e30825f1
JK
633 return true;
634}
635
636static void init_debug_guardpage(void)
637{
031bc574
JK
638 if (!debug_pagealloc_enabled())
639 return;
640
f1c1e9f7
JK
641 if (!debug_guardpage_minorder())
642 return;
643
e30825f1
JK
644 _debug_guardpage_enabled = true;
645}
646
647struct page_ext_operations debug_guardpage_ops = {
648 .need = need_debug_guardpage,
649 .init = init_debug_guardpage,
650};
c0a32fc5
SG
651
652static int __init debug_guardpage_minorder_setup(char *buf)
653{
654 unsigned long res;
655
656 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
1170532b 657 pr_err("Bad debug_guardpage_minorder value\n");
c0a32fc5
SG
658 return 0;
659 }
660 _debug_guardpage_minorder = res;
1170532b 661 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
c0a32fc5
SG
662 return 0;
663}
f1c1e9f7 664early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
c0a32fc5 665
acbc15a4 666static inline bool set_page_guard(struct zone *zone, struct page *page,
2847cf95 667 unsigned int order, int migratetype)
c0a32fc5 668{
e30825f1
JK
669 struct page_ext *page_ext;
670
671 if (!debug_guardpage_enabled())
acbc15a4
JK
672 return false;
673
674 if (order >= debug_guardpage_minorder())
675 return false;
e30825f1
JK
676
677 page_ext = lookup_page_ext(page);
f86e4271 678 if (unlikely(!page_ext))
acbc15a4 679 return false;
f86e4271 680
e30825f1
JK
681 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
682
2847cf95
JK
683 INIT_LIST_HEAD(&page->lru);
684 set_page_private(page, order);
685 /* Guard pages are not available for any usage */
686 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
acbc15a4
JK
687
688 return true;
c0a32fc5
SG
689}
690
2847cf95
JK
691static inline void clear_page_guard(struct zone *zone, struct page *page,
692 unsigned int order, int migratetype)
c0a32fc5 693{
e30825f1
JK
694 struct page_ext *page_ext;
695
696 if (!debug_guardpage_enabled())
697 return;
698
699 page_ext = lookup_page_ext(page);
f86e4271
YS
700 if (unlikely(!page_ext))
701 return;
702
e30825f1
JK
703 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
704
2847cf95
JK
705 set_page_private(page, 0);
706 if (!is_migrate_isolate(migratetype))
707 __mod_zone_freepage_state(zone, (1 << order), migratetype);
c0a32fc5
SG
708}
709#else
980ac167 710struct page_ext_operations debug_guardpage_ops;
acbc15a4
JK
711static inline bool set_page_guard(struct zone *zone, struct page *page,
712 unsigned int order, int migratetype) { return false; }
2847cf95
JK
713static inline void clear_page_guard(struct zone *zone, struct page *page,
714 unsigned int order, int migratetype) {}
c0a32fc5
SG
715#endif
716
7aeb09f9 717static inline void set_page_order(struct page *page, unsigned int order)
6aa3001b 718{
4c21e2f2 719 set_page_private(page, order);
676165a8 720 __SetPageBuddy(page);
1da177e4
LT
721}
722
723static inline void rmv_page_order(struct page *page)
724{
676165a8 725 __ClearPageBuddy(page);
4c21e2f2 726 set_page_private(page, 0);
1da177e4
LT
727}
728
1da177e4
LT
729/*
730 * This function checks whether a page is free && is the buddy
731 * we can do coalesce a page and its buddy if
13ad59df 732 * (a) the buddy is not in a hole (check before calling!) &&
676165a8 733 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
734 * (c) a page and its buddy have the same order &&
735 * (d) a page and its buddy are in the same zone.
676165a8 736 *
cf6fe945
WSH
737 * For recording whether a page is in the buddy system, we set ->_mapcount
738 * PAGE_BUDDY_MAPCOUNT_VALUE.
739 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
740 * serialized by zone->lock.
1da177e4 741 *
676165a8 742 * For recording page's order, we use page_private(page).
1da177e4 743 */
cb2b95e1 744static inline int page_is_buddy(struct page *page, struct page *buddy,
7aeb09f9 745 unsigned int order)
1da177e4 746{
c0a32fc5 747 if (page_is_guard(buddy) && page_order(buddy) == order) {
d34c5fa0
MG
748 if (page_zone_id(page) != page_zone_id(buddy))
749 return 0;
750
4c5018ce
WY
751 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
752
c0a32fc5
SG
753 return 1;
754 }
755
cb2b95e1 756 if (PageBuddy(buddy) && page_order(buddy) == order) {
d34c5fa0
MG
757 /*
758 * zone check is done late to avoid uselessly
759 * calculating zone/node ids for pages that could
760 * never merge.
761 */
762 if (page_zone_id(page) != page_zone_id(buddy))
763 return 0;
764
4c5018ce
WY
765 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
766
6aa3001b 767 return 1;
676165a8 768 }
6aa3001b 769 return 0;
1da177e4
LT
770}
771
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. The page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
796
48db57f8 797static inline void __free_one_page(struct page *page,
dc4b0caf 798 unsigned long pfn,
ed0ae21d
MG
799 struct zone *zone, unsigned int order,
800 int migratetype)
1da177e4 801{
76741e77
VB
802 unsigned long combined_pfn;
803 unsigned long uninitialized_var(buddy_pfn);
6dda9d55 804 struct page *buddy;
d9dddbf5
VB
805 unsigned int max_order;
806
807 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
1da177e4 808
d29bb978 809 VM_BUG_ON(!zone_is_initialized(zone));
6e9f0d58 810 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1da177e4 811
ed0ae21d 812 VM_BUG_ON(migratetype == -1);
d9dddbf5 813 if (likely(!is_migrate_isolate(migratetype)))
8f82b55d 814 __mod_zone_freepage_state(zone, 1 << order, migratetype);
ed0ae21d 815
76741e77 816 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
309381fe 817 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1da177e4 818
d9dddbf5 819continue_merging:
3c605096 820 while (order < max_order - 1) {
76741e77
VB
821 buddy_pfn = __find_buddy_pfn(pfn, order);
822 buddy = page + (buddy_pfn - pfn);
13ad59df
VB
823
824 if (!pfn_valid_within(buddy_pfn))
825 goto done_merging;
cb2b95e1 826 if (!page_is_buddy(page, buddy, order))
d9dddbf5 827 goto done_merging;
c0a32fc5
SG
828 /*
829 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
830 * merge with it and move up one order.
831 */
832 if (page_is_guard(buddy)) {
2847cf95 833 clear_page_guard(zone, buddy, order, migratetype);
c0a32fc5
SG
834 } else {
835 list_del(&buddy->lru);
836 zone->free_area[order].nr_free--;
837 rmv_page_order(buddy);
838 }
76741e77
VB
839 combined_pfn = buddy_pfn & pfn;
840 page = page + (combined_pfn - pfn);
841 pfn = combined_pfn;
1da177e4
LT
842 order++;
843 }
d9dddbf5
VB
844 if (max_order < MAX_ORDER) {
845 /* If we are here, it means order is >= pageblock_order.
846 * We want to prevent merge between freepages on isolate
847 * pageblock and normal pageblock. Without this, pageblock
848 * isolation could cause incorrect freepage or CMA accounting.
849 *
850 * We don't want to hit this code for the more frequent
851 * low-order merging.
852 */
853 if (unlikely(has_isolate_pageblock(zone))) {
854 int buddy_mt;
855
76741e77
VB
856 buddy_pfn = __find_buddy_pfn(pfn, order);
857 buddy = page + (buddy_pfn - pfn);
d9dddbf5
VB
858 buddy_mt = get_pageblock_migratetype(buddy);
859
860 if (migratetype != buddy_mt
861 && (is_migrate_isolate(migratetype) ||
862 is_migrate_isolate(buddy_mt)))
863 goto done_merging;
864 }
865 max_order++;
866 goto continue_merging;
867 }
868
869done_merging:
1da177e4 870 set_page_order(page, order);
6dda9d55
CZ
871
872 /*
873 * If this is not the largest possible page, check if the buddy
874 * of the next-highest order is free. If it is, it's possible
875 * that pages are being freed that will coalesce soon. In case,
876 * that is happening, add the free page to the tail of the list
877 * so it's less likely to be used soon and more likely to be merged
878 * as a higher order page
879 */
13ad59df 880 if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
6dda9d55 881 struct page *higher_page, *higher_buddy;
76741e77
VB
882 combined_pfn = buddy_pfn & pfn;
883 higher_page = page + (combined_pfn - pfn);
884 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
885 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
b4fb8f66
TL
886 if (pfn_valid_within(buddy_pfn) &&
887 page_is_buddy(higher_page, higher_buddy, order + 1)) {
6dda9d55
CZ
888 list_add_tail(&page->lru,
889 &zone->free_area[order].free_list[migratetype]);
890 goto out;
891 }
892 }
893
894 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
895out:
1da177e4
LT
896 zone->free_area[order].nr_free++;
897}
898
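/*
 * Worked example (illustrative, not part of the original file): freeing an
 * order-3 block at pfn 8, __find_buddy_pfn(8, 3) = 8 ^ (1 << 3) = 0. If the
 * order-3 block at pfn 0 is also free, the two are merged into an order-4
 * block starting at combined_pfn = buddy_pfn & pfn = 0, and the loop above
 * retries the merge at order 4.
 */
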
7bfec6f4
MG
899/*
900 * A bad page could be due to a number of fields. Instead of multiple branches,
901 * try and check multiple fields with one check. The caller must do a detailed
902 * check if necessary.
903 */
904static inline bool page_expected_state(struct page *page,
905 unsigned long check_flags)
906{
907 if (unlikely(atomic_read(&page->_mapcount) != -1))
908 return false;
909
910 if (unlikely((unsigned long)page->mapping |
911 page_ref_count(page) |
912#ifdef CONFIG_MEMCG
913 (unsigned long)page->mem_cgroup |
914#endif
915 (page->flags & check_flags)))
916 return false;
917
918 return true;
919}
920
bb552ac6 921static void free_pages_check_bad(struct page *page)
1da177e4 922{
7bfec6f4
MG
923 const char *bad_reason;
924 unsigned long bad_flags;
925
7bfec6f4
MG
926 bad_reason = NULL;
927 bad_flags = 0;
f0b791a3 928
53f9263b 929 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
930 bad_reason = "nonzero mapcount";
931 if (unlikely(page->mapping != NULL))
932 bad_reason = "non-NULL mapping";
fe896d18 933 if (unlikely(page_ref_count(page) != 0))
0139aa7b 934 bad_reason = "nonzero _refcount";
f0b791a3
DH
935 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
936 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
937 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
938 }
9edad6ea
JW
939#ifdef CONFIG_MEMCG
940 if (unlikely(page->mem_cgroup))
941 bad_reason = "page still charged to cgroup";
942#endif
7bfec6f4 943 bad_page(page, bad_reason, bad_flags);
bb552ac6
MG
944}
945
946static inline int free_pages_check(struct page *page)
947{
da838d4f 948 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
bb552ac6 949 return 0;
bb552ac6
MG
950
951 /* Something has gone sideways, find it */
952 free_pages_check_bad(page);
7bfec6f4 953 return 1;
1da177e4
LT
954}
955
4db7548c
MG
956static int free_tail_pages_check(struct page *head_page, struct page *page)
957{
958 int ret = 1;
959
960 /*
961 * We rely on page->lru.next never having bit 0 set, unless the page
962 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
963 */
964 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
965
966 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
967 ret = 0;
968 goto out;
969 }
970 switch (page - head_page) {
971 case 1:
972 /* the first tail page: ->mapping is compound_mapcount() */
973 if (unlikely(compound_mapcount(page))) {
974 bad_page(page, "nonzero compound_mapcount", 0);
975 goto out;
976 }
977 break;
978 case 2:
979 /*
980 * the second tail page: ->mapping is
981 * page_deferred_list().next -- ignore value.
982 */
983 break;
984 default:
985 if (page->mapping != TAIL_MAPPING) {
986 bad_page(page, "corrupted mapping in tail page", 0);
987 goto out;
988 }
989 break;
990 }
991 if (unlikely(!PageTail(page))) {
992 bad_page(page, "PageTail not set", 0);
993 goto out;
994 }
995 if (unlikely(compound_head(page) != head_page)) {
996 bad_page(page, "compound_head not consistent", 0);
997 goto out;
998 }
999 ret = 0;
1000out:
1001 page->mapping = NULL;
1002 clear_compound_head(page);
1003 return ret;
1004}
1005
e2769dbd
MG
1006static __always_inline bool free_pages_prepare(struct page *page,
1007 unsigned int order, bool check_free)
4db7548c 1008{
e2769dbd 1009 int bad = 0;
4db7548c 1010
4db7548c
MG
1011 VM_BUG_ON_PAGE(PageTail(page), page);
1012
e2769dbd
MG
1013 trace_mm_page_free(page, order);
1014 kmemcheck_free_shadow(page, order);
e2769dbd
MG
1015
1016 /*
1017 * Check tail pages before head page information is cleared to
1018 * avoid checking PageCompound for order-0 pages.
1019 */
1020 if (unlikely(order)) {
1021 bool compound = PageCompound(page);
1022 int i;
1023
1024 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
4db7548c 1025
9a73f61b
KS
1026 if (compound)
1027 ClearPageDoubleMap(page);
e2769dbd
MG
1028 for (i = 1; i < (1 << order); i++) {
1029 if (compound)
1030 bad += free_tail_pages_check(page, page + i);
1031 if (unlikely(free_pages_check(page + i))) {
1032 bad++;
1033 continue;
1034 }
1035 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1036 }
1037 }
bda807d4 1038 if (PageMappingFlags(page))
4db7548c 1039 page->mapping = NULL;
c4159a75 1040 if (memcg_kmem_enabled() && PageKmemcg(page))
4949148a 1041 memcg_kmem_uncharge(page, order);
e2769dbd
MG
1042 if (check_free)
1043 bad += free_pages_check(page);
1044 if (bad)
1045 return false;
4db7548c 1046
e2769dbd
MG
1047 page_cpupid_reset_last(page);
1048 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1049 reset_page_owner(page, order);
4db7548c
MG
1050
1051 if (!PageHighMem(page)) {
1052 debug_check_no_locks_freed(page_address(page),
e2769dbd 1053 PAGE_SIZE << order);
4db7548c 1054 debug_check_no_obj_freed(page_address(page),
e2769dbd 1055 PAGE_SIZE << order);
4db7548c 1056 }
e2769dbd
MG
1057 arch_free_page(page, order);
1058 kernel_poison_pages(page, 1 << order, 0);
1059 kernel_map_pages(page, 1 << order, 0);
29b52de1 1060 kasan_free_pages(page, order);
4db7548c 1061
4db7548c
MG
1062 return true;
1063}
1064
e2769dbd
MG
1065#ifdef CONFIG_DEBUG_VM
1066static inline bool free_pcp_prepare(struct page *page)
1067{
1068 return free_pages_prepare(page, 0, true);
1069}
1070
1071static inline bool bulkfree_pcp_prepare(struct page *page)
1072{
1073 return false;
1074}
1075#else
1076static bool free_pcp_prepare(struct page *page)
1077{
1078 return free_pages_prepare(page, 0, false);
1079}
1080
4db7548c
MG
1081static bool bulkfree_pcp_prepare(struct page *page)
1082{
1083 return free_pages_check(page);
1084}
1085#endif /* CONFIG_DEBUG_VM */
1086
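/*
 * Summary of the #ifdef above (added note): with CONFIG_DEBUG_VM the full
 * free_pages_check() runs when a page enters the pcplist and nothing is
 * re-checked at drain time; without it the expensive check is deferred to
 * bulkfree_pcp_prepare() so the per-page free fast path stays cheap.
 */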
1da177e4 1087/*
5f8dcc21 1088 * Frees a number of pages from the PCP lists
1da177e4 1089 * Assumes all pages on list are in same zone, and of same order.
207f36ee 1090 * count is the number of pages to free.
1da177e4
LT
1091 *
1092 * If the zone was previously in an "all pages pinned" state then look to
1093 * see if this freeing clears that state.
1094 *
1095 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1096 * pinned" detection logic.
1097 */
5f8dcc21
MG
1098static void free_pcppages_bulk(struct zone *zone, int count,
1099 struct per_cpu_pages *pcp)
1da177e4 1100{
5f8dcc21 1101 int migratetype = 0;
a6f9edd6 1102 int batch_free = 0;
3777999d 1103 bool isolated_pageblocks;
5f8dcc21 1104
d34b0733 1105 spin_lock(&zone->lock);
3777999d 1106 isolated_pageblocks = has_isolate_pageblock(zone);
f2260e6b 1107
e5b31ac2 1108 while (count) {
48db57f8 1109 struct page *page;
5f8dcc21
MG
1110 struct list_head *list;
1111
1112 /*
a6f9edd6
MG
1113 * Remove pages from lists in a round-robin fashion. A
1114 * batch_free count is maintained that is incremented when an
1115 * empty list is encountered. This is so more pages are freed
1116 * off fuller lists instead of spinning excessively around empty
1117 * lists
5f8dcc21
MG
1118 */
1119 do {
a6f9edd6 1120 batch_free++;
5f8dcc21
MG
1121 if (++migratetype == MIGRATE_PCPTYPES)
1122 migratetype = 0;
1123 list = &pcp->lists[migratetype];
1124 } while (list_empty(list));
48db57f8 1125
1d16871d
NK
1126 /* This is the only non-empty list. Free them all. */
1127 if (batch_free == MIGRATE_PCPTYPES)
e5b31ac2 1128 batch_free = count;
1d16871d 1129
a6f9edd6 1130 do {
770c8aaa
BZ
1131 int mt; /* migratetype of the to-be-freed page */
1132
a16601c5 1133 page = list_last_entry(list, struct page, lru);
a6f9edd6
MG
1134 /* must delete as __free_one_page list manipulates */
1135 list_del(&page->lru);
aa016d14 1136
bb14c2c7 1137 mt = get_pcppage_migratetype(page);
aa016d14
VB
1138 /* MIGRATE_ISOLATE page should not go to pcplists */
1139 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1140 /* Pageblock could have been isolated meanwhile */
3777999d 1141 if (unlikely(isolated_pageblocks))
51bb1a40 1142 mt = get_pageblock_migratetype(page);
51bb1a40 1143
4db7548c
MG
1144 if (bulkfree_pcp_prepare(page))
1145 continue;
1146
dc4b0caf 1147 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
770c8aaa 1148 trace_mm_page_pcpu_drain(page, 0, mt);
e5b31ac2 1149 } while (--count && --batch_free && !list_empty(list));
1da177e4 1150 }
d34b0733 1151 spin_unlock(&zone->lock);
1da177e4
LT
1152}
1153
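/*
 * Illustrative behaviour of the round-robin above (not part of the original
 * file): each empty list encountered bumps batch_free, so fuller lists donate
 * more pages per pass; once batch_free reaches MIGRATE_PCPTYPES every other
 * list was empty and the remaining count is taken entirely from the single
 * non-empty list.
 */
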
dc4b0caf
MG
1154static void free_one_page(struct zone *zone,
1155 struct page *page, unsigned long pfn,
7aeb09f9 1156 unsigned int order,
ed0ae21d 1157 int migratetype)
1da177e4 1158{
d34b0733 1159 spin_lock(&zone->lock);
ad53f92e
JK
1160 if (unlikely(has_isolate_pageblock(zone) ||
1161 is_migrate_isolate(migratetype))) {
1162 migratetype = get_pfnblock_migratetype(page, pfn);
ad53f92e 1163 }
dc4b0caf 1164 __free_one_page(page, pfn, zone, order, migratetype);
d34b0733 1165 spin_unlock(&zone->lock);
48db57f8
NP
1166}
1167
1e8ce83c
RH
1168static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1169 unsigned long zone, int nid)
1170{
1e8ce83c 1171 set_page_links(page, zone, nid, pfn);
1e8ce83c
RH
1172 init_page_count(page);
1173 page_mapcount_reset(page);
1174 page_cpupid_reset_last(page);
1e8ce83c 1175
1e8ce83c
RH
1176 INIT_LIST_HEAD(&page->lru);
1177#ifdef WANT_PAGE_VIRTUAL
1178 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1179 if (!is_highmem_idx(zone))
1180 set_page_address(page, __va(pfn << PAGE_SHIFT));
1181#endif
1182}
1183
1184static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1185 int nid)
1186{
1187 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1188}
1189
7e18adb4
MG
1190#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1191static void init_reserved_page(unsigned long pfn)
1192{
1193 pg_data_t *pgdat;
1194 int nid, zid;
1195
1196 if (!early_page_uninitialised(pfn))
1197 return;
1198
1199 nid = early_pfn_to_nid(pfn);
1200 pgdat = NODE_DATA(nid);
1201
1202 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1203 struct zone *zone = &pgdat->node_zones[zid];
1204
1205 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1206 break;
1207 }
1208 __init_single_pfn(pfn, zid, nid);
1209}
1210#else
1211static inline void init_reserved_page(unsigned long pfn)
1212{
1213}
1214#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1215
92923ca3
NZ
1216/*
1217 * Initialised pages do not have PageReserved set. This function is
1218 * called for each range allocated by the bootmem allocator and
1219 * marks the pages PageReserved. The remaining valid pages are later
1220 * sent to the buddy page allocator.
1221 */
4b50bcc7 1222void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
92923ca3
NZ
1223{
1224 unsigned long start_pfn = PFN_DOWN(start);
1225 unsigned long end_pfn = PFN_UP(end);
1226
7e18adb4
MG
1227 for (; start_pfn < end_pfn; start_pfn++) {
1228 if (pfn_valid(start_pfn)) {
1229 struct page *page = pfn_to_page(start_pfn);
1230
1231 init_reserved_page(start_pfn);
1d798ca3
KS
1232
1233 /* Avoid false-positive PageTail() */
1234 INIT_LIST_HEAD(&page->lru);
1235
7e18adb4
MG
1236 SetPageReserved(page);
1237 }
1238 }
92923ca3
NZ
1239}
1240
ec95f53a
KM
1241static void __free_pages_ok(struct page *page, unsigned int order)
1242{
d34b0733 1243 unsigned long flags;
95e34412 1244 int migratetype;
dc4b0caf 1245 unsigned long pfn = page_to_pfn(page);
ec95f53a 1246
e2769dbd 1247 if (!free_pages_prepare(page, order, true))
ec95f53a
KM
1248 return;
1249
cfc47a28 1250 migratetype = get_pfnblock_migratetype(page, pfn);
d34b0733
MG
1251 local_irq_save(flags);
1252 __count_vm_events(PGFREE, 1 << order);
dc4b0caf 1253 free_one_page(page_zone(page), page, pfn, order, migratetype);
d34b0733 1254 local_irq_restore(flags);
1da177e4
LT
1255}
1256
949698a3 1257static void __init __free_pages_boot_core(struct page *page, unsigned int order)
a226f6c8 1258{
c3993076 1259 unsigned int nr_pages = 1 << order;
e2d0bd2b 1260 struct page *p = page;
c3993076 1261 unsigned int loop;
a226f6c8 1262
e2d0bd2b
YL
1263 prefetchw(p);
1264 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1265 prefetchw(p + 1);
c3993076
JW
1266 __ClearPageReserved(p);
1267 set_page_count(p, 0);
a226f6c8 1268 }
e2d0bd2b
YL
1269 __ClearPageReserved(p);
1270 set_page_count(p, 0);
c3993076 1271
e2d0bd2b 1272 page_zone(page)->managed_pages += nr_pages;
c3993076
JW
1273 set_page_refcounted(page);
1274 __free_pages(page, order);
a226f6c8
DH
1275}
1276
75a592a4
MG
1277#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1278 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
7ace9917 1279
75a592a4
MG
1280static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1281
1282int __meminit early_pfn_to_nid(unsigned long pfn)
1283{
7ace9917 1284 static DEFINE_SPINLOCK(early_pfn_lock);
75a592a4
MG
1285 int nid;
1286
7ace9917 1287 spin_lock(&early_pfn_lock);
75a592a4 1288 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
7ace9917 1289 if (nid < 0)
e4568d38 1290 nid = first_online_node;
7ace9917
MG
1291 spin_unlock(&early_pfn_lock);
1292
1293 return nid;
75a592a4
MG
1294}
1295#endif
1296
1297#ifdef CONFIG_NODES_SPAN_OTHER_NODES
d73d3c9f
MK
1298static inline bool __meminit __maybe_unused
1299meminit_pfn_in_nid(unsigned long pfn, int node,
1300 struct mminit_pfnnid_cache *state)
75a592a4
MG
1301{
1302 int nid;
1303
1304 nid = __early_pfn_to_nid(pfn, state);
1305 if (nid >= 0 && nid != node)
1306 return false;
1307 return true;
1308}
1309
1310/* Only safe to use early in boot when initialisation is single-threaded */
1311static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1312{
1313 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1314}
1315
1316#else
1317
1318static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1319{
1320 return true;
1321}
d73d3c9f
MK
1322static inline bool __meminit __maybe_unused
1323meminit_pfn_in_nid(unsigned long pfn, int node,
1324 struct mminit_pfnnid_cache *state)
75a592a4
MG
1325{
1326 return true;
1327}
1328#endif
1329
1330
0e1cc95b 1331void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
3a80a7fa
MG
1332 unsigned int order)
1333{
1334 if (early_page_uninitialised(pfn))
1335 return;
949698a3 1336 return __free_pages_boot_core(page, order);
3a80a7fa
MG
1337}
1338
7cf91a98
JK
1339/*
1340 * Check that the whole (or subset of) a pageblock given by the interval of
1341 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1342 * with the migration of free compaction scanner. The scanners then need to
1343 * use only pfn_valid_within() check for arches that allow holes within
1344 * pageblocks.
1345 *
1346 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1347 *
1348 * It's possible on some configurations to have a setup like node0 node1 node0
1349 * i.e. it's possible that all pages within a zones range of pages do not
1350 * belong to a single zone. We assume that a border between node0 and node1
1351 * can occur within a single pageblock, but not a node0 node1 node0
1352 * interleaving within a single pageblock. It is therefore sufficient to check
1353 * the first and last page of a pageblock and avoid checking each individual
1354 * page in a pageblock.
1355 */
1356struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1357 unsigned long end_pfn, struct zone *zone)
1358{
1359 struct page *start_page;
1360 struct page *end_page;
1361
1362 /* end_pfn is one past the range we are checking */
1363 end_pfn--;
1364
1365 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1366 return NULL;
1367
2d070eab
MH
1368 start_page = pfn_to_online_page(start_pfn);
1369 if (!start_page)
1370 return NULL;
7cf91a98
JK
1371
1372 if (page_zone(start_page) != zone)
1373 return NULL;
1374
1375 end_page = pfn_to_page(end_pfn);
1376
1377 /* This gives a shorter code than deriving page_zone(end_page) */
1378 if (page_zone_id(start_page) != page_zone_id(end_page))
1379 return NULL;
1380
1381 return start_page;
1382}
1383
1384void set_zone_contiguous(struct zone *zone)
1385{
1386 unsigned long block_start_pfn = zone->zone_start_pfn;
1387 unsigned long block_end_pfn;
1388
1389 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1390 for (; block_start_pfn < zone_end_pfn(zone);
1391 block_start_pfn = block_end_pfn,
1392 block_end_pfn += pageblock_nr_pages) {
1393
1394 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1395
1396 if (!__pageblock_pfn_to_page(block_start_pfn,
1397 block_end_pfn, zone))
1398 return;
1399 }
1400
1401 /* We confirm that there is no hole */
1402 zone->contiguous = true;
1403}
1404
1405void clear_zone_contiguous(struct zone *zone)
1406{
1407 zone->contiguous = false;
1408}
1409
7e18adb4 1410#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
0e1cc95b 1411static void __init deferred_free_range(struct page *page,
a4de83dd
MG
1412 unsigned long pfn, int nr_pages)
1413{
1414 int i;
1415
1416 if (!page)
1417 return;
1418
1419 /* Free a large naturally-aligned chunk if possible */
e780149b
XQ
1420 if (nr_pages == pageblock_nr_pages &&
1421 (pfn & (pageblock_nr_pages - 1)) == 0) {
ac5d2539 1422 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
e780149b 1423 __free_pages_boot_core(page, pageblock_order);
a4de83dd
MG
1424 return;
1425 }
1426
e780149b
XQ
1427 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1428 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1429 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
949698a3 1430 __free_pages_boot_core(page, 0);
e780149b 1431 }
a4de83dd
MG
1432}
1433
d3cd131d
NS
1434/* Completion tracking for deferred_init_memmap() threads */
1435static atomic_t pgdat_init_n_undone __initdata;
1436static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1437
1438static inline void __init pgdat_init_report_one_done(void)
1439{
1440 if (atomic_dec_and_test(&pgdat_init_n_undone))
1441 complete(&pgdat_init_all_done_comp);
1442}
0e1cc95b 1443
7e18adb4 1444/* Initialise remaining memory on a node */
0e1cc95b 1445static int __init deferred_init_memmap(void *data)
7e18adb4 1446{
0e1cc95b
MG
1447 pg_data_t *pgdat = data;
1448 int nid = pgdat->node_id;
7e18adb4
MG
1449 struct mminit_pfnnid_cache nid_init_state = { };
1450 unsigned long start = jiffies;
1451 unsigned long nr_pages = 0;
1452 unsigned long walk_start, walk_end;
1453 int i, zid;
1454 struct zone *zone;
7e18adb4 1455 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
0e1cc95b 1456 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
7e18adb4 1457
0e1cc95b 1458 if (first_init_pfn == ULONG_MAX) {
d3cd131d 1459 pgdat_init_report_one_done();
0e1cc95b
MG
1460 return 0;
1461 }
1462
1463 /* Bind memory initialisation thread to a local node if possible */
1464 if (!cpumask_empty(cpumask))
1465 set_cpus_allowed_ptr(current, cpumask);
7e18adb4
MG
1466
1467 /* Sanity check boundaries */
1468 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1469 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1470 pgdat->first_deferred_pfn = ULONG_MAX;
1471
1472 /* Only the highest zone is deferred so find it */
1473 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1474 zone = pgdat->node_zones + zid;
1475 if (first_init_pfn < zone_end_pfn(zone))
1476 break;
1477 }
1478
1479 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1480 unsigned long pfn, end_pfn;
54608c3f 1481 struct page *page = NULL;
a4de83dd
MG
1482 struct page *free_base_page = NULL;
1483 unsigned long free_base_pfn = 0;
1484 int nr_to_free = 0;
7e18adb4
MG
1485
1486 end_pfn = min(walk_end, zone_end_pfn(zone));
1487 pfn = first_init_pfn;
1488 if (pfn < walk_start)
1489 pfn = walk_start;
1490 if (pfn < zone->zone_start_pfn)
1491 pfn = zone->zone_start_pfn;
1492
1493 for (; pfn < end_pfn; pfn++) {
54608c3f 1494 if (!pfn_valid_within(pfn))
a4de83dd 1495 goto free_range;
7e18adb4 1496
54608c3f
MG
1497 /*
1498 * Ensure pfn_valid is checked every
e780149b 1499 * pageblock_nr_pages for memory holes
54608c3f 1500 */
e780149b 1501 if ((pfn & (pageblock_nr_pages - 1)) == 0) {
54608c3f
MG
1502 if (!pfn_valid(pfn)) {
1503 page = NULL;
a4de83dd 1504 goto free_range;
54608c3f
MG
1505 }
1506 }
1507
1508 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1509 page = NULL;
a4de83dd 1510 goto free_range;
54608c3f
MG
1511 }
1512
1513 /* Minimise pfn page lookups and scheduler checks */
e780149b 1514 if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
54608c3f
MG
1515 page++;
1516 } else {
a4de83dd
MG
1517 nr_pages += nr_to_free;
1518 deferred_free_range(free_base_page,
1519 free_base_pfn, nr_to_free);
1520 free_base_page = NULL;
1521 free_base_pfn = nr_to_free = 0;
1522
54608c3f
MG
1523 page = pfn_to_page(pfn);
1524 cond_resched();
1525 }
7e18adb4
MG
1526
1527 if (page->flags) {
1528 VM_BUG_ON(page_zone(page) != zone);
a4de83dd 1529 goto free_range;
7e18adb4
MG
1530 }
1531
1532 __init_single_page(page, pfn, zid, nid);
a4de83dd
MG
1533 if (!free_base_page) {
1534 free_base_page = page;
1535 free_base_pfn = pfn;
1536 nr_to_free = 0;
1537 }
1538 nr_to_free++;
1539
1540 /* Where possible, batch up pages for a single free */
1541 continue;
1542free_range:
1543 /* Free the current block of pages to allocator */
1544 nr_pages += nr_to_free;
1545 deferred_free_range(free_base_page, free_base_pfn,
1546 nr_to_free);
1547 free_base_page = NULL;
1548 free_base_pfn = nr_to_free = 0;
7e18adb4 1549 }
e780149b
XQ
1550 /* Free the last block of pages to allocator */
1551 nr_pages += nr_to_free;
1552 deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
a4de83dd 1553
7e18adb4
MG
1554 first_init_pfn = max(end_pfn, first_init_pfn);
1555 }
1556
1557 /* Sanity check that the next zone really is unpopulated */
1558 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1559
0e1cc95b 1560 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
7e18adb4 1561 jiffies_to_msecs(jiffies - start));
d3cd131d
NS
1562
1563 pgdat_init_report_one_done();
0e1cc95b
MG
1564 return 0;
1565}
7cf91a98 1566#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
0e1cc95b
MG
1567
1568void __init page_alloc_init_late(void)
1569{
7cf91a98
JK
1570 struct zone *zone;
1571
1572#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
0e1cc95b
MG
1573 int nid;
1574
d3cd131d
NS
1575 /* There will be num_node_state(N_MEMORY) threads */
1576 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
0e1cc95b 1577 for_each_node_state(nid, N_MEMORY) {
0e1cc95b
MG
1578 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1579 }
1580
1581 /* Block until all are initialised */
d3cd131d 1582 wait_for_completion(&pgdat_init_all_done_comp);
4248b0da
MG
1583
1584 /* Reinit limits that are based on free pages after the kernel is up */
1585 files_maxfiles_init();
7cf91a98
JK
1586#endif
1587
1588 for_each_populated_zone(zone)
1589 set_zone_contiguous(zone);
7e18adb4 1590}
7e18adb4 1591
47118af0 1592#ifdef CONFIG_CMA
9cf510a5 1593/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
47118af0
MN
1594void __init init_cma_reserved_pageblock(struct page *page)
1595{
1596 unsigned i = pageblock_nr_pages;
1597 struct page *p = page;
1598
1599 do {
1600 __ClearPageReserved(p);
1601 set_page_count(p, 0);
1602 } while (++p, --i);
1603
47118af0 1604 set_pageblock_migratetype(page, MIGRATE_CMA);
dc78327c
MN
1605
1606 if (pageblock_order >= MAX_ORDER) {
1607 i = pageblock_nr_pages;
1608 p = page;
1609 do {
1610 set_page_refcounted(p);
1611 __free_pages(p, MAX_ORDER - 1);
1612 p += MAX_ORDER_NR_PAGES;
1613 } while (i -= MAX_ORDER_NR_PAGES);
1614 } else {
1615 set_page_refcounted(page);
1616 __free_pages(page, pageblock_order);
1617 }
1618
3dcc0571 1619 adjust_managed_page_count(page, pageblock_nr_pages);
47118af0
MN
1620}
1621#endif
1da177e4
LT
1622
1623/*
1624 * The order of subdivision here is critical for the IO subsystem.
1625 * Please do not alter this order without good reasons and regression
1626 * testing. Specifically, as large blocks of memory are subdivided,
1627 * the order in which smaller blocks are delivered depends on the order
1628 * they're subdivided in this function. This is the primary factor
1629 * influencing the order in which pages are delivered to the IO
1630 * subsystem according to empirical testing, and this is also justified
1631 * by considering the behavior of a buddy system containing a single
1632 * large block of memory acted on by a series of small allocations.
1633 * This behavior is a critical factor in sglist merging's success.
1634 *
6d49e352 1635 * -- nyc
1da177e4 1636 */
085cc7d5 1637static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
1638 int low, int high, struct free_area *area,
1639 int migratetype)
1da177e4
LT
1640{
1641 unsigned long size = 1 << high;
1642
1643 while (high > low) {
1644 area--;
1645 high--;
1646 size >>= 1;
309381fe 1647 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
c0a32fc5 1648
acbc15a4
JK
1649 /*
1650 * Mark as guard pages (or page), so that they can be merged back
1651 * into the allocator when the buddy is freed. The corresponding page
1652 * table entries will not be touched; the pages will stay not-present
1653 * in the virtual address space.
1654 */
1655 if (set_page_guard(zone, &page[size], high, migratetype))
c0a32fc5 1656 continue;
acbc15a4 1657
b2a0ac88 1658 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
1659 area->nr_free++;
1660 set_page_order(&page[size], high);
1661 }
1da177e4
LT
1662}
1663
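/*
 * Worked example (illustrative, not part of the original file): satisfying an
 * order-0 request from an order-3 free block calls expand() with low = 0 and
 * high = 3; the upper halves of 4, 2 and 1 pages go back onto the order-2,
 * order-1 and order-0 free lists respectively, and the caller keeps the
 * first page of the original block.
 */
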
4e611801 1664static void check_new_page_bad(struct page *page)
1da177e4 1665{
4e611801
VB
1666 const char *bad_reason = NULL;
1667 unsigned long bad_flags = 0;
7bfec6f4 1668
53f9263b 1669 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
1670 bad_reason = "nonzero mapcount";
1671 if (unlikely(page->mapping != NULL))
1672 bad_reason = "non-NULL mapping";
fe896d18 1673 if (unlikely(page_ref_count(page) != 0))
f0b791a3 1674 bad_reason = "nonzero _count";
f4c18e6f
NH
1675 if (unlikely(page->flags & __PG_HWPOISON)) {
1676 bad_reason = "HWPoisoned (hardware-corrupted)";
1677 bad_flags = __PG_HWPOISON;
e570f56c
NH
1678 /* Don't complain about hwpoisoned pages */
1679 page_mapcount_reset(page); /* remove PageBuddy */
1680 return;
f4c18e6f 1681 }
f0b791a3
DH
1682 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1683 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1684 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1685 }
9edad6ea
JW
1686#ifdef CONFIG_MEMCG
1687 if (unlikely(page->mem_cgroup))
1688 bad_reason = "page still charged to cgroup";
1689#endif
4e611801
VB
1690 bad_page(page, bad_reason, bad_flags);
1691}
1692
1693/*
1694 * This page is about to be returned from the page allocator
1695 */
1696static inline int check_new_page(struct page *page)
1697{
1698 if (likely(page_expected_state(page,
1699 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1700 return 0;
1701
1702 check_new_page_bad(page);
1703 return 1;
2a7684a2
WF
1704}
1705
bd33ef36 1706static inline bool free_pages_prezeroed(void)
1414c7f4
LA
1707{
1708 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
bd33ef36 1709 page_poisoning_enabled();
1414c7f4
LA
1710}
1711
479f854a
MG
1712#ifdef CONFIG_DEBUG_VM
1713static bool check_pcp_refill(struct page *page)
1714{
1715 return false;
1716}
1717
1718static bool check_new_pcp(struct page *page)
1719{
1720 return check_new_page(page);
1721}
1722#else
1723static bool check_pcp_refill(struct page *page)
1724{
1725 return check_new_page(page);
1726}
1727static bool check_new_pcp(struct page *page)
1728{
1729 return false;
1730}
1731#endif /* CONFIG_DEBUG_VM */
1732
1733static bool check_new_pages(struct page *page, unsigned int order)
1734{
1735 int i;
1736 for (i = 0; i < (1 << order); i++) {
1737 struct page *p = page + i;
1738
1739 if (unlikely(check_new_page(p)))
1740 return true;
1741 }
1742
1743 return false;
1744}
1745
46f24fd8
JK
1746inline void post_alloc_hook(struct page *page, unsigned int order,
1747 gfp_t gfp_flags)
1748{
1749 set_page_private(page, 0);
1750 set_page_refcounted(page);
1751
1752 arch_alloc_page(page, order);
1753 kernel_map_pages(page, 1 << order, 1);
1754 kernel_poison_pages(page, 1 << order, 1);
1755 kasan_alloc_pages(page, order);
1756 set_page_owner(page, order, gfp_flags);
1757}
1758
479f854a 1759static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
c603844b 1760 unsigned int alloc_flags)
2a7684a2
WF
1761{
1762 int i;
689bcebf 1763
46f24fd8 1764 post_alloc_hook(page, order, gfp_flags);
17cf4406 1765
bd33ef36 1766 if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
f4d2897b
AA
1767 for (i = 0; i < (1 << order); i++)
1768 clear_highpage(page + i);
17cf4406
NP
1769
1770 if (order && (gfp_flags & __GFP_COMP))
1771 prep_compound_page(page, order);
1772
75379191 1773 /*
2f064f34 1774 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
75379191
VB
1775 * allocate the page. The expectation is that the caller is taking
1776 * steps that will free more memory. The caller should avoid the page
1777 * being used for !PFMEMALLOC purposes.
1778 */
2f064f34
MH
1779 if (alloc_flags & ALLOC_NO_WATERMARKS)
1780 set_page_pfmemalloc(page);
1781 else
1782 clear_page_pfmemalloc(page);
1da177e4
LT
1783}
1784
56fd56b8
MG
1785/*
1786 * Go through the free lists for the given migratetype and remove
1787 * the smallest available page from the freelists
1788 */
728ec980
MG
1789static inline
1790struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
1791 int migratetype)
1792{
1793 unsigned int current_order;
b8af2941 1794 struct free_area *area;
56fd56b8
MG
1795 struct page *page;
1796
1797 /* Find a page of the appropriate size in the preferred list */
1798 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1799 area = &(zone->free_area[current_order]);
a16601c5 1800 page = list_first_entry_or_null(&area->free_list[migratetype],
56fd56b8 1801 struct page, lru);
a16601c5
GT
1802 if (!page)
1803 continue;
56fd56b8
MG
1804 list_del(&page->lru);
1805 rmv_page_order(page);
1806 area->nr_free--;
56fd56b8 1807 expand(zone, page, order, current_order, area, migratetype);
bb14c2c7 1808 set_pcppage_migratetype(page, migratetype);
56fd56b8
MG
1809 return page;
1810 }
1811
1812 return NULL;
1813}
1814
1815
b2a0ac88
MG
1816/*
1817 * This array describes the order lists are fallen back to when
1818 * the free lists for the desirable migrate type are depleted
1819 */
47118af0 1820static int fallbacks[MIGRATE_TYPES][4] = {
974a786e
MG
1821 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1822 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1823 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
47118af0 1824#ifdef CONFIG_CMA
974a786e 1825 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
47118af0 1826#endif
194159fb 1827#ifdef CONFIG_MEMORY_ISOLATION
974a786e 1828 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
194159fb 1829#endif
b2a0ac88
MG
1830};
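
/*
 * Example of how the table above is walked: a MIGRATE_UNMOVABLE request
 * whose own free lists are empty tries MIGRATE_RECLAIMABLE first, then
 * MIGRATE_MOVABLE, and stops at the MIGRATE_TYPES terminator.
 * MIGRATE_CMA and MIGRATE_ISOLATE are never used as fallback sources.
 */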
1831
dc67647b
JK
1832#ifdef CONFIG_CMA
1833static struct page *__rmqueue_cma_fallback(struct zone *zone,
1834 unsigned int order)
1835{
1836 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1837}
1838#else
1839static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1840 unsigned int order) { return NULL; }
1841#endif
1842
c361be55
MG
1843/*
1844 * Move the free pages in a range to the free lists of the requested type.
d9c23400 1845 * Note that start_page and end_page are not aligned on a pageblock
c361be55
MG
1846 * boundary. If alignment is required, use move_freepages_block()
1847 */
02aa0cdd 1848static int move_freepages(struct zone *zone,
b69a7288 1849 struct page *start_page, struct page *end_page,
02aa0cdd 1850 int migratetype, int *num_movable)
c361be55
MG
1851{
1852 struct page *page;
d00181b9 1853 unsigned int order;
d100313f 1854 int pages_moved = 0;
c361be55
MG
1855
1856#ifndef CONFIG_HOLES_IN_ZONE
1857 /*
1858 * page_zone is not safe to call in this context when
1859 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1860 * anyway as we check zone boundaries in move_freepages_block().
1861 * Remove at a later date when no bug reports exist related to
ac0e5b7a 1862 * grouping pages by mobility
c361be55 1863 */
97ee4ba7 1864 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
c361be55
MG
1865#endif
1866
02aa0cdd
VB
1867 if (num_movable)
1868 *num_movable = 0;
1869
c361be55
MG
1870 for (page = start_page; page <= end_page;) {
1871 if (!pfn_valid_within(page_to_pfn(page))) {
1872 page++;
1873 continue;
1874 }
1875
f073bdc5
AB
1876 /* Make sure we are not inadvertently changing nodes */
1877 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1878
c361be55 1879 if (!PageBuddy(page)) {
02aa0cdd
VB
1880 /*
1881 * We assume that pages that could be isolated for
1882 * migration are movable. But we don't actually try
1883 * isolating, as that would be expensive.
1884 */
1885 if (num_movable &&
1886 (PageLRU(page) || __PageMovable(page)))
1887 (*num_movable)++;
1888
c361be55
MG
1889 page++;
1890 continue;
1891 }
1892
1893 order = page_order(page);
84be48d8
KS
1894 list_move(&page->lru,
1895 &zone->free_area[order].free_list[migratetype]);
c361be55 1896 page += 1 << order;
d100313f 1897 pages_moved += 1 << order;
c361be55
MG
1898 }
1899
d100313f 1900 return pages_moved;
c361be55
MG
1901}
1902
ee6f509c 1903int move_freepages_block(struct zone *zone, struct page *page,
02aa0cdd 1904 int migratetype, int *num_movable)
c361be55
MG
1905{
1906 unsigned long start_pfn, end_pfn;
1907 struct page *start_page, *end_page;
1908
1909 start_pfn = page_to_pfn(page);
d9c23400 1910 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 1911 start_page = pfn_to_page(start_pfn);
d9c23400
MG
1912 end_page = start_page + pageblock_nr_pages - 1;
1913 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
1914
1915 /* Do not cross zone boundaries */
108bcc96 1916 if (!zone_spans_pfn(zone, start_pfn))
c361be55 1917 start_page = page;
108bcc96 1918 if (!zone_spans_pfn(zone, end_pfn))
c361be55
MG
1919 return 0;
1920
02aa0cdd
VB
1921 return move_freepages(zone, start_page, end_page, migratetype,
1922 num_movable);
c361be55
MG
1923}
1924
2f66a68f
MG
1925static void change_pageblock_range(struct page *pageblock_page,
1926 int start_order, int migratetype)
1927{
1928 int nr_pageblocks = 1 << (start_order - pageblock_order);
1929
1930 while (nr_pageblocks--) {
1931 set_pageblock_migratetype(pageblock_page, migratetype);
1932 pageblock_page += pageblock_nr_pages;
1933 }
1934}
1935
fef903ef 1936/*
9c0415eb
VB
1937 * When we are falling back to another migratetype during allocation, try to
1938 * steal extra free pages from the same pageblocks to satisfy further
1939 * allocations, instead of polluting multiple pageblocks.
1940 *
1941 * If we are stealing a relatively large buddy page, it is likely there will
1942 * be more free pages in the pageblock, so try to steal them all. For
1943 * reclaimable and unmovable allocations, we steal regardless of page size,
1944 * as fragmentation caused by those allocations polluting movable pageblocks
1945 * is worse than movable allocations stealing from unmovable and reclaimable
1946 * pageblocks.
fef903ef 1947 */
4eb7dce6
JK
1948static bool can_steal_fallback(unsigned int order, int start_mt)
1949{
1950 /*
1951 * Leaving this order check here is intentional, although there is
1952 * a more relaxed order check below. The reason is that we can
1953 * steal the whole pageblock if this condition is met, whereas the
1954 * check below does not guarantee that and is just a heuristic, so
1955 * it could be changed at any time.
1956 */
1957 if (order >= pageblock_order)
1958 return true;
1959
1960 if (order >= pageblock_order / 2 ||
1961 start_mt == MIGRATE_RECLAIMABLE ||
1962 start_mt == MIGRATE_UNMOVABLE ||
1963 page_group_by_mobility_disabled)
1964 return true;
1965
1966 return false;
1967}
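
/*
 * Worked example, assuming pageblock_order == 9 (a common default): a
 * falling-back MIGRATE_MOVABLE allocation may steal the surrounding
 * pageblock only for order >= 9 / 2 == 4, while MIGRATE_RECLAIMABLE and
 * MIGRATE_UNMOVABLE fallbacks may steal at any order, because their
 * pages polluting movable pageblocks is the more damaging form of
 * fragmentation.
 */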
1968
1969/*
1970 * This function implements actual steal behaviour. If order is large enough,
1971 * we can steal whole pageblock. If not, we first move freepages in this
02aa0cdd
VB
1972 * pageblock to our migratetype and determine how many already-allocated
1973 * pages in the pageblock have a compatible migratetype. If at least half
1974 * of the pages are free or compatible, we can change the migratetype of
1975 * the pageblock itself, so pages freed in the future go on the correct free list.
4eb7dce6
JK
1976 */
1977static void steal_suitable_fallback(struct zone *zone, struct page *page,
3bc48f96 1978 int start_type, bool whole_block)
fef903ef 1979{
d00181b9 1980 unsigned int current_order = page_order(page);
3bc48f96 1981 struct free_area *area;
02aa0cdd
VB
1982 int free_pages, movable_pages, alike_pages;
1983 int old_block_type;
1984
1985 old_block_type = get_pageblock_migratetype(page);
fef903ef 1986
3bc48f96
VB
1987 /*
1988 * This can happen due to races and we want to prevent broken
1989 * highatomic accounting.
1990 */
02aa0cdd 1991 if (is_migrate_highatomic(old_block_type))
3bc48f96
VB
1992 goto single_page;
1993
fef903ef
SB
1994 /* Take ownership for orders >= pageblock_order */
1995 if (current_order >= pageblock_order) {
1996 change_pageblock_range(page, current_order, start_type);
3bc48f96 1997 goto single_page;
fef903ef
SB
1998 }
1999
3bc48f96
VB
2000 /* We are not allowed to try stealing from the whole block */
2001 if (!whole_block)
2002 goto single_page;
2003
02aa0cdd
VB
2004 free_pages = move_freepages_block(zone, page, start_type,
2005 &movable_pages);
2006 /*
2007 * Determine how many pages are compatible with our allocation.
2008 * For movable allocation, it's the number of movable pages which
2009 * we just obtained. For other types it's a bit more tricky.
2010 */
2011 if (start_type == MIGRATE_MOVABLE) {
2012 alike_pages = movable_pages;
2013 } else {
2014 /*
2015 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2016 * to MOVABLE pageblock, consider all non-movable pages as
2017 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2018 * vice versa, be conservative since we can't distinguish the
2019 * exact migratetype of non-movable pages.
2020 */
2021 if (old_block_type == MIGRATE_MOVABLE)
2022 alike_pages = pageblock_nr_pages
2023 - (free_pages + movable_pages);
2024 else
2025 alike_pages = 0;
2026 }
2027
3bc48f96 2028 /* moving whole block can fail due to zone boundary conditions */
02aa0cdd 2029 if (!free_pages)
3bc48f96 2030 goto single_page;
fef903ef 2031
02aa0cdd
VB
2032 /*
2033 * If a sufficient number of pages in the block are either free or of
2034 * comparable migratability as our allocation, claim the whole block.
2035 */
2036 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
4eb7dce6
JK
2037 page_group_by_mobility_disabled)
2038 set_pageblock_migratetype(page, start_type);
3bc48f96
VB
2039
2040 return;
2041
2042single_page:
2043 area = &zone->free_area[current_order];
2044 list_move(&page->lru, &area->free_list[start_type]);
4eb7dce6
JK
2045}
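
/*
 * Worked example of the "claim the whole block" test above, assuming
 * pageblock_nr_pages == 512 so the threshold 1 << (pageblock_order - 1)
 * is 256: for an UNMOVABLE request stealing from a MOVABLE block where
 * move_freepages_block() reports free_pages == 100 and
 * movable_pages == 300, alike_pages == 512 - (100 + 300) == 112, and
 * 100 + 112 == 212 < 256, so only the free pages are moved and the
 * pageblock keeps its MIGRATE_MOVABLE type.
 */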
2046
2149cdae
JK
2047/*
2048 * Check whether there is a suitable fallback freepage with requested order.
2049 * If only_stealable is true, this function returns fallback_mt only if
2050 * we can steal other freepages all together. This would help to reduce
2051 * fragmentation due to mixed migratetype pages in one pageblock.
2052 */
2053int find_suitable_fallback(struct free_area *area, unsigned int order,
2054 int migratetype, bool only_stealable, bool *can_steal)
4eb7dce6
JK
2055{
2056 int i;
2057 int fallback_mt;
2058
2059 if (area->nr_free == 0)
2060 return -1;
2061
2062 *can_steal = false;
2063 for (i = 0;; i++) {
2064 fallback_mt = fallbacks[migratetype][i];
974a786e 2065 if (fallback_mt == MIGRATE_TYPES)
4eb7dce6
JK
2066 break;
2067
2068 if (list_empty(&area->free_list[fallback_mt]))
2069 continue;
fef903ef 2070
4eb7dce6
JK
2071 if (can_steal_fallback(order, migratetype))
2072 *can_steal = true;
2073
2149cdae
JK
2074 if (!only_stealable)
2075 return fallback_mt;
2076
2077 if (*can_steal)
2078 return fallback_mt;
fef903ef 2079 }
4eb7dce6
JK
2080
2081 return -1;
fef903ef
SB
2082}
2083
0aaa29a5
MG
2084/*
2085 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2086 * there are no empty page blocks that contain a page with a suitable order
2087 */
2088static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2089 unsigned int alloc_order)
2090{
2091 int mt;
2092 unsigned long max_managed, flags;
2093
2094 /*
2095 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2096 * Check is race-prone but harmless.
2097 */
2098 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2099 if (zone->nr_reserved_highatomic >= max_managed)
2100 return;
2101
2102 spin_lock_irqsave(&zone->lock, flags);
2103
2104 /* Recheck the nr_reserved_highatomic limit under the lock */
2105 if (zone->nr_reserved_highatomic >= max_managed)
2106 goto out_unlock;
2107
2108 /* Yoink! */
2109 mt = get_pageblock_migratetype(page);
a6ffdc07
XQ
2110 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2111 && !is_migrate_cma(mt)) {
0aaa29a5
MG
2112 zone->nr_reserved_highatomic += pageblock_nr_pages;
2113 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
02aa0cdd 2114 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
0aaa29a5
MG
2115 }
2116
2117out_unlock:
2118 spin_unlock_irqrestore(&zone->lock, flags);
2119}
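
/*
 * Worked example of the limit above: for a zone with
 * managed_pages == 1,000,000 and pageblock_nr_pages == 512 (assumed),
 * max_managed == 10,000 + 512 == 10,512 pages, i.e. roughly 20
 * pageblocks (about 1% of the zone) can be reserved as
 * MIGRATE_HIGHATOMIC before further reservations are skipped.
 */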
2120
2121/*
2122 * Used when an allocation is about to fail under memory pressure. This
2123 * potentially hurts the reliability of high-order allocations when under
2124 * intense memory pressure but failed atomic allocations should be easier
2125 * to recover from than an OOM.
29fac03b
MK
2126 *
2127 * If @force is true, try to unreserve a pageblock even though highatomic
2128 * pageblock is exhausted.
0aaa29a5 2129 */
29fac03b
MK
2130static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2131 bool force)
0aaa29a5
MG
2132{
2133 struct zonelist *zonelist = ac->zonelist;
2134 unsigned long flags;
2135 struct zoneref *z;
2136 struct zone *zone;
2137 struct page *page;
2138 int order;
04c8716f 2139 bool ret;
0aaa29a5
MG
2140
2141 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2142 ac->nodemask) {
29fac03b
MK
2143 /*
2144 * Preserve at least one pageblock unless memory pressure
2145 * is really high.
2146 */
2147 if (!force && zone->nr_reserved_highatomic <=
2148 pageblock_nr_pages)
0aaa29a5
MG
2149 continue;
2150
2151 spin_lock_irqsave(&zone->lock, flags);
2152 for (order = 0; order < MAX_ORDER; order++) {
2153 struct free_area *area = &(zone->free_area[order]);
2154
a16601c5
GT
2155 page = list_first_entry_or_null(
2156 &area->free_list[MIGRATE_HIGHATOMIC],
2157 struct page, lru);
2158 if (!page)
0aaa29a5
MG
2159 continue;
2160
0aaa29a5 2161 /*
4855e4a7
MK
2162 * In the page freeing path, the migratetype change is racy, so
2163 * we can encounter several free pages in a pageblock in this
2164 * loop although we changed the pageblock type from highatomic
2165 * to ac->migratetype. So we should adjust the count only
2166 * once.
0aaa29a5 2167 */
a6ffdc07 2168 if (is_migrate_highatomic_page(page)) {
4855e4a7
MK
2169 /*
2170 * It should never happen but changes to
2171 * locking could inadvertently allow a per-cpu
2172 * drain to add pages to MIGRATE_HIGHATOMIC
2173 * while unreserving so be safe and watch for
2174 * underflows.
2175 */
2176 zone->nr_reserved_highatomic -= min(
2177 pageblock_nr_pages,
2178 zone->nr_reserved_highatomic);
2179 }
0aaa29a5
MG
2180
2181 /*
2182 * Convert to ac->migratetype and avoid the normal
2183 * pageblock stealing heuristics. Minimally, the caller
2184 * is doing the work and needs the pages. More
2185 * importantly, if the block was always converted to
2186 * MIGRATE_UNMOVABLE or another type then the number
2187 * of pageblocks that cannot be completely freed
2188 * may increase.
2189 */
2190 set_pageblock_migratetype(page, ac->migratetype);
02aa0cdd
VB
2191 ret = move_freepages_block(zone, page, ac->migratetype,
2192 NULL);
29fac03b
MK
2193 if (ret) {
2194 spin_unlock_irqrestore(&zone->lock, flags);
2195 return ret;
2196 }
0aaa29a5
MG
2197 }
2198 spin_unlock_irqrestore(&zone->lock, flags);
2199 }
04c8716f
MK
2200
2201 return false;
0aaa29a5
MG
2202}
2203
3bc48f96
VB
2204/*
2205 * Try finding a free buddy page on the fallback list and put it on the free
2206 * list of requested migratetype, possibly along with other pages from the same
2207 * block, depending on fragmentation avoidance heuristics. Returns true if
2208 * fallback was found so that __rmqueue_smallest() can grab it.
2209 */
2210static inline bool
7aeb09f9 2211__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
b2a0ac88 2212{
b8af2941 2213 struct free_area *area;
7aeb09f9 2214 unsigned int current_order;
b2a0ac88 2215 struct page *page;
4eb7dce6
JK
2216 int fallback_mt;
2217 bool can_steal;
b2a0ac88
MG
2218
2219 /* Find the largest possible block of pages in the other list */
7aeb09f9
MG
2220 for (current_order = MAX_ORDER-1;
2221 current_order >= order && current_order <= MAX_ORDER-1;
2222 --current_order) {
4eb7dce6
JK
2223 area = &(zone->free_area[current_order]);
2224 fallback_mt = find_suitable_fallback(area, current_order,
2149cdae 2225 start_migratetype, false, &can_steal);
4eb7dce6
JK
2226 if (fallback_mt == -1)
2227 continue;
b2a0ac88 2228
a16601c5 2229 page = list_first_entry(&area->free_list[fallback_mt],
4eb7dce6 2230 struct page, lru);
b2a0ac88 2231
3bc48f96
VB
2232 steal_suitable_fallback(zone, page, start_migratetype,
2233 can_steal);
e0fff1bd 2234
4eb7dce6
JK
2235 trace_mm_page_alloc_extfrag(page, order, current_order,
2236 start_migratetype, fallback_mt);
e0fff1bd 2237
3bc48f96 2238 return true;
b2a0ac88
MG
2239 }
2240
3bc48f96 2241 return false;
b2a0ac88
MG
2242}
2243
56fd56b8 2244/*
1da177e4
LT
2245 * Do the hard work of removing an element from the buddy allocator.
2246 * Call me with the zone->lock already held.
2247 */
b2a0ac88 2248static struct page *__rmqueue(struct zone *zone, unsigned int order,
6ac0206b 2249 int migratetype)
1da177e4 2250{
1da177e4
LT
2251 struct page *page;
2252
3bc48f96 2253retry:
56fd56b8 2254 page = __rmqueue_smallest(zone, order, migratetype);
974a786e 2255 if (unlikely(!page)) {
dc67647b
JK
2256 if (migratetype == MIGRATE_MOVABLE)
2257 page = __rmqueue_cma_fallback(zone, order);
2258
3bc48f96
VB
2259 if (!page && __rmqueue_fallback(zone, order, migratetype))
2260 goto retry;
728ec980
MG
2261 }
2262
0d3d062a 2263 trace_mm_page_alloc_zone_locked(page, order, migratetype);
b2a0ac88 2264 return page;
1da177e4
LT
2265}
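
/*
 * Example flow for an order-2 MIGRATE_MOVABLE request when the movable
 * free lists are empty: __rmqueue_smallest() fails, the CMA fallback is
 * tried (when CONFIG_CMA is enabled), and only then does
 * __rmqueue_fallback() try to steal from another migratetype; on
 * success it retries __rmqueue_smallest(), which now finds the stolen
 * pages on the movable free lists.
 */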
2266
5f63b720 2267/*
1da177e4
LT
2268 * Obtain a specified number of elements from the buddy allocator, all under
2269 * a single hold of the lock, for efficiency. Add them to the supplied list.
2270 * Returns the number of new pages which were placed at *list.
2271 */
5f63b720 2272static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 2273 unsigned long count, struct list_head *list,
b745bc85 2274 int migratetype, bool cold)
1da177e4 2275{
a6de734b 2276 int i, alloced = 0;
5f63b720 2277
d34b0733 2278 spin_lock(&zone->lock);
1da177e4 2279 for (i = 0; i < count; ++i) {
6ac0206b 2280 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 2281 if (unlikely(page == NULL))
1da177e4 2282 break;
81eabcbe 2283
479f854a
MG
2284 if (unlikely(check_pcp_refill(page)))
2285 continue;
2286
81eabcbe
MG
2287 /*
2288 * Split buddy pages returned by expand() are received here
2289 * in physical page order. The page is added to the caller's
2290 * list and the list head then moves forward. From the caller's
2291 * perspective, the linked list is ordered by page number under
2292 * some conditions. This is useful for IO devices that can
2293 * merge IO requests if the physical pages are ordered
2294 * properly.
2295 */
b745bc85 2296 if (likely(!cold))
e084b2d9
MG
2297 list_add(&page->lru, list);
2298 else
2299 list_add_tail(&page->lru, list);
81eabcbe 2300 list = &page->lru;
a6de734b 2301 alloced++;
bb14c2c7 2302 if (is_migrate_cma(get_pcppage_migratetype(page)))
d1ce749a
BZ
2303 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2304 -(1 << order));
1da177e4 2305 }
a6de734b
MG
2306
2307 /*
2308 * i pages were removed from the buddy list even if some leaked due
2309 * to check_pcp_refill failing, so adjust NR_FREE_PAGES based
2310 * on i. Do not confuse this with 'alloced', which is the number of
2311 * pages added to the pcp list.
2312 */
f2260e6b 2313 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
d34b0733 2314 spin_unlock(&zone->lock);
a6de734b 2315 return alloced;
1da177e4
LT
2316}
2317
4ae7c039 2318#ifdef CONFIG_NUMA
8fce4d8e 2319/*
4037d452
CL
2320 * Called from the vmstat counter updater to drain pagesets of this
2321 * currently executing processor on remote nodes after they have
2322 * expired.
2323 *
879336c3
CL
2324 * Note that this function must be called with the thread pinned to
2325 * a single processor.
8fce4d8e 2326 */
4037d452 2327void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 2328{
4ae7c039 2329 unsigned long flags;
7be12fc9 2330 int to_drain, batch;
4ae7c039 2331
4037d452 2332 local_irq_save(flags);
4db0c3c2 2333 batch = READ_ONCE(pcp->batch);
7be12fc9 2334 to_drain = min(pcp->count, batch);
2a13515c
KM
2335 if (to_drain > 0) {
2336 free_pcppages_bulk(zone, to_drain, pcp);
2337 pcp->count -= to_drain;
2338 }
4037d452 2339 local_irq_restore(flags);
4ae7c039
CL
2340}
2341#endif
2342
9f8f2172 2343/*
93481ff0 2344 * Drain pcplists of the indicated processor and zone.
9f8f2172
CL
2345 *
2346 * The processor must either be the current processor and the
2347 * thread pinned to the current processor or a processor that
2348 * is not online.
2349 */
93481ff0 2350static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1da177e4 2351{
c54ad30c 2352 unsigned long flags;
93481ff0
VB
2353 struct per_cpu_pageset *pset;
2354 struct per_cpu_pages *pcp;
1da177e4 2355
93481ff0
VB
2356 local_irq_save(flags);
2357 pset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 2358
93481ff0
VB
2359 pcp = &pset->pcp;
2360 if (pcp->count) {
2361 free_pcppages_bulk(zone, pcp->count, pcp);
2362 pcp->count = 0;
2363 }
2364 local_irq_restore(flags);
2365}
3dfa5721 2366
93481ff0
VB
2367/*
2368 * Drain pcplists of all zones on the indicated processor.
2369 *
2370 * The processor must either be the current processor and the
2371 * thread pinned to the current processor or a processor that
2372 * is not online.
2373 */
2374static void drain_pages(unsigned int cpu)
2375{
2376 struct zone *zone;
2377
2378 for_each_populated_zone(zone) {
2379 drain_pages_zone(cpu, zone);
1da177e4
LT
2380 }
2381}
1da177e4 2382
9f8f2172
CL
2383/*
2384 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
93481ff0
VB
2385 *
2386 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2387 * the single zone's pages.
9f8f2172 2388 */
93481ff0 2389void drain_local_pages(struct zone *zone)
9f8f2172 2390{
93481ff0
VB
2391 int cpu = smp_processor_id();
2392
2393 if (zone)
2394 drain_pages_zone(cpu, zone);
2395 else
2396 drain_pages(cpu);
9f8f2172
CL
2397}
2398
0ccce3b9
MG
2399static void drain_local_pages_wq(struct work_struct *work)
2400{
a459eeb7
MH
2401 /*
2402 * drain_all_pages doesn't use proper cpu hotplug protection, so
2403 * we can race with cpu offline when the WQ moves this from a
2404 * cpu-pinned worker to an unbound one. Operating on a different
2405 * cpu is all right, but we also have to make sure not to migrate
2406 * to yet another cpu while we run.
2407 */
2408 preempt_disable();
0ccce3b9 2409 drain_local_pages(NULL);
a459eeb7 2410 preempt_enable();
0ccce3b9
MG
2411}
2412
9f8f2172 2413/*
74046494
GBY
2414 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2415 *
93481ff0
VB
2416 * When zone parameter is non-NULL, spill just the single zone's pages.
2417 *
0ccce3b9 2418 * Note that this can be extremely slow as the draining happens in a workqueue.
9f8f2172 2419 */
93481ff0 2420void drain_all_pages(struct zone *zone)
9f8f2172 2421{
74046494 2422 int cpu;
74046494
GBY
2423
2424 /*
2425 * Allocate in the BSS so we won't require allocation in
2426 * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2427 */
2428 static cpumask_t cpus_with_pcps;
2429
ce612879
MH
2430 /*
2431 * Make sure nobody triggers this path before mm_percpu_wq is fully
2432 * initialized.
2433 */
2434 if (WARN_ON_ONCE(!mm_percpu_wq))
2435 return;
2436
0ccce3b9
MG
2437 /* Workqueues cannot recurse */
2438 if (current->flags & PF_WQ_WORKER)
2439 return;
2440
bd233f53
MG
2441 /*
2442 * Do not drain if one is already in progress unless it's specific to
2443 * a zone. Such callers are primarily CMA and memory hotplug and need
2444 * the drain to be complete when the call returns.
2445 */
2446 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2447 if (!zone)
2448 return;
2449 mutex_lock(&pcpu_drain_mutex);
2450 }
0ccce3b9 2451
74046494
GBY
2452 /*
2453 * We don't care about racing with CPU hotplug event
2454 * as offline notification will cause the notified
2455 * cpu to drain that CPU pcps and on_each_cpu_mask
2456 * disables preemption as part of its processing
2457 */
2458 for_each_online_cpu(cpu) {
93481ff0
VB
2459 struct per_cpu_pageset *pcp;
2460 struct zone *z;
74046494 2461 bool has_pcps = false;
93481ff0
VB
2462
2463 if (zone) {
74046494 2464 pcp = per_cpu_ptr(zone->pageset, cpu);
93481ff0 2465 if (pcp->pcp.count)
74046494 2466 has_pcps = true;
93481ff0
VB
2467 } else {
2468 for_each_populated_zone(z) {
2469 pcp = per_cpu_ptr(z->pageset, cpu);
2470 if (pcp->pcp.count) {
2471 has_pcps = true;
2472 break;
2473 }
74046494
GBY
2474 }
2475 }
93481ff0 2476
74046494
GBY
2477 if (has_pcps)
2478 cpumask_set_cpu(cpu, &cpus_with_pcps);
2479 else
2480 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2481 }
0ccce3b9 2482
bd233f53
MG
2483 for_each_cpu(cpu, &cpus_with_pcps) {
2484 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2485 INIT_WORK(work, drain_local_pages_wq);
ce612879 2486 queue_work_on(cpu, mm_percpu_wq, work);
0ccce3b9 2487 }
bd233f53
MG
2488 for_each_cpu(cpu, &cpus_with_pcps)
2489 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
2490
2491 mutex_unlock(&pcpu_drain_mutex);
9f8f2172
CL
2492}
2493
296699de 2494#ifdef CONFIG_HIBERNATION
1da177e4
LT
2495
2496void mark_free_pages(struct zone *zone)
2497{
f623f0db
RW
2498 unsigned long pfn, max_zone_pfn;
2499 unsigned long flags;
7aeb09f9 2500 unsigned int order, t;
86760a2c 2501 struct page *page;
1da177e4 2502
8080fc03 2503 if (zone_is_empty(zone))
1da177e4
LT
2504 return;
2505
2506 spin_lock_irqsave(&zone->lock, flags);
f623f0db 2507
108bcc96 2508 max_zone_pfn = zone_end_pfn(zone);
f623f0db
RW
2509 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2510 if (pfn_valid(pfn)) {
86760a2c 2511 page = pfn_to_page(pfn);
ba6b0979
JK
2512
2513 if (page_zone(page) != zone)
2514 continue;
2515
7be98234
RW
2516 if (!swsusp_page_is_forbidden(page))
2517 swsusp_unset_page_free(page);
f623f0db 2518 }
1da177e4 2519
b2a0ac88 2520 for_each_migratetype_order(order, t) {
86760a2c
GT
2521 list_for_each_entry(page,
2522 &zone->free_area[order].free_list[t], lru) {
f623f0db 2523 unsigned long i;
1da177e4 2524
86760a2c 2525 pfn = page_to_pfn(page);
f623f0db 2526 for (i = 0; i < (1UL << order); i++)
7be98234 2527 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 2528 }
b2a0ac88 2529 }
1da177e4
LT
2530 spin_unlock_irqrestore(&zone->lock, flags);
2531}
e2c55dc8 2532#endif /* CONFIG_HIBERNATION */
1da177e4 2533
1da177e4
LT
2534/*
2535 * Free a 0-order page
b745bc85 2536 * cold == true ? free a cold page : free a hot page
1da177e4 2537 */
b745bc85 2538void free_hot_cold_page(struct page *page, bool cold)
1da177e4
LT
2539{
2540 struct zone *zone = page_zone(page);
2541 struct per_cpu_pages *pcp;
d34b0733 2542 unsigned long flags;
dc4b0caf 2543 unsigned long pfn = page_to_pfn(page);
5f8dcc21 2544 int migratetype;
1da177e4 2545
4db7548c 2546 if (!free_pcp_prepare(page))
689bcebf
HD
2547 return;
2548
dc4b0caf 2549 migratetype = get_pfnblock_migratetype(page, pfn);
bb14c2c7 2550 set_pcppage_migratetype(page, migratetype);
d34b0733
MG
2551 local_irq_save(flags);
2552 __count_vm_event(PGFREE);
da456f14 2553
5f8dcc21
MG
2554 /*
2555 * We only track unmovable, reclaimable and movable on pcp lists.
2556 * Free ISOLATE pages back to the allocator because they are being
a6ffdc07 2557 * offlined but treat HIGHATOMIC as movable pages so we can get those
5f8dcc21
MG
2558 * areas back if necessary. Otherwise, we may have to free
2559 * excessively into the page allocator
2560 */
2561 if (migratetype >= MIGRATE_PCPTYPES) {
194159fb 2562 if (unlikely(is_migrate_isolate(migratetype))) {
dc4b0caf 2563 free_one_page(zone, page, pfn, 0, migratetype);
5f8dcc21
MG
2564 goto out;
2565 }
2566 migratetype = MIGRATE_MOVABLE;
2567 }
2568
99dcc3e5 2569 pcp = &this_cpu_ptr(zone->pageset)->pcp;
b745bc85 2570 if (!cold)
5f8dcc21 2571 list_add(&page->lru, &pcp->lists[migratetype]);
b745bc85
MG
2572 else
2573 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1da177e4 2574 pcp->count++;
48db57f8 2575 if (pcp->count >= pcp->high) {
4db0c3c2 2576 unsigned long batch = READ_ONCE(pcp->batch);
998d39cb
CS
2577 free_pcppages_bulk(zone, batch, pcp);
2578 pcp->count -= batch;
48db57f8 2579 }
5f8dcc21
MG
2580
2581out:
d34b0733 2582 local_irq_restore(flags);
1da177e4
LT
2583}
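
/*
 * Worked example of the trimming above, assuming typical large-zone
 * defaults of pcp->high == 186 and pcp->batch == 31: when a free brings
 * pcp->count up to 186, free_pcppages_bulk() returns one batch of 31
 * pages to the buddy allocator, leaving 155 pages on the per-cpu list.
 */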
2584
cc59850e
KK
2585/*
2586 * Free a list of 0-order pages
2587 */
b745bc85 2588void free_hot_cold_page_list(struct list_head *list, bool cold)
cc59850e
KK
2589{
2590 struct page *page, *next;
2591
2592 list_for_each_entry_safe(page, next, list, lru) {
b413d48a 2593 trace_mm_page_free_batched(page, cold);
cc59850e
KK
2594 free_hot_cold_page(page, cold);
2595 }
2596}
2597
8dfcc9ba
NP
2598/*
2599 * split_page takes a non-compound higher-order page, and splits it into
2600 * n (1<<order) sub-pages: page[0..n]
2601 * Each sub-page must be freed individually.
2602 *
2603 * Note: this is probably too low level an operation for use in drivers.
2604 * Please consult with lkml before using this in your driver.
2605 */
2606void split_page(struct page *page, unsigned int order)
2607{
2608 int i;
2609
309381fe
SL
2610 VM_BUG_ON_PAGE(PageCompound(page), page);
2611 VM_BUG_ON_PAGE(!page_count(page), page);
b1eeab67
VN
2612
2613#ifdef CONFIG_KMEMCHECK
2614 /*
2615 * Split shadow pages too, because free(page[0]) would
2616 * otherwise free the whole shadow.
2617 */
2618 if (kmemcheck_page_is_tracked(page))
2619 split_page(virt_to_page(page[0].shadow), order);
2620#endif
2621
a9627bc5 2622 for (i = 1; i < (1 << order); i++)
7835e98b 2623 set_page_refcounted(page + i);
a9627bc5 2624 split_page_owner(page, order);
8dfcc9ba 2625}
5853ff23 2626EXPORT_SYMBOL_GPL(split_page);
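
/*
 * Usage sketch for split_page() (illustrative, not a caller in this
 * file): a holder of an order-2, non-compound allocation can hand out
 * its four constituent pages independently:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);
 *		... page[0] through page[3] are now separate order-0
 *		    pages and must each be freed with __free_page() ...
 *	}
 */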
8dfcc9ba 2627
3c605096 2628int __isolate_free_page(struct page *page, unsigned int order)
748446bb 2629{
748446bb
MG
2630 unsigned long watermark;
2631 struct zone *zone;
2139cbe6 2632 int mt;
748446bb
MG
2633
2634 BUG_ON(!PageBuddy(page));
2635
2636 zone = page_zone(page);
2e30abd1 2637 mt = get_pageblock_migratetype(page);
748446bb 2638
194159fb 2639 if (!is_migrate_isolate(mt)) {
8348faf9
VB
2640 /*
2641 * Obey watermarks as if the page was being allocated. We can
2642 * emulate a high-order watermark check with a raised order-0
2643 * watermark, because we already know our high-order page
2644 * exists.
2645 */
2646 watermark = min_wmark_pages(zone) + (1UL << order);
984fdba6 2647 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2e30abd1
MS
2648 return 0;
2649
8fb74b9f 2650 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2e30abd1 2651 }
748446bb
MG
2652
2653 /* Remove page from free list */
2654 list_del(&page->lru);
2655 zone->free_area[order].nr_free--;
2656 rmv_page_order(page);
2139cbe6 2657
400bc7fd 2658 /*
2659 * Set the pageblock if the isolated page is at least half of a
2660 * pageblock
2661 */
748446bb
MG
2662 if (order >= pageblock_order - 1) {
2663 struct page *endpage = page + (1 << order) - 1;
47118af0
MN
2664 for (; page < endpage; page += pageblock_nr_pages) {
2665 int mt = get_pageblock_migratetype(page);
88ed365e 2666 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
a6ffdc07 2667 && !is_migrate_highatomic(mt))
47118af0
MN
2668 set_pageblock_migratetype(page,
2669 MIGRATE_MOVABLE);
2670 }
748446bb
MG
2671 }
2672
f3a14ced 2673
8fb74b9f 2674 return 1UL << order;
1fb3f8ca
MG
2675}
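
/*
 * Worked example of the emulated watermark check above: isolating an
 * order-5 (32-page) buddy from a zone whose min watermark is, say,
 * 1,000 pages performs an order-0 check against 1,000 + 32 == 1,032
 * pages, so the isolation proceeds only if the zone would still meet
 * its min watermark after the 32 pages are removed.
 */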
2676
060e7417
MG
2677/*
2678 * Update NUMA hit/miss statistics
2679 *
2680 * Must be called with interrupts disabled.
060e7417 2681 */
41b6167e 2682static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
060e7417
MG
2683{
2684#ifdef CONFIG_NUMA
060e7417
MG
2685 enum zone_stat_item local_stat = NUMA_LOCAL;
2686
2df26639 2687 if (z->node != numa_node_id())
060e7417 2688 local_stat = NUMA_OTHER;
060e7417 2689
2df26639 2690 if (z->node == preferred_zone->node)
060e7417 2691 __inc_zone_state(z, NUMA_HIT);
2df26639 2692 else {
060e7417
MG
2693 __inc_zone_state(z, NUMA_MISS);
2694 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2695 }
2df26639 2696 __inc_zone_state(z, local_stat);
060e7417
MG
2697#endif
2698}
2699
066b2393
MG
2700/* Remove page from the per-cpu list, caller must protect the list */
2701static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2702 bool cold, struct per_cpu_pages *pcp,
2703 struct list_head *list)
2704{
2705 struct page *page;
2706
2707 do {
2708 if (list_empty(list)) {
2709 pcp->count += rmqueue_bulk(zone, 0,
2710 pcp->batch, list,
2711 migratetype, cold);
2712 if (unlikely(list_empty(list)))
2713 return NULL;
2714 }
2715
2716 if (cold)
2717 page = list_last_entry(list, struct page, lru);
2718 else
2719 page = list_first_entry(list, struct page, lru);
2720
2721 list_del(&page->lru);
2722 pcp->count--;
2723 } while (check_new_pcp(page));
2724
2725 return page;
2726}
2727
2728/* Lock and remove page from the per-cpu list */
2729static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2730 struct zone *zone, unsigned int order,
2731 gfp_t gfp_flags, int migratetype)
2732{
2733 struct per_cpu_pages *pcp;
2734 struct list_head *list;
2735 bool cold = ((gfp_flags & __GFP_COLD) != 0);
2736 struct page *page;
d34b0733 2737 unsigned long flags;
066b2393 2738
d34b0733 2739 local_irq_save(flags);
066b2393
MG
2740 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2741 list = &pcp->lists[migratetype];
2742 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
2743 if (page) {
2744 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2745 zone_statistics(preferred_zone, zone);
2746 }
d34b0733 2747 local_irq_restore(flags);
066b2393
MG
2748 return page;
2749}
2750
1da177e4 2751/*
75379191 2752 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
1da177e4 2753 */
0a15c3e9 2754static inline
066b2393 2755struct page *rmqueue(struct zone *preferred_zone,
7aeb09f9 2756 struct zone *zone, unsigned int order,
c603844b
MG
2757 gfp_t gfp_flags, unsigned int alloc_flags,
2758 int migratetype)
1da177e4
LT
2759{
2760 unsigned long flags;
689bcebf 2761 struct page *page;
1da177e4 2762
d34b0733 2763 if (likely(order == 0)) {
066b2393
MG
2764 page = rmqueue_pcplist(preferred_zone, zone, order,
2765 gfp_flags, migratetype);
2766 goto out;
2767 }
83b9355b 2768
066b2393
MG
2769 /*
2770 * We most definitely don't want callers attempting to
2771 * allocate greater than order-1 page units with __GFP_NOFAIL.
2772 */
2773 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2774 spin_lock_irqsave(&zone->lock, flags);
0aaa29a5 2775
066b2393
MG
2776 do {
2777 page = NULL;
2778 if (alloc_flags & ALLOC_HARDER) {
2779 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2780 if (page)
2781 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2782 }
a74609fa 2783 if (!page)
066b2393
MG
2784 page = __rmqueue(zone, order, migratetype);
2785 } while (page && check_new_pages(page, order));
2786 spin_unlock(&zone->lock);
2787 if (!page)
2788 goto failed;
2789 __mod_zone_freepage_state(zone, -(1 << order),
2790 get_pcppage_migratetype(page));
1da177e4 2791
16709d1d 2792 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
41b6167e 2793 zone_statistics(preferred_zone, zone);
a74609fa 2794 local_irq_restore(flags);
1da177e4 2795
066b2393
MG
2796out:
2797 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
1da177e4 2798 return page;
a74609fa
NP
2799
2800failed:
2801 local_irq_restore(flags);
a74609fa 2802 return NULL;
1da177e4
LT
2803}
2804
933e312e
AM
2805#ifdef CONFIG_FAIL_PAGE_ALLOC
2806
b2588c4b 2807static struct {
933e312e
AM
2808 struct fault_attr attr;
2809
621a5f7a 2810 bool ignore_gfp_highmem;
71baba4b 2811 bool ignore_gfp_reclaim;
54114994 2812 u32 min_order;
933e312e
AM
2813} fail_page_alloc = {
2814 .attr = FAULT_ATTR_INITIALIZER,
71baba4b 2815 .ignore_gfp_reclaim = true,
621a5f7a 2816 .ignore_gfp_highmem = true,
54114994 2817 .min_order = 1,
933e312e
AM
2818};
2819
2820static int __init setup_fail_page_alloc(char *str)
2821{
2822 return setup_fault_attr(&fail_page_alloc.attr, str);
2823}
2824__setup("fail_page_alloc=", setup_fail_page_alloc);
2825
deaf386e 2826static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 2827{
54114994 2828 if (order < fail_page_alloc.min_order)
deaf386e 2829 return false;
933e312e 2830 if (gfp_mask & __GFP_NOFAIL)
deaf386e 2831 return false;
933e312e 2832 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
deaf386e 2833 return false;
71baba4b
MG
2834 if (fail_page_alloc.ignore_gfp_reclaim &&
2835 (gfp_mask & __GFP_DIRECT_RECLAIM))
deaf386e 2836 return false;
933e312e
AM
2837
2838 return should_fail(&fail_page_alloc.attr, 1 << order);
2839}
2840
2841#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2842
2843static int __init fail_page_alloc_debugfs(void)
2844{
f4ae40a6 2845 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
933e312e 2846 struct dentry *dir;
933e312e 2847
dd48c085
AM
2848 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2849 &fail_page_alloc.attr);
2850 if (IS_ERR(dir))
2851 return PTR_ERR(dir);
933e312e 2852
b2588c4b 2853 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
71baba4b 2854 &fail_page_alloc.ignore_gfp_reclaim))
b2588c4b
AM
2855 goto fail;
2856 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2857 &fail_page_alloc.ignore_gfp_highmem))
2858 goto fail;
2859 if (!debugfs_create_u32("min-order", mode, dir,
2860 &fail_page_alloc.min_order))
2861 goto fail;
2862
2863 return 0;
2864fail:
dd48c085 2865 debugfs_remove_recursive(dir);
933e312e 2866
b2588c4b 2867 return -ENOMEM;
933e312e
AM
2868}
2869
2870late_initcall(fail_page_alloc_debugfs);
2871
2872#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2873
2874#else /* CONFIG_FAIL_PAGE_ALLOC */
2875
deaf386e 2876static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 2877{
deaf386e 2878 return false;
933e312e
AM
2879}
2880
2881#endif /* CONFIG_FAIL_PAGE_ALLOC */
2882
1da177e4 2883/*
97a16fc8
MG
2884 * Return true if free base pages are above 'mark'. For high-order checks it
2885 * will return true if the order-0 watermark is reached and there is at least
2886 * one free page of a suitable size. Checking now avoids taking the zone lock
2887 * to check in the allocation paths if no pages are free.
1da177e4 2888 */
86a294a8
MH
2889bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2890 int classzone_idx, unsigned int alloc_flags,
2891 long free_pages)
1da177e4 2892{
d23ad423 2893 long min = mark;
1da177e4 2894 int o;
c603844b 2895 const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
1da177e4 2896
0aaa29a5 2897 /* free_pages may go negative - that's OK */
df0a6daa 2898 free_pages -= (1 << order) - 1;
0aaa29a5 2899
7fb1d9fc 2900 if (alloc_flags & ALLOC_HIGH)
1da177e4 2901 min -= min / 2;
0aaa29a5
MG
2902
2903 /*
2904 * If the caller does not have rights to ALLOC_HARDER then subtract
2905 * the high-atomic reserves. This will over-estimate the size of the
2906 * atomic reserve but it avoids a search.
2907 */
97a16fc8 2908 if (likely(!alloc_harder))
0aaa29a5
MG
2909 free_pages -= z->nr_reserved_highatomic;
2910 else
1da177e4 2911 min -= min / 4;
e2b19197 2912
d95ea5d1
BZ
2913#ifdef CONFIG_CMA
2914 /* If allocation can't use CMA areas don't use free CMA pages */
2915 if (!(alloc_flags & ALLOC_CMA))
97a16fc8 2916 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
d95ea5d1 2917#endif
026b0814 2918
97a16fc8
MG
2919 /*
2920 * Check watermarks for an order-0 allocation request. If these
2921 * are not met, then a high-order request also cannot go ahead
2922 * even if a suitable page happened to be free.
2923 */
2924 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
88f5acf8 2925 return false;
1da177e4 2926
97a16fc8
MG
2927 /* If this is an order-0 request then the watermark is fine */
2928 if (!order)
2929 return true;
2930
2931 /* For a high-order request, check at least one suitable page is free */
2932 for (o = order; o < MAX_ORDER; o++) {
2933 struct free_area *area = &z->free_area[o];
2934 int mt;
2935
2936 if (!area->nr_free)
2937 continue;
2938
2939 if (alloc_harder)
2940 return true;
1da177e4 2941
97a16fc8
MG
2942 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2943 if (!list_empty(&area->free_list[mt]))
2944 return true;
2945 }
2946
2947#ifdef CONFIG_CMA
2948 if ((alloc_flags & ALLOC_CMA) &&
2949 !list_empty(&area->free_list[MIGRATE_CMA])) {
2950 return true;
2951 }
2952#endif
1da177e4 2953 }
97a16fc8 2954 return false;
88f5acf8
MG
2955}
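
/*
 * Worked example of the arithmetic above, with an illustrative
 * mark == 1,000 and an order-3 request: free_pages is first reduced by
 * (1 << 3) - 1 == 7.  ALLOC_HIGH halves min (1,000 -> 500); with
 * ALLOC_HARDER, min is further reduced by a quarter and the high-atomic
 * reserve is not subtracted, otherwise nr_reserved_highatomic is taken
 * off free_pages.  The request fails unless
 * free_pages > min + lowmem_reserve[classzone_idx], and for order > 0
 * at least one free page of order >= 3 must also sit on a usable free
 * list.
 */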
2956
7aeb09f9 2957bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
c603844b 2958 int classzone_idx, unsigned int alloc_flags)
88f5acf8
MG
2959{
2960 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2961 zone_page_state(z, NR_FREE_PAGES));
2962}
2963
48ee5f36
MG
2964static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2965 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
2966{
2967 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2968 long cma_pages = 0;
2969
2970#ifdef CONFIG_CMA
2971 /* If allocation can't use CMA areas don't use free CMA pages */
2972 if (!(alloc_flags & ALLOC_CMA))
2973 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
2974#endif
2975
2976 /*
2977 * Fast check for order-0 only. If this fails then the reserves
2978 * need to be calculated. There is a corner case where the check
2979 * passes but only the high-order atomic reserves are free. If
2980 * the caller is !atomic then it'll uselessly search the free
2981 * list. That corner case is then slower but it is harmless.
2982 */
2983 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
2984 return true;
2985
2986 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2987 free_pages);
2988}
2989
7aeb09f9 2990bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
e2b19197 2991 unsigned long mark, int classzone_idx)
88f5acf8
MG
2992{
2993 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2994
2995 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2996 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2997
e2b19197 2998 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
88f5acf8 2999 free_pages);
1da177e4
LT
3000}
3001
9276b1bc 3002#ifdef CONFIG_NUMA
957f822a
DR
3003static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3004{
e02dc017 3005 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
5f7a75ac 3006 RECLAIM_DISTANCE;
957f822a 3007}
9276b1bc 3008#else /* CONFIG_NUMA */
957f822a
DR
3009static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3010{
3011 return true;
3012}
9276b1bc
PJ
3013#endif /* CONFIG_NUMA */
3014
7fb1d9fc 3015/*
0798e519 3016 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
3017 * a page.
3018 */
3019static struct page *
a9263751
VB
3020get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3021 const struct alloc_context *ac)
753ee728 3022{
c33d6c06 3023 struct zoneref *z = ac->preferred_zoneref;
5117f45d 3024 struct zone *zone;
3b8c0be4
MG
3025 struct pglist_data *last_pgdat_dirty_limit = NULL;
3026
7fb1d9fc 3027 /*
9276b1bc 3028 * Scan zonelist, looking for a zone with enough free.
344736f2 3029 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
7fb1d9fc 3030 */
c33d6c06 3031 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
a9263751 3032 ac->nodemask) {
be06af00 3033 struct page *page;
e085dbc5
JW
3034 unsigned long mark;
3035
664eedde
MG
3036 if (cpusets_enabled() &&
3037 (alloc_flags & ALLOC_CPUSET) &&
002f2906 3038 !__cpuset_zone_allowed(zone, gfp_mask))
cd38b115 3039 continue;
a756cf59
JW
3040 /*
3041 * When allocating a page cache page for writing, we
281e3726
MG
3042 * want to get it from a node that is within its dirty
3043 * limit, such that no single node holds more than its
a756cf59 3044 * proportional share of globally allowed dirty pages.
281e3726 3045 * The dirty limits take into account the node's
a756cf59
JW
3046 * lowmem reserves and high watermark so that kswapd
3047 * should be able to balance it without having to
3048 * write pages from its LRU list.
3049 *
a756cf59 3050 * XXX: For now, allow allocations to potentially
281e3726 3051 * exceed the per-node dirty limit in the slowpath
c9ab0c4f 3052 * (spread_dirty_pages unset) before going into reclaim,
a756cf59 3053 * which is important when on a NUMA setup the allowed
281e3726 3054 * nodes are together not big enough to reach the
a756cf59 3055 * global limit. The proper fix for these situations
281e3726 3056 * will require awareness of nodes in the
a756cf59
JW
3057 * dirty-throttling and the flusher threads.
3058 */
3b8c0be4
MG
3059 if (ac->spread_dirty_pages) {
3060 if (last_pgdat_dirty_limit == zone->zone_pgdat)
3061 continue;
3062
3063 if (!node_dirty_ok(zone->zone_pgdat)) {
3064 last_pgdat_dirty_limit = zone->zone_pgdat;
3065 continue;
3066 }
3067 }
7fb1d9fc 3068
e085dbc5 3069 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
48ee5f36 3070 if (!zone_watermark_fast(zone, order, mark,
93ea9964 3071 ac_classzone_idx(ac), alloc_flags)) {
fa5e084e
MG
3072 int ret;
3073
5dab2911
MG
3074 /* Checked here to keep the fast path fast */
3075 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3076 if (alloc_flags & ALLOC_NO_WATERMARKS)
3077 goto try_this_zone;
3078
a5f5f91d 3079 if (node_reclaim_mode == 0 ||
c33d6c06 3080 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
cd38b115
MG
3081 continue;
3082
a5f5f91d 3083 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
fa5e084e 3084 switch (ret) {
a5f5f91d 3085 case NODE_RECLAIM_NOSCAN:
fa5e084e 3086 /* did not scan */
cd38b115 3087 continue;
a5f5f91d 3088 case NODE_RECLAIM_FULL:
fa5e084e 3089 /* scanned but unreclaimable */
cd38b115 3090 continue;
fa5e084e
MG
3091 default:
3092 /* did we reclaim enough */
fed2719e 3093 if (zone_watermark_ok(zone, order, mark,
93ea9964 3094 ac_classzone_idx(ac), alloc_flags))
fed2719e
MG
3095 goto try_this_zone;
3096
fed2719e 3097 continue;
0798e519 3098 }
7fb1d9fc
RS
3099 }
3100
fa5e084e 3101try_this_zone:
066b2393 3102 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
0aaa29a5 3103 gfp_mask, alloc_flags, ac->migratetype);
75379191 3104 if (page) {
479f854a 3105 prep_new_page(page, order, gfp_mask, alloc_flags);
0aaa29a5
MG
3106
3107 /*
3108 * If this is a high-order atomic allocation then check
3109 * if the pageblock should be reserved for the future
3110 */
3111 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3112 reserve_highatomic_pageblock(page, zone, order);
3113
75379191
VB
3114 return page;
3115 }
54a6eb5c 3116 }
9276b1bc 3117
4ffeaf35 3118 return NULL;
753ee728
MH
3119}
3120
29423e77
DR
3121/*
3122 * Large machines with many possible nodes should not always dump per-node
3123 * meminfo in irq context.
3124 */
3125static inline bool should_suppress_show_mem(void)
3126{
3127 bool ret = false;
3128
3129#if NODES_SHIFT > 8
3130 ret = in_interrupt();
3131#endif
3132 return ret;
3133}
3134
9af744d7 3135static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
a238ab5b 3136{
a238ab5b 3137 unsigned int filter = SHOW_MEM_FILTER_NODES;
aa187507 3138 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
a238ab5b 3139
aa187507 3140 if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
a238ab5b
DH
3141 return;
3142
3143 /*
3144 * This documents exceptions given to allocations in certain
3145 * contexts that are allowed to allocate outside current's set
3146 * of allowed nodes.
3147 */
3148 if (!(gfp_mask & __GFP_NOMEMALLOC))
3149 if (test_thread_flag(TIF_MEMDIE) ||
3150 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3151 filter &= ~SHOW_MEM_FILTER_NODES;
d0164adc 3152 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
a238ab5b
DH
3153 filter &= ~SHOW_MEM_FILTER_NODES;
3154
9af744d7 3155 show_mem(filter, nodemask);
aa187507
MH
3156}
3157
a8e99259 3158void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
aa187507
MH
3159{
3160 struct va_format vaf;
3161 va_list args;
3162 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3163 DEFAULT_RATELIMIT_BURST);
3164
0f7896f1 3165 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
aa187507
MH
3166 return;
3167
7877cdcc 3168 pr_warn("%s: ", current->comm);
3ee9a4f0 3169
7877cdcc
MH
3170 va_start(args, fmt);
3171 vaf.fmt = fmt;
3172 vaf.va = &args;
3173 pr_cont("%pV", &vaf);
3174 va_end(args);
3ee9a4f0 3175
685dbf6f
DR
3176 pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
3177 if (nodemask)
3178 pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
3179 else
3180 pr_cont("(null)\n");
3181
a8e99259 3182 cpuset_print_current_mems_allowed();
3ee9a4f0 3183
a238ab5b 3184 dump_stack();
685dbf6f 3185 warn_alloc_show_mem(gfp_mask, nodemask);
a238ab5b
DH
3186}
3187
6c18ba7a
MH
3188static inline struct page *
3189__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3190 unsigned int alloc_flags,
3191 const struct alloc_context *ac)
3192{
3193 struct page *page;
3194
3195 page = get_page_from_freelist(gfp_mask, order,
3196 alloc_flags|ALLOC_CPUSET, ac);
3197 /*
3198 * fallback to ignore cpuset restriction if our nodes
3199 * are depleted
3200 */
3201 if (!page)
3202 page = get_page_from_freelist(gfp_mask, order,
3203 alloc_flags, ac);
3204
3205 return page;
3206}
3207
11e33f6a
MG
3208static inline struct page *
3209__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
a9263751 3210 const struct alloc_context *ac, unsigned long *did_some_progress)
11e33f6a 3211{
6e0fc46d
DR
3212 struct oom_control oc = {
3213 .zonelist = ac->zonelist,
3214 .nodemask = ac->nodemask,
2a966b77 3215 .memcg = NULL,
6e0fc46d
DR
3216 .gfp_mask = gfp_mask,
3217 .order = order,
6e0fc46d 3218 };
11e33f6a
MG
3219 struct page *page;
3220
9879de73
JW
3221 *did_some_progress = 0;
3222
9879de73 3223 /*
dc56401f
JW
3224 * Acquire the oom lock. If that fails, somebody else is
3225 * making progress for us.
9879de73 3226 */
dc56401f 3227 if (!mutex_trylock(&oom_lock)) {
9879de73 3228 *did_some_progress = 1;
11e33f6a 3229 schedule_timeout_uninterruptible(1);
1da177e4
LT
3230 return NULL;
3231 }
6b1de916 3232
11e33f6a
MG
3233 /*
3234 * Go through the zonelist yet one more time, keep very high watermark
3235 * here, this is only to catch a parallel oom killing, we must fail if
3236 * we're still under heavy pressure.
3237 */
a9263751
VB
3238 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
3239 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
7fb1d9fc 3240 if (page)
11e33f6a
MG
3241 goto out;
3242
06ad276a
MH
3243 /* Coredumps can quickly deplete all memory reserves */
3244 if (current->flags & PF_DUMPCORE)
3245 goto out;
3246 /* The OOM killer will not help higher order allocs */
3247 if (order > PAGE_ALLOC_COSTLY_ORDER)
3248 goto out;
3249 /* The OOM killer does not needlessly kill tasks for lowmem */
3250 if (ac->high_zoneidx < ZONE_NORMAL)
3251 goto out;
3252 if (pm_suspended_storage())
3253 goto out;
3254 /*
3255 * XXX: GFP_NOFS allocations should rather fail than rely on
3256 * other request to make a forward progress.
3257 * We are in an unfortunate situation where out_of_memory cannot
3258 * do much for this context but let's try it to at least get
3259 * access to memory reserved if the current task is killed (see
3260 * out_of_memory). Once filesystems are ready to handle allocation
3261 * failures more gracefully we should just bail out here.
3262 */
3263
3264 /* The OOM killer may not free memory on a specific node */
3265 if (gfp_mask & __GFP_THISNODE)
3266 goto out;
3da88fb3 3267
11e33f6a 3268 /* Exhausted what can be done so it's blamo time */
5020e285 3269 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
c32b3cbe 3270 *did_some_progress = 1;
5020e285 3271
6c18ba7a
MH
3272 /*
3273 * Help non-failing allocations by giving them access to memory
3274 * reserves
3275 */
3276 if (gfp_mask & __GFP_NOFAIL)
3277 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
5020e285 3278 ALLOC_NO_WATERMARKS, ac);
5020e285 3279 }
11e33f6a 3280out:
dc56401f 3281 mutex_unlock(&oom_lock);
11e33f6a
MG
3282 return page;
3283}
3284
33c2d214
MH
3285/*
3286 * Maximum number of compaction retries with progress before the OOM
3287 * killer is considered the only way to move forward.
3288 */
3289#define MAX_COMPACT_RETRIES 16
3290
56de7263
MG
3291#ifdef CONFIG_COMPACTION
3292/* Try memory compaction for high-order allocations before reclaim */
3293static struct page *
3294__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 3295 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 3296 enum compact_priority prio, enum compact_result *compact_result)
56de7263 3297{
98dd3b48 3298 struct page *page;
499118e9 3299 unsigned int noreclaim_flag;
53853e2d
VB
3300
3301 if (!order)
66199712 3302 return NULL;
66199712 3303
499118e9 3304 noreclaim_flag = memalloc_noreclaim_save();
c5d01d0d 3305 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
c3486f53 3306 prio);
499118e9 3307 memalloc_noreclaim_restore(noreclaim_flag);
56de7263 3308
c5d01d0d 3309 if (*compact_result <= COMPACT_INACTIVE)
98dd3b48 3310 return NULL;
53853e2d 3311
98dd3b48
VB
3312 /*
3313 * At least in one zone compaction wasn't deferred or skipped, so let's
3314 * count a compaction stall
3315 */
3316 count_vm_event(COMPACTSTALL);
8fb74b9f 3317
31a6c190 3318 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
53853e2d 3319
98dd3b48
VB
3320 if (page) {
3321 struct zone *zone = page_zone(page);
53853e2d 3322
98dd3b48
VB
3323 zone->compact_blockskip_flush = false;
3324 compaction_defer_reset(zone, order, true);
3325 count_vm_event(COMPACTSUCCESS);
3326 return page;
3327 }
56de7263 3328
98dd3b48
VB
3329 /*
3330 * It's bad if a compaction run occurs and fails. The most likely reason
3331 * is that pages exist, but not enough to satisfy watermarks.
3332 */
3333 count_vm_event(COMPACTFAIL);
66199712 3334
98dd3b48 3335 cond_resched();
56de7263
MG
3336
3337 return NULL;
3338}
33c2d214 3339
3250845d
VB
3340static inline bool
3341should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3342 enum compact_result compact_result,
3343 enum compact_priority *compact_priority,
d9436498 3344 int *compaction_retries)
3250845d
VB
3345{
3346 int max_retries = MAX_COMPACT_RETRIES;
c2033b00 3347 int min_priority;
65190cff
MH
3348 bool ret = false;
3349 int retries = *compaction_retries;
3350 enum compact_priority priority = *compact_priority;
3250845d
VB
3351
3352 if (!order)
3353 return false;
3354
d9436498
VB
3355 if (compaction_made_progress(compact_result))
3356 (*compaction_retries)++;
3357
3250845d
VB
3358 /*
3359 * compaction considers all the zones as desperately out of memory,
3360 * so it doesn't really make much sense to retry except when the
3361 * failure could be caused by insufficient priority
3362 */
d9436498
VB
3363 if (compaction_failed(compact_result))
3364 goto check_priority;
3250845d
VB
3365
3366 /*
3367 * make sure the compaction wasn't deferred and didn't bail out early
3368 * due to lock contention before we declare that we should give up.
3369 * But do not retry if the given zonelist is not suitable for
3370 * compaction.
3371 */
65190cff
MH
3372 if (compaction_withdrawn(compact_result)) {
3373 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3374 goto out;
3375 }
3250845d
VB
3376
3377 /*
3378 * !costly requests are much more important than __GFP_REPEAT
3379 * costly ones because they are de facto nofail and invoke the OOM
3380 * killer to move on while costly ones can fail and users are ready
3381 * to cope with that. The 1/4 retry factor is rather arbitrary but we
3382 * would need much more detailed feedback from compaction to
3383 * make a better decision.
3384 */
3385 if (order > PAGE_ALLOC_COSTLY_ORDER)
3386 max_retries /= 4;
65190cff
MH
3387 if (*compaction_retries <= max_retries) {
3388 ret = true;
3389 goto out;
3390 }
3250845d 3391
d9436498
VB
3392 /*
3393 * Make sure there are attempts at the highest priority if we exhausted
3394 * all retries or failed at the lower priorities.
3395 */
3396check_priority:
c2033b00
VB
3397 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3398 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
65190cff 3399
c2033b00 3400 if (*compact_priority > min_priority) {
d9436498
VB
3401 (*compact_priority)--;
3402 *compaction_retries = 0;
65190cff 3403 ret = true;
d9436498 3404 }
65190cff
MH
3405out:
3406 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3407 return ret;
3250845d 3408}
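/*
 * Worked example (editor's illustration, not kernel source, derived from the
 * constants above): a costly request (order > PAGE_ALLOC_COSTLY_ORDER) gets a
 * retry budget of MAX_COMPACT_RETRIES / 4 = 16 / 4 = 4 attempts that made
 * progress at the current priority. Once that budget is spent, or compaction
 * failed outright, the compaction priority is escalated one step and the
 * counter reset, until the most aggressive priority allowed for costly
 * requests (MIN_COMPACT_COSTLY_PRIORITY) has been tried.
 */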
56de7263
MG
3409#else
3410static inline struct page *
3411__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 3412 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 3413 enum compact_priority prio, enum compact_result *compact_result)
56de7263 3414{
33c2d214 3415 *compact_result = COMPACT_SKIPPED;
56de7263
MG
3416 return NULL;
3417}
33c2d214
MH
3418
3419static inline bool
86a294a8
MH
3420should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3421 enum compact_result compact_result,
a5508cd8 3422 enum compact_priority *compact_priority,
d9436498 3423 int *compaction_retries)
33c2d214 3424{
31e49bfd
MH
3425 struct zone *zone;
3426 struct zoneref *z;
3427
3428 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3429 return false;
3430
3431 /*
3432 * There are setups with compaction disabled which would prefer to loop
3433 * inside the allocator rather than hit the oom killer prematurely.
3434 * Let's give them good hope and keep retrying while the order-0
3435 * watermarks are OK.
3436 */
3437 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3438 ac->nodemask) {
3439 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3440 ac_classzone_idx(ac), alloc_flags))
3441 return true;
3442 }
33c2d214
MH
3443 return false;
3444}
3250845d 3445#endif /* CONFIG_COMPACTION */
56de7263 3446
bba90710
MS
3447/* Perform direct synchronous page reclaim */
3448static int
a9263751
VB
3449__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3450 const struct alloc_context *ac)
11e33f6a 3451{
11e33f6a 3452 struct reclaim_state reclaim_state;
bba90710 3453 int progress;
499118e9 3454 unsigned int noreclaim_flag;
11e33f6a
MG
3455
3456 cond_resched();
3457
3458 /* We now go into synchronous reclaim */
3459 cpuset_memory_pressure_bump();
499118e9 3460 noreclaim_flag = memalloc_noreclaim_save();
11e33f6a
MG
3461 lockdep_set_current_reclaim_state(gfp_mask);
3462 reclaim_state.reclaimed_slab = 0;
c06b1fca 3463 current->reclaim_state = &reclaim_state;
11e33f6a 3464
a9263751
VB
3465 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3466 ac->nodemask);
11e33f6a 3467
c06b1fca 3468 current->reclaim_state = NULL;
11e33f6a 3469 lockdep_clear_current_reclaim_state();
499118e9 3470 memalloc_noreclaim_restore(noreclaim_flag);
11e33f6a
MG
3471
3472 cond_resched();
3473
bba90710
MS
3474 return progress;
3475}
3476
3477/* The really slow allocator path where we enter direct reclaim */
3478static inline struct page *
3479__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
c603844b 3480 unsigned int alloc_flags, const struct alloc_context *ac,
a9263751 3481 unsigned long *did_some_progress)
bba90710
MS
3482{
3483 struct page *page = NULL;
3484 bool drained = false;
3485
a9263751 3486 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
9ee493ce
MG
3487 if (unlikely(!(*did_some_progress)))
3488 return NULL;
11e33f6a 3489
9ee493ce 3490retry:
31a6c190 3491 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
9ee493ce
MG
3492
3493 /*
3494 * If an allocation failed after direct reclaim, it could be because
0aaa29a5
MG
3495 * pages are pinned on the per-cpu lists or in high alloc reserves.
3496 * Shrink them and try again
9ee493ce
MG
3497 */
3498 if (!page && !drained) {
29fac03b 3499 unreserve_highatomic_pageblock(ac, false);
93481ff0 3500 drain_all_pages(NULL);
9ee493ce
MG
3501 drained = true;
3502 goto retry;
3503 }
3504
11e33f6a
MG
3505 return page;
3506}
3507
a9263751 3508static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3a025760
JW
3509{
3510 struct zoneref *z;
3511 struct zone *zone;
e1a55637 3512 pg_data_t *last_pgdat = NULL;
3a025760 3513
a9263751 3514 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
e1a55637
MG
3515 ac->high_zoneidx, ac->nodemask) {
3516 if (last_pgdat != zone->zone_pgdat)
52e9f87a 3517 wakeup_kswapd(zone, order, ac->high_zoneidx);
e1a55637
MG
3518 last_pgdat = zone->zone_pgdat;
3519 }
3a025760
JW
3520}
3521
c603844b 3522static inline unsigned int
341ce06f
PZ
3523gfp_to_alloc_flags(gfp_t gfp_mask)
3524{
c603844b 3525 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1da177e4 3526
a56f57ff 3527 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
e6223a3b 3528 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
933e312e 3529
341ce06f
PZ
3530 /*
3531 * The caller may dip into page reserves a bit more if it cannot run
3532 * direct reclaim, has a realtime scheduling policy, or is asking
3533 * for __GFP_HIGH memory. GFP_ATOMIC requests will
d0164adc 3534 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
341ce06f 3535 */
e6223a3b 3536 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1da177e4 3537
d0164adc 3538 if (gfp_mask & __GFP_ATOMIC) {
5c3240d9 3539 /*
b104a35d
DR
3540 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3541 * if it can't schedule.
5c3240d9 3542 */
b104a35d 3543 if (!(gfp_mask & __GFP_NOMEMALLOC))
5c3240d9 3544 alloc_flags |= ALLOC_HARDER;
523b9458 3545 /*
b104a35d 3546 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
344736f2 3547 * comment for __cpuset_node_allowed().
523b9458 3548 */
341ce06f 3549 alloc_flags &= ~ALLOC_CPUSET;
c06b1fca 3550 } else if (unlikely(rt_task(current)) && !in_interrupt())
341ce06f
PZ
3551 alloc_flags |= ALLOC_HARDER;
3552
d95ea5d1 3553#ifdef CONFIG_CMA
43e7a34d 3554 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
d95ea5d1
BZ
3555 alloc_flags |= ALLOC_CMA;
3556#endif
341ce06f
PZ
3557 return alloc_flags;
3558}
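/*
 * Editor's illustration (not kernel source) of what the mapping above yields
 * for two common requests, assuming GFP_ATOMIC carries __GFP_HIGH and
 * __GFP_ATOMIC, the caller is not a realtime task and __GFP_NOMEMALLOC is
 * not set:
 *
 *   gfp_to_alloc_flags(GFP_KERNEL) == ALLOC_WMARK_MIN | ALLOC_CPUSET
 *   gfp_to_alloc_flags(GFP_ATOMIC) == ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER
 *
 * i.e. atomic requests may dip further into the reserves and are not failed
 * purely on cpuset grounds.
 */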
3559
072bb0aa
MG
3560bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3561{
31a6c190
VB
3562 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3563 return false;
3564
3565 if (gfp_mask & __GFP_MEMALLOC)
3566 return true;
3567 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3568 return true;
3569 if (!in_interrupt() &&
3570 ((current->flags & PF_MEMALLOC) ||
3571 unlikely(test_thread_flag(TIF_MEMDIE))))
3572 return true;
3573
3574 return false;
072bb0aa
MG
3575}
3576
0a0337e0
MH
3577/*
3578 * Checks whether it makes sense to retry the reclaim to make a forward progress
3579 * for the given allocation request.
491d79ae
JW
3580 *
3581 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3582 * without success, or when we couldn't even meet the watermark if we
3583 * reclaimed all remaining pages on the LRU lists.
0a0337e0
MH
3584 *
3585 * Returns true if a retry is viable or false to enter the oom path.
3586 */
3587static inline bool
3588should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3589 struct alloc_context *ac, int alloc_flags,
423b452e 3590 bool did_some_progress, int *no_progress_loops)
0a0337e0
MH
3591{
3592 struct zone *zone;
3593 struct zoneref *z;
3594
423b452e
VB
3595 /*
3596 * Costly allocations might have made progress but this doesn't mean
3597 * their order will become available due to high fragmentation, so
3598 * always increment the no progress counter for them
3599 */
3600 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3601 *no_progress_loops = 0;
3602 else
3603 (*no_progress_loops)++;
3604
0a0337e0
MH
3605 /*
3606 * Make sure we converge to OOM if we cannot make any progress
3607 * several times in a row.
3608 */
04c8716f
MK
3609 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3610 /* Before OOM, exhaust highatomic_reserve */
29fac03b 3611 return unreserve_highatomic_pageblock(ac, true);
04c8716f 3612 }
0a0337e0 3613
bca67592
MG
3614 /*
3615 * Keep reclaiming pages while there is a chance this will lead
3616 * somewhere. If none of the target zones can satisfy our allocation
3617 * request even if all reclaimable pages are considered then we are
3618 * screwed and have to go OOM.
0a0337e0
MH
3619 */
3620 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3621 ac->nodemask) {
3622 unsigned long available;
ede37713 3623 unsigned long reclaimable;
d379f01d
MH
3624 unsigned long min_wmark = min_wmark_pages(zone);
3625 bool wmark;
0a0337e0 3626
5a1c84b4 3627 available = reclaimable = zone_reclaimable_pages(zone);
5a1c84b4 3628 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
0a0337e0
MH
3629
3630 /*
491d79ae
JW
3631 * Would the allocation succeed if we reclaimed all
3632 * reclaimable pages?
0a0337e0 3633 */
d379f01d
MH
3634 wmark = __zone_watermark_ok(zone, order, min_wmark,
3635 ac_classzone_idx(ac), alloc_flags, available);
3636 trace_reclaim_retry_zone(z, order, reclaimable,
3637 available, min_wmark, *no_progress_loops, wmark);
3638 if (wmark) {
ede37713
MH
3639 /*
3640 * If we didn't make any progress and have a lot of
3641 * dirty + writeback pages then we should wait for
3642 * an IO to complete to slow down the reclaim and
3643 * prevent a premature OOM
3644 */
3645 if (!did_some_progress) {
11fb9989 3646 unsigned long write_pending;
ede37713 3647
5a1c84b4
MG
3648 write_pending = zone_page_state_snapshot(zone,
3649 NR_ZONE_WRITE_PENDING);
ede37713 3650
11fb9989 3651 if (2 * write_pending > reclaimable) {
ede37713
MH
3652 congestion_wait(BLK_RW_ASYNC, HZ/10);
3653 return true;
3654 }
3655 }
5a1c84b4 3656
ede37713
MH
3657 /*
3658 * Memory allocation/reclaim might be called from a WQ
3659 * context and the current implementation of the WQ
3660 * concurrency control doesn't recognize that
3661 * a particular WQ is congested if the worker thread is
3662 * looping without ever sleeping. Therefore we have to
3663 * do a short sleep here rather than calling
3664 * cond_resched().
3665 */
3666 if (current->flags & PF_WQ_WORKER)
3667 schedule_timeout_uninterruptible(1);
3668 else
3669 cond_resched();
3670
0a0337e0
MH
3671 return true;
3672 }
3673 }
3674
3675 return false;
3676}
3677
902b6281
VB
3678static inline bool
3679check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
3680{
3681 /*
3682 * It's possible that cpuset's mems_allowed and the nodemask from
3683 * mempolicy don't intersect. This should be normally dealt with by
3684 * policy_nodemask(), but it's possible to race with cpuset update in
3685 * such a way that the check therein was true, and then it became false
3686 * before we got our cpuset_mems_cookie here.
3687 * This assumes that for all allocations, ac->nodemask can come only
3688 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
3689 * when it does not intersect with the cpuset restrictions) or the
3690 * caller can deal with a violated nodemask.
3691 */
3692 if (cpusets_enabled() && ac->nodemask &&
3693 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
3694 ac->nodemask = NULL;
3695 return true;
3696 }
3697
3698 /*
3699 * When updating a task's mems_allowed or mempolicy nodemask, it is
3700 * possible to race with parallel threads in such a way that our
3701 * allocation can fail while the mask is being updated. If we are about
3702 * to fail, check if the cpuset changed during allocation and if so,
3703 * retry.
3704 */
3705 if (read_mems_allowed_retry(cpuset_mems_cookie))
3706 return true;
3707
3708 return false;
3709}
3710
11e33f6a
MG
3711static inline struct page *
3712__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
a9263751 3713 struct alloc_context *ac)
11e33f6a 3714{
d0164adc 3715 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
282722b0 3716 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
11e33f6a 3717 struct page *page = NULL;
c603844b 3718 unsigned int alloc_flags;
11e33f6a 3719 unsigned long did_some_progress;
5ce9bfef 3720 enum compact_priority compact_priority;
c5d01d0d 3721 enum compact_result compact_result;
5ce9bfef
VB
3722 int compaction_retries;
3723 int no_progress_loops;
63f53dea
MH
3724 unsigned long alloc_start = jiffies;
3725 unsigned int stall_timeout = 10 * HZ;
5ce9bfef 3726 unsigned int cpuset_mems_cookie;
1da177e4 3727
72807a74
MG
3728 /*
3729 * In the slowpath, we sanity check order to avoid ever trying to
3730 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3731 * be using allocators in order of preference for an area that is
3732 * too large.
3733 */
1fc28b70
MG
3734 if (order >= MAX_ORDER) {
3735 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 3736 return NULL;
1fc28b70 3737 }
1da177e4 3738
d0164adc
MG
3739 /*
3740 * We also sanity check to catch abuse of atomic reserves being used by
3741 * callers that are not in atomic context.
3742 */
3743 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3744 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3745 gfp_mask &= ~__GFP_ATOMIC;
3746
5ce9bfef
VB
3747retry_cpuset:
3748 compaction_retries = 0;
3749 no_progress_loops = 0;
3750 compact_priority = DEF_COMPACT_PRIORITY;
3751 cpuset_mems_cookie = read_mems_allowed_begin();
9a67f648
MH
3752
3753 /*
3754 * The fast path uses conservative alloc_flags to succeed only until
3755 * kswapd needs to be woken up, and to avoid the cost of setting up
3756 * alloc_flags precisely. So we do that now.
3757 */
3758 alloc_flags = gfp_to_alloc_flags(gfp_mask);
3759
e47483bc
VB
3760 /*
3761 * We need to recalculate the starting point for the zonelist iterator
3762 * because we might have used different nodemask in the fast path, or
3763 * there was a cpuset modification and we are retrying - otherwise we
3764 * could end up iterating over non-eligible zones endlessly.
3765 */
3766 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3767 ac->high_zoneidx, ac->nodemask);
3768 if (!ac->preferred_zoneref->zone)
3769 goto nopage;
3770
23771235
VB
3771 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3772 wake_all_kswapds(order, ac);
3773
3774 /*
3775 * The adjusted alloc_flags might result in immediate success, so try
3776 * that first
3777 */
3778 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3779 if (page)
3780 goto got_pg;
3781
a8161d1e
VB
3782 /*
3783 * For costly allocations, try direct compaction first, as it's likely
282722b0
VB
3784 * that we have enough base pages and don't need to reclaim. For non-
3785 * movable high-order allocations, do that as well, as compaction will
3786 * try to prevent permanent fragmentation by migrating from blocks of the
3787 * same migratetype.
3788 * Don't try this for allocations that are allowed to ignore
3789 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
a8161d1e 3790 */
282722b0
VB
3791 if (can_direct_reclaim &&
3792 (costly_order ||
3793 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3794 && !gfp_pfmemalloc_allowed(gfp_mask)) {
a8161d1e
VB
3795 page = __alloc_pages_direct_compact(gfp_mask, order,
3796 alloc_flags, ac,
a5508cd8 3797 INIT_COMPACT_PRIORITY,
a8161d1e
VB
3798 &compact_result);
3799 if (page)
3800 goto got_pg;
3801
3eb2771b
VB
3802 /*
3803 * Checks for costly allocations with __GFP_NORETRY, which
3804 * include THP page fault allocations
3805 */
282722b0 3806 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
a8161d1e
VB
3807 /*
3808 * If compaction is deferred for high-order allocations,
3809 * it is because sync compaction recently failed. If
3810 * this is the case and the caller requested a THP
3811 * allocation, we do not want to heavily disrupt the
3812 * system, so we fail the allocation instead of entering
3813 * direct reclaim.
3814 */
3815 if (compact_result == COMPACT_DEFERRED)
3816 goto nopage;
3817
a8161d1e 3818 /*
3eb2771b
VB
3819 * Looks like reclaim/compaction is worth trying, but
3820 * sync compaction could be very expensive, so keep
25160354 3821 * using async compaction.
a8161d1e 3822 */
a5508cd8 3823 compact_priority = INIT_COMPACT_PRIORITY;
a8161d1e
VB
3824 }
3825 }
23771235 3826
31a6c190 3827retry:
23771235 3828 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
31a6c190
VB
3829 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3830 wake_all_kswapds(order, ac);
3831
23771235
VB
3832 if (gfp_pfmemalloc_allowed(gfp_mask))
3833 alloc_flags = ALLOC_NO_WATERMARKS;
3834
e46e7b77
MG
3835 /*
3836 * Reset the zonelist iterators if memory policies can be ignored.
3837 * These allocations are high priority and system rather than user
3838 * oriented.
3839 */
23771235 3840 if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
e46e7b77
MG
3841 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3842 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3843 ac->high_zoneidx, ac->nodemask);
3844 }
3845
23771235 3846 /* Attempt with potentially adjusted zonelist and alloc_flags */
31a6c190 3847 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
7fb1d9fc
RS
3848 if (page)
3849 goto got_pg;
1da177e4 3850
d0164adc 3851 /* Caller is not willing to reclaim, we can't balance anything */
9a67f648 3852 if (!can_direct_reclaim)
1da177e4
LT
3853 goto nopage;
3854
9a67f648
MH
3855 /* Make sure we know about allocations which stall for too long */
3856 if (time_after(jiffies, alloc_start + stall_timeout)) {
82251963 3857 warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
9a67f648
MH
3858 "page allocation stalls for %ums, order:%u",
3859 jiffies_to_msecs(jiffies-alloc_start), order);
3860 stall_timeout += 10 * HZ;
33d53103 3861 }
341ce06f 3862
9a67f648
MH
3863 /* Avoid recursion of direct reclaim */
3864 if (current->flags & PF_MEMALLOC)
6583bb64
DR
3865 goto nopage;
3866
a8161d1e
VB
3867 /* Try direct reclaim and then allocating */
3868 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3869 &did_some_progress);
3870 if (page)
3871 goto got_pg;
3872
3873 /* Try direct compaction and then allocating */
a9263751 3874 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
a5508cd8 3875 compact_priority, &compact_result);
56de7263
MG
3876 if (page)
3877 goto got_pg;
75f30861 3878
9083905a
JW
3879 /* Do not loop if specifically requested */
3880 if (gfp_mask & __GFP_NORETRY)
a8161d1e 3881 goto nopage;
9083905a 3882
0a0337e0
MH
3883 /*
3884 * Do not retry costly high order allocations unless they are
3885 * __GFP_REPEAT
3886 */
282722b0 3887 if (costly_order && !(gfp_mask & __GFP_REPEAT))
a8161d1e 3888 goto nopage;
0a0337e0 3889
0a0337e0 3890 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
423b452e 3891 did_some_progress > 0, &no_progress_loops))
0a0337e0
MH
3892 goto retry;
3893
33c2d214
MH
3894 /*
3895 * It doesn't make any sense to retry the compaction if the order-0
3896 * reclaim is not able to make any progress because the current
3897 * implementation of the compaction depends on a sufficient amount
3898 * of free memory (see __compaction_suitable)
3899 */
3900 if (did_some_progress > 0 &&
86a294a8 3901 should_compact_retry(ac, order, alloc_flags,
a5508cd8 3902 compact_result, &compact_priority,
d9436498 3903 &compaction_retries))
33c2d214
MH
3904 goto retry;
3905
902b6281
VB
3906
3907 /* Deal with possible cpuset update races before we start OOM killing */
3908 if (check_retry_cpuset(cpuset_mems_cookie, ac))
e47483bc
VB
3909 goto retry_cpuset;
3910
9083905a
JW
3911 /* Reclaim has failed us, start killing things */
3912 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3913 if (page)
3914 goto got_pg;
3915
9a67f648 3916 /* Avoid allocations with no watermarks from looping endlessly */
c288983d
TH
3917 if (test_thread_flag(TIF_MEMDIE) &&
3918 (alloc_flags == ALLOC_NO_WATERMARKS ||
3919 (gfp_mask & __GFP_NOMEMALLOC)))
9a67f648
MH
3920 goto nopage;
3921
9083905a 3922 /* Retry as long as the OOM killer is making progress */
0a0337e0
MH
3923 if (did_some_progress) {
3924 no_progress_loops = 0;
9083905a 3925 goto retry;
0a0337e0 3926 }
9083905a 3927
1da177e4 3928nopage:
902b6281
VB
3929 /* Deal with possible cpuset update races before we fail */
3930 if (check_retry_cpuset(cpuset_mems_cookie, ac))
5ce9bfef
VB
3931 goto retry_cpuset;
3932
9a67f648
MH
3933 /*
3934 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
3935 * we always retry
3936 */
3937 if (gfp_mask & __GFP_NOFAIL) {
3938 /*
3939 * All existing users of __GFP_NOFAIL are blockable, so warn
3940 * about any new users that actually require GFP_NOWAIT
3941 */
3942 if (WARN_ON_ONCE(!can_direct_reclaim))
3943 goto fail;
3944
3945 /*
3946 * A PF_MEMALLOC request from this context is rather bizarre
3947 * because we cannot reclaim anything and can only loop waiting
3948 * for somebody to do the work for us
3949 */
3950 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
3951
3952 /*
3953 * non-failing costly orders are a hard requirement which we
3954 * are not well prepared for, so let's warn about these users
3955 * so that we can identify them and convert them to something
3956 * else.
3957 */
3958 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
3959
6c18ba7a
MH
3960 /*
3961 * Help non-failing allocations by giving them access to memory
3962 * reserves but do not use ALLOC_NO_WATERMARKS because this
3963 * could deplete the whole memory reserves which would just make
3964 * the situation worse
3965 */
3966 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
3967 if (page)
3968 goto got_pg;
3969
9a67f648
MH
3970 cond_resched();
3971 goto retry;
3972 }
3973fail:
a8e99259 3974 warn_alloc(gfp_mask, ac->nodemask,
7877cdcc 3975 "page allocation failure: order:%u", order);
1da177e4 3976got_pg:
072bb0aa 3977 return page;
1da177e4 3978}
11e33f6a 3979
9cd75558 3980static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
04ec6264 3981 int preferred_nid, nodemask_t *nodemask,
9cd75558
MG
3982 struct alloc_context *ac, gfp_t *alloc_mask,
3983 unsigned int *alloc_flags)
11e33f6a 3984{
9cd75558 3985 ac->high_zoneidx = gfp_zone(gfp_mask);
04ec6264 3986 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
9cd75558
MG
3987 ac->nodemask = nodemask;
3988 ac->migratetype = gfpflags_to_migratetype(gfp_mask);
11e33f6a 3989
682a3385 3990 if (cpusets_enabled()) {
9cd75558 3991 *alloc_mask |= __GFP_HARDWALL;
9cd75558
MG
3992 if (!ac->nodemask)
3993 ac->nodemask = &cpuset_current_mems_allowed;
51047820
VB
3994 else
3995 *alloc_flags |= ALLOC_CPUSET;
682a3385
MG
3996 }
3997
11e33f6a
MG
3998 lockdep_trace_alloc(gfp_mask);
3999
d0164adc 4000 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
11e33f6a
MG
4001
4002 if (should_fail_alloc_page(gfp_mask, order))
9cd75558 4003 return false;
11e33f6a 4004
9cd75558
MG
4005 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4006 *alloc_flags |= ALLOC_CMA;
4007
4008 return true;
4009}
21bb9bd1 4010
9cd75558
MG
4011/* Determine whether to spread dirty pages and what the first usable zone is */
4012static inline void finalise_ac(gfp_t gfp_mask,
4013 unsigned int order, struct alloc_context *ac)
4014{
c9ab0c4f 4015 /* Dirty zone balancing only done in the fast path */
9cd75558 4016 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
c9ab0c4f 4017
e46e7b77
MG
4018 /*
4019 * The preferred zone is used for statistics but crucially it is
4020 * also used as the starting point for the zonelist iterator. It
4021 * may get reset for allocations that ignore memory policies.
4022 */
9cd75558
MG
4023 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4024 ac->high_zoneidx, ac->nodemask);
4025}
4026
4027/*
4028 * This is the 'heart' of the zoned buddy allocator.
4029 */
4030struct page *
04ec6264
VB
4031__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4032 nodemask_t *nodemask)
9cd75558
MG
4033{
4034 struct page *page;
4035 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4036 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
4037 struct alloc_context ac = { };
4038
4039 gfp_mask &= gfp_allowed_mask;
04ec6264 4040 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
9cd75558
MG
4041 return NULL;
4042
4043 finalise_ac(gfp_mask, order, &ac);
5bb1b169 4044
5117f45d 4045 /* First allocation attempt */
a9263751 4046 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4fcb0971
MG
4047 if (likely(page))
4048 goto out;
11e33f6a 4049
4fcb0971 4050 /*
7dea19f9
MH
4051 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4052 * and GFP_NOIO, which have to be inherited for all allocation requests
4053 * from a particular context which has been marked by
4054 * memalloc_no{fs,io}_{save,restore}.
4fcb0971 4055 */
7dea19f9 4056 alloc_mask = current_gfp_context(gfp_mask);
4fcb0971 4057 ac.spread_dirty_pages = false;
23f086f9 4058
4741526b
MG
4059 /*
4060 * Restore the original nodemask if it was potentially replaced with
4061 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4062 */
e47483bc 4063 if (unlikely(ac.nodemask != nodemask))
4741526b 4064 ac.nodemask = nodemask;
16096c25 4065
4fcb0971 4066 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
cc9a6c87 4067
4fcb0971 4068out:
c4159a75
VD
4069 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4070 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4071 __free_pages(page, order);
4072 page = NULL;
4949148a
VD
4073 }
4074
4fcb0971
MG
4075 if (kmemcheck_enabled && page)
4076 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
4077
4078 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4079
11e33f6a 4080 return page;
1da177e4 4081}
d239171e 4082EXPORT_SYMBOL(__alloc_pages_nodemask);
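/*
 * Minimal usage sketch (editor's illustration, not kernel source): most
 * callers reach the entry point above through the alloc_pages*() wrappers in
 * <linux/gfp.h>, which supply the local node and a NULL nodemask much like
 * the hypothetical helper below. Header list is indicative only.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int demo_alloc_one_page(void)
{
	/* order-0 allocation, local node preferred, no explicit nodemask */
	struct page *page = __alloc_pages_nodemask(GFP_KERNEL, 0,
						   numa_node_id(), NULL);

	if (!page)
		return -ENOMEM;

	/* ... use page_address(page) ... */

	__free_pages(page, 0);	/* drop the reference taken at allocation */
	return 0;
}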
1da177e4
LT
4083
4084/*
4085 * Common helper functions.
4086 */
920c7a5d 4087unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 4088{
945a1113
AM
4089 struct page *page;
4090
4091 /*
4092 * __get_free_pages() returns a 32-bit address, which cannot represent
4093 * a highmem page
4094 */
4095 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
4096
1da177e4
LT
4097 page = alloc_pages(gfp_mask, order);
4098 if (!page)
4099 return 0;
4100 return (unsigned long) page_address(page);
4101}
1da177e4
LT
4102EXPORT_SYMBOL(__get_free_pages);
4103
920c7a5d 4104unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 4105{
945a1113 4106 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 4107}
1da177e4
LT
4108EXPORT_SYMBOL(get_zeroed_page);
4109
920c7a5d 4110void __free_pages(struct page *page, unsigned int order)
1da177e4 4111{
b5810039 4112 if (put_page_testzero(page)) {
1da177e4 4113 if (order == 0)
b745bc85 4114 free_hot_cold_page(page, false);
1da177e4
LT
4115 else
4116 __free_pages_ok(page, order);
4117 }
4118}
4119
4120EXPORT_SYMBOL(__free_pages);
4121
920c7a5d 4122void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
4123{
4124 if (addr != 0) {
725d704e 4125 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
4126 __free_pages(virt_to_page((void *)addr), order);
4127 }
4128}
4129
4130EXPORT_SYMBOL(free_pages);
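/*
 * Usage sketch for the helpers above (editor's illustration, not kernel
 * source; demo_use_page_helpers() is hypothetical). __get_free_pages() and
 * get_zeroed_page() return directly mapped kernel addresses and must be
 * released with free_pages() using the same order; free_pages() ignores a
 * zero address, so the error path below is safe.
 */
#include <linux/gfp.h>
#include <linux/errno.h>

static int demo_use_page_helpers(void)
{
	unsigned long buf  = __get_free_pages(GFP_KERNEL, 1);	/* two contiguous pages */
	unsigned long zero = get_zeroed_page(GFP_KERNEL);	/* one zeroed page */

	if (!buf || !zero) {
		free_pages(buf, 1);
		free_pages(zero, 0);
		return -ENOMEM;
	}

	/* ... use the buffers ... */

	free_pages(zero, 0);
	free_pages(buf, 1);
	return 0;
}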
4131
b63ae8ca
AD
4132/*
4133 * Page Fragment:
4134 * An arbitrary-length arbitrary-offset area of memory which resides
4135 * within a 0 or higher order page. Multiple fragments within that page
4136 * are individually refcounted, in the page's reference counter.
4137 *
4138 * The page_frag functions below provide a simple allocation framework for
4139 * page fragments. This is used by the network stack and network device
4140 * drivers to provide a backing region of memory for use as either an
4141 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4142 */
2976db80
AD
4143static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4144 gfp_t gfp_mask)
b63ae8ca
AD
4145{
4146 struct page *page = NULL;
4147 gfp_t gfp = gfp_mask;
4148
4149#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4150 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4151 __GFP_NOMEMALLOC;
4152 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4153 PAGE_FRAG_CACHE_MAX_ORDER);
4154 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4155#endif
4156 if (unlikely(!page))
4157 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4158
4159 nc->va = page ? page_address(page) : NULL;
4160
4161 return page;
4162}
4163
2976db80 4164void __page_frag_cache_drain(struct page *page, unsigned int count)
44fdffd7
AD
4165{
4166 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4167
4168 if (page_ref_sub_and_test(page, count)) {
2976db80
AD
4169 unsigned int order = compound_order(page);
4170
44fdffd7
AD
4171 if (order == 0)
4172 free_hot_cold_page(page, false);
4173 else
4174 __free_pages_ok(page, order);
4175 }
4176}
2976db80 4177EXPORT_SYMBOL(__page_frag_cache_drain);
44fdffd7 4178
8c2dd3e4
AD
4179void *page_frag_alloc(struct page_frag_cache *nc,
4180 unsigned int fragsz, gfp_t gfp_mask)
b63ae8ca
AD
4181{
4182 unsigned int size = PAGE_SIZE;
4183 struct page *page;
4184 int offset;
4185
4186 if (unlikely(!nc->va)) {
4187refill:
2976db80 4188 page = __page_frag_cache_refill(nc, gfp_mask);
b63ae8ca
AD
4189 if (!page)
4190 return NULL;
4191
4192#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4193 /* if size can vary use size else just use PAGE_SIZE */
4194 size = nc->size;
4195#endif
4196 /* Even if we own the page, we do not use atomic_set().
4197 * This would break get_page_unless_zero() users.
4198 */
fe896d18 4199 page_ref_add(page, size - 1);
b63ae8ca
AD
4200
4201 /* reset page count bias and offset to start of new frag */
2f064f34 4202 nc->pfmemalloc = page_is_pfmemalloc(page);
b63ae8ca
AD
4203 nc->pagecnt_bias = size;
4204 nc->offset = size;
4205 }
4206
4207 offset = nc->offset - fragsz;
4208 if (unlikely(offset < 0)) {
4209 page = virt_to_page(nc->va);
4210
fe896d18 4211 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
b63ae8ca
AD
4212 goto refill;
4213
4214#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4215 /* if size can vary use size else just use PAGE_SIZE */
4216 size = nc->size;
4217#endif
4218 /* OK, page count is 0, we can safely set it */
fe896d18 4219 set_page_count(page, size);
b63ae8ca
AD
4220
4221 /* reset page count bias and offset to start of new frag */
4222 nc->pagecnt_bias = size;
4223 offset = size - fragsz;
4224 }
4225
4226 nc->pagecnt_bias--;
4227 nc->offset = offset;
4228
4229 return nc->va + offset;
4230}
8c2dd3e4 4231EXPORT_SYMBOL(page_frag_alloc);
b63ae8ca
AD
4232
4233/*
4234 * Frees a page fragment allocated out of either a compound or order 0 page.
4235 */
8c2dd3e4 4236void page_frag_free(void *addr)
b63ae8ca
AD
4237{
4238 struct page *page = virt_to_head_page(addr);
4239
4240 if (unlikely(put_page_testzero(page)))
4241 __free_pages_ok(page, compound_order(page));
4242}
8c2dd3e4 4243EXPORT_SYMBOL(page_frag_free);
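/*
 * Usage sketch for the fragment allocator above (editor's illustration, not
 * kernel source). The cache must start out zeroed so that the first
 * allocation triggers a refill; real users such as the networking stack keep
 * one cache per CPU. demo_frag_cache and demo_page_frag_usage() are
 * hypothetical names, and the header list is indicative only.
 */
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/errno.h>

static struct page_frag_cache demo_frag_cache;	/* zeroed: first alloc refills */

static int demo_page_frag_usage(void)
{
	void *a, *b;

	a = page_frag_alloc(&demo_frag_cache, 256, GFP_ATOMIC);
	if (!a)
		return -ENOMEM;

	b = page_frag_alloc(&demo_frag_cache, 512, GFP_ATOMIC);
	if (!b) {
		page_frag_free(a);
		return -ENOMEM;
	}

	/* ... fill the fragments ... */

	page_frag_free(b);	/* each fragment holds its own page reference */
	page_frag_free(a);
	return 0;
}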
b63ae8ca 4244
d00181b9
KS
4245static void *make_alloc_exact(unsigned long addr, unsigned int order,
4246 size_t size)
ee85c2e1
AK
4247{
4248 if (addr) {
4249 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4250 unsigned long used = addr + PAGE_ALIGN(size);
4251
4252 split_page(virt_to_page((void *)addr), order);
4253 while (used < alloc_end) {
4254 free_page(used);
4255 used += PAGE_SIZE;
4256 }
4257 }
4258 return (void *)addr;
4259}
4260
2be0ffe2
TT
4261/**
4262 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4263 * @size: the number of bytes to allocate
4264 * @gfp_mask: GFP flags for the allocation
4265 *
4266 * This function is similar to alloc_pages(), except that it allocates the
4267 * minimum number of pages to satisfy the request. alloc_pages() can only
4268 * allocate memory in power-of-two pages.
4269 *
4270 * This function is also limited by MAX_ORDER.
4271 *
4272 * Memory allocated by this function must be released by free_pages_exact().
4273 */
4274void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4275{
4276 unsigned int order = get_order(size);
4277 unsigned long addr;
4278
4279 addr = __get_free_pages(gfp_mask, order);
ee85c2e1 4280 return make_alloc_exact(addr, order, size);
2be0ffe2
TT
4281}
4282EXPORT_SYMBOL(alloc_pages_exact);
4283
ee85c2e1
AK
4284/**
4285 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4286 * pages on a node.
b5e6ab58 4287 * @nid: the preferred node ID where memory should be allocated
ee85c2e1
AK
4288 * @size: the number of bytes to allocate
4289 * @gfp_mask: GFP flags for the allocation
4290 *
4291 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4292 * back.
ee85c2e1 4293 */
e1931811 4294void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
ee85c2e1 4295{
d00181b9 4296 unsigned int order = get_order(size);
ee85c2e1
AK
4297 struct page *p = alloc_pages_node(nid, gfp_mask, order);
4298 if (!p)
4299 return NULL;
4300 return make_alloc_exact((unsigned long)page_address(p), order, size);
4301}
ee85c2e1 4302
2be0ffe2
TT
4303/**
4304 * free_pages_exact - release memory allocated via alloc_pages_exact()
4305 * @virt: the value returned by alloc_pages_exact.
4306 * @size: size of allocation, same value as passed to alloc_pages_exact().
4307 *
4308 * Release the memory allocated by a previous call to alloc_pages_exact.
4309 */
4310void free_pages_exact(void *virt, size_t size)
4311{
4312 unsigned long addr = (unsigned long)virt;
4313 unsigned long end = addr + PAGE_ALIGN(size);
4314
4315 while (addr < end) {
4316 free_page(addr);
4317 addr += PAGE_SIZE;
4318 }
4319}
4320EXPORT_SYMBOL(free_pages_exact);
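/*
 * Usage sketch for the exact-size pair above (editor's illustration, not
 * kernel source; demo_alloc_exact() is hypothetical). The same size must be
 * passed to free_pages_exact(), which recomputes the page-aligned extent
 * from it.
 */
#include <linux/gfp.h>
#include <linux/errno.h>

static int demo_alloc_exact(void)
{
	size_t size = 3 * PAGE_SIZE;	/* not rounded up to an order-2 block */
	void *buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);

	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */

	free_pages_exact(buf, size);
	return 0;
}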
4321
e0fb5815
ZY
4322/**
4323 * nr_free_zone_pages - count number of pages beyond high watermark
4324 * @offset: The zone index of the highest zone
4325 *
4326 * nr_free_zone_pages() counts the number of pages which are beyond the
4327 * high watermark within all zones at or below a given zone index. For each
4328 * zone, the number of pages is calculated as:
0e056eb5
MCC
4329 *
4330 * nr_free_zone_pages = managed_pages - high_pages
e0fb5815 4331 */
ebec3862 4332static unsigned long nr_free_zone_pages(int offset)
1da177e4 4333{
dd1a239f 4334 struct zoneref *z;
54a6eb5c
MG
4335 struct zone *zone;
4336
e310fd43 4337 /* Just pick one node, since fallback list is circular */
ebec3862 4338 unsigned long sum = 0;
1da177e4 4339
0e88460d 4340 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 4341
54a6eb5c 4342 for_each_zone_zonelist(zone, z, zonelist, offset) {
b40da049 4343 unsigned long size = zone->managed_pages;
41858966 4344 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
4345 if (size > high)
4346 sum += size - high;
1da177e4
LT
4347 }
4348
4349 return sum;
4350}
4351
e0fb5815
ZY
4352/**
4353 * nr_free_buffer_pages - count number of pages beyond high watermark
4354 *
4355 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4356 * watermark within ZONE_DMA and ZONE_NORMAL.
1da177e4 4357 */
ebec3862 4358unsigned long nr_free_buffer_pages(void)
1da177e4 4359{
af4ca457 4360 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 4361}
c2f1a551 4362EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4 4363
e0fb5815
ZY
4364/**
4365 * nr_free_pagecache_pages - count number of pages beyond high watermark
4366 *
4367 * nr_free_pagecache_pages() counts the number of pages which are beyond the
4368 * high watermark within all zones.
1da177e4 4369 */
ebec3862 4370unsigned long nr_free_pagecache_pages(void)
1da177e4 4371{
2a1e274a 4372 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 4373}
08e0f6a9
CL
4374
4375static inline void show_node(struct zone *zone)
1da177e4 4376{
e5adfffc 4377 if (IS_ENABLED(CONFIG_NUMA))
25ba77c1 4378 printk("Node %d ", zone_to_nid(zone));
1da177e4 4379}
1da177e4 4380
d02bd27b
IR
4381long si_mem_available(void)
4382{
4383 long available;
4384 unsigned long pagecache;
4385 unsigned long wmark_low = 0;
4386 unsigned long pages[NR_LRU_LISTS];
4387 struct zone *zone;
4388 int lru;
4389
4390 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
2f95ff90 4391 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
d02bd27b
IR
4392
4393 for_each_zone(zone)
4394 wmark_low += zone->watermark[WMARK_LOW];
4395
4396 /*
4397 * Estimate the amount of memory available for userspace allocations,
4398 * without causing swapping.
4399 */
4400 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4401
4402 /*
4403 * Not all the page cache can be freed, otherwise the system will
4404 * start swapping. Assume at least half of the page cache, or the
4405 * low watermark worth of cache, needs to stay.
4406 */
4407 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4408 pagecache -= min(pagecache / 2, wmark_low);
4409 available += pagecache;
4410
4411 /*
4412 * Part of the reclaimable slab consists of items that are in use,
4413 * and cannot be freed. Cap this estimate at the low watermark.
4414 */
4415 available += global_page_state(NR_SLAB_RECLAIMABLE) -
4416 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
4417
4418 if (available < 0)
4419 available = 0;
4420 return available;
4421}
4422EXPORT_SYMBOL_GPL(si_mem_available);
4423
1da177e4
LT
4424void si_meminfo(struct sysinfo *val)
4425{
4426 val->totalram = totalram_pages;
11fb9989 4427 val->sharedram = global_node_page_state(NR_SHMEM);
d23ad423 4428 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 4429 val->bufferram = nr_blockdev_pages();
1da177e4
LT
4430 val->totalhigh = totalhigh_pages;
4431 val->freehigh = nr_free_highpages();
1da177e4
LT
4432 val->mem_unit = PAGE_SIZE;
4433}
4434
4435EXPORT_SYMBOL(si_meminfo);
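/*
 * Usage sketch (editor's illustration, not kernel source): roughly how the
 * two reporting helpers above are consumed, in the spirit of what
 * /proc/meminfo exposes. demo_report_memory() is hypothetical and the header
 * list is indicative only.
 */
#include <linux/mm.h>
#include <linux/printk.h>

static void demo_report_memory(void)
{
	struct sysinfo si;
	long avail = si_mem_available();	/* pages usable without swapping */

	si_meminfo(&si);	/* counters are in units of si.mem_unit (PAGE_SIZE) */

	pr_info("total: %lu, free: %lu, available: %ld (pages)\n",
		(unsigned long)si.totalram, (unsigned long)si.freeram, avail);
}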
4436
4437#ifdef CONFIG_NUMA
4438void si_meminfo_node(struct sysinfo *val, int nid)
4439{
cdd91a77
JL
4440 int zone_type; /* needs to be signed */
4441 unsigned long managed_pages = 0;
fc2bd799
JK
4442 unsigned long managed_highpages = 0;
4443 unsigned long free_highpages = 0;
1da177e4
LT
4444 pg_data_t *pgdat = NODE_DATA(nid);
4445
cdd91a77
JL
4446 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4447 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4448 val->totalram = managed_pages;
11fb9989 4449 val->sharedram = node_page_state(pgdat, NR_SHMEM);
75ef7184 4450 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 4451#ifdef CONFIG_HIGHMEM
fc2bd799
JK
4452 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4453 struct zone *zone = &pgdat->node_zones[zone_type];
4454
4455 if (is_highmem(zone)) {
4456 managed_highpages += zone->managed_pages;
4457 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4458 }
4459 }
4460 val->totalhigh = managed_highpages;
4461 val->freehigh = free_highpages;
98d2b0eb 4462#else
fc2bd799
JK
4463 val->totalhigh = managed_highpages;
4464 val->freehigh = free_highpages;
98d2b0eb 4465#endif
1da177e4
LT
4466 val->mem_unit = PAGE_SIZE;
4467}
4468#endif
4469
ddd588b5 4470/*
7bf02ea2
DR
4471 * Determine whether the node should be displayed or not, depending on whether
4472 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
ddd588b5 4473 */
9af744d7 4474static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
ddd588b5 4475{
ddd588b5 4476 if (!(flags & SHOW_MEM_FILTER_NODES))
9af744d7 4477 return false;
ddd588b5 4478
9af744d7
MH
4479 /*
4480 * no node mask - aka implicit memory numa policy. Do not bother with
4481 * the synchronization - read_mems_allowed_begin - because we do not
4482 * have to be precise here.
4483 */
4484 if (!nodemask)
4485 nodemask = &cpuset_current_mems_allowed;
4486
4487 return !node_isset(nid, *nodemask);
ddd588b5
DR
4488}
4489
1da177e4
LT
4490#define K(x) ((x) << (PAGE_SHIFT-10))
4491
377e4f16
RV
4492static void show_migration_types(unsigned char type)
4493{
4494 static const char types[MIGRATE_TYPES] = {
4495 [MIGRATE_UNMOVABLE] = 'U',
377e4f16 4496 [MIGRATE_MOVABLE] = 'M',
475a2f90
VB
4497 [MIGRATE_RECLAIMABLE] = 'E',
4498 [MIGRATE_HIGHATOMIC] = 'H',
377e4f16
RV
4499#ifdef CONFIG_CMA
4500 [MIGRATE_CMA] = 'C',
4501#endif
194159fb 4502#ifdef CONFIG_MEMORY_ISOLATION
377e4f16 4503 [MIGRATE_ISOLATE] = 'I',
194159fb 4504#endif
377e4f16
RV
4505 };
4506 char tmp[MIGRATE_TYPES + 1];
4507 char *p = tmp;
4508 int i;
4509
4510 for (i = 0; i < MIGRATE_TYPES; i++) {
4511 if (type & (1 << i))
4512 *p++ = types[i];
4513 }
4514
4515 *p = '\0';
1f84a18f 4516 printk(KERN_CONT "(%s) ", tmp);
377e4f16
RV
4517}
4518
1da177e4
LT
4519/*
4520 * Show free area list (used inside shift_scroll-lock stuff)
4521 * We also calculate the percentage fragmentation. We do this by counting the
4522 * memory on each free list with the exception of the first item on the list.
d1bfcdb8
KK
4523 *
4524 * Bits in @filter:
4525 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4526 * cpuset.
1da177e4 4527 */
9af744d7 4528void show_free_areas(unsigned int filter, nodemask_t *nodemask)
1da177e4 4529{
d1bfcdb8 4530 unsigned long free_pcp = 0;
c7241913 4531 int cpu;
1da177e4 4532 struct zone *zone;
599d0c95 4533 pg_data_t *pgdat;
1da177e4 4534
ee99c71c 4535 for_each_populated_zone(zone) {
9af744d7 4536 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 4537 continue;
d1bfcdb8 4538
761b0677
KK
4539 for_each_online_cpu(cpu)
4540 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
1da177e4
LT
4541 }
4542
a731286d
KM
4543 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4544 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
d1bfcdb8
KK
4545 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4546 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
d1ce749a 4547 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
d1bfcdb8 4548 " free:%lu free_pcp:%lu free_cma:%lu\n",
599d0c95
MG
4549 global_node_page_state(NR_ACTIVE_ANON),
4550 global_node_page_state(NR_INACTIVE_ANON),
4551 global_node_page_state(NR_ISOLATED_ANON),
4552 global_node_page_state(NR_ACTIVE_FILE),
4553 global_node_page_state(NR_INACTIVE_FILE),
4554 global_node_page_state(NR_ISOLATED_FILE),
4555 global_node_page_state(NR_UNEVICTABLE),
11fb9989
MG
4556 global_node_page_state(NR_FILE_DIRTY),
4557 global_node_page_state(NR_WRITEBACK),
4558 global_node_page_state(NR_UNSTABLE_NFS),
3701b033
KM
4559 global_page_state(NR_SLAB_RECLAIMABLE),
4560 global_page_state(NR_SLAB_UNRECLAIMABLE),
50658e2e 4561 global_node_page_state(NR_FILE_MAPPED),
11fb9989 4562 global_node_page_state(NR_SHMEM),
a25700a5 4563 global_page_state(NR_PAGETABLE),
d1ce749a 4564 global_page_state(NR_BOUNCE),
d1bfcdb8
KK
4565 global_page_state(NR_FREE_PAGES),
4566 free_pcp,
d1ce749a 4567 global_page_state(NR_FREE_CMA_PAGES));
1da177e4 4568
599d0c95 4569 for_each_online_pgdat(pgdat) {
9af744d7 4570 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
c02e50bb
MH
4571 continue;
4572
599d0c95
MG
4573 printk("Node %d"
4574 " active_anon:%lukB"
4575 " inactive_anon:%lukB"
4576 " active_file:%lukB"
4577 " inactive_file:%lukB"
4578 " unevictable:%lukB"
4579 " isolated(anon):%lukB"
4580 " isolated(file):%lukB"
50658e2e 4581 " mapped:%lukB"
11fb9989
MG
4582 " dirty:%lukB"
4583 " writeback:%lukB"
4584 " shmem:%lukB"
4585#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4586 " shmem_thp: %lukB"
4587 " shmem_pmdmapped: %lukB"
4588 " anon_thp: %lukB"
4589#endif
4590 " writeback_tmp:%lukB"
4591 " unstable:%lukB"
599d0c95
MG
4592 " all_unreclaimable? %s"
4593 "\n",
4594 pgdat->node_id,
4595 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4596 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4597 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4598 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4599 K(node_page_state(pgdat, NR_UNEVICTABLE)),
4600 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4601 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
50658e2e 4602 K(node_page_state(pgdat, NR_FILE_MAPPED)),
11fb9989
MG
4603 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4604 K(node_page_state(pgdat, NR_WRITEBACK)),
1f06b81a 4605 K(node_page_state(pgdat, NR_SHMEM)),
11fb9989
MG
4606#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4607 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4608 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4609 * HPAGE_PMD_NR),
4610 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4611#endif
11fb9989
MG
4612 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4613 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
c73322d0
JW
4614 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
4615 "yes" : "no");
599d0c95
MG
4616 }
4617
ee99c71c 4618 for_each_populated_zone(zone) {
1da177e4
LT
4619 int i;
4620
9af744d7 4621 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 4622 continue;
d1bfcdb8
KK
4623
4624 free_pcp = 0;
4625 for_each_online_cpu(cpu)
4626 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4627
1da177e4 4628 show_node(zone);
1f84a18f
JP
4629 printk(KERN_CONT
4630 "%s"
1da177e4
LT
4631 " free:%lukB"
4632 " min:%lukB"
4633 " low:%lukB"
4634 " high:%lukB"
71c799f4
MK
4635 " active_anon:%lukB"
4636 " inactive_anon:%lukB"
4637 " active_file:%lukB"
4638 " inactive_file:%lukB"
4639 " unevictable:%lukB"
5a1c84b4 4640 " writepending:%lukB"
1da177e4 4641 " present:%lukB"
9feedc9d 4642 " managed:%lukB"
4a0aa73f 4643 " mlocked:%lukB"
c6a7f572 4644 " kernel_stack:%lukB"
4a0aa73f 4645 " pagetables:%lukB"
4a0aa73f 4646 " bounce:%lukB"
d1bfcdb8
KK
4647 " free_pcp:%lukB"
4648 " local_pcp:%ukB"
d1ce749a 4649 " free_cma:%lukB"
1da177e4
LT
4650 "\n",
4651 zone->name,
88f5acf8 4652 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
4653 K(min_wmark_pages(zone)),
4654 K(low_wmark_pages(zone)),
4655 K(high_wmark_pages(zone)),
71c799f4
MK
4656 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4657 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4658 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4659 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4660 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5a1c84b4 4661 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
1da177e4 4662 K(zone->present_pages),
9feedc9d 4663 K(zone->managed_pages),
4a0aa73f 4664 K(zone_page_state(zone, NR_MLOCK)),
d30dd8be 4665 zone_page_state(zone, NR_KERNEL_STACK_KB),
4a0aa73f 4666 K(zone_page_state(zone, NR_PAGETABLE)),
4a0aa73f 4667 K(zone_page_state(zone, NR_BOUNCE)),
d1bfcdb8
KK
4668 K(free_pcp),
4669 K(this_cpu_read(zone->pageset->pcp.count)),
33e077bd 4670 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
1da177e4
LT
4671 printk("lowmem_reserve[]:");
4672 for (i = 0; i < MAX_NR_ZONES; i++)
1f84a18f
JP
4673 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4674 printk(KERN_CONT "\n");
1da177e4
LT
4675 }
4676
ee99c71c 4677 for_each_populated_zone(zone) {
d00181b9
KS
4678 unsigned int order;
4679 unsigned long nr[MAX_ORDER], flags, total = 0;
377e4f16 4680 unsigned char types[MAX_ORDER];
1da177e4 4681
9af744d7 4682 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 4683 continue;
1da177e4 4684 show_node(zone);
1f84a18f 4685 printk(KERN_CONT "%s: ", zone->name);
1da177e4
LT
4686
4687 spin_lock_irqsave(&zone->lock, flags);
4688 for (order = 0; order < MAX_ORDER; order++) {
377e4f16
RV
4689 struct free_area *area = &zone->free_area[order];
4690 int type;
4691
4692 nr[order] = area->nr_free;
8f9de51a 4693 total += nr[order] << order;
377e4f16
RV
4694
4695 types[order] = 0;
4696 for (type = 0; type < MIGRATE_TYPES; type++) {
4697 if (!list_empty(&area->free_list[type]))
4698 types[order] |= 1 << type;
4699 }
1da177e4
LT
4700 }
4701 spin_unlock_irqrestore(&zone->lock, flags);
377e4f16 4702 for (order = 0; order < MAX_ORDER; order++) {
1f84a18f
JP
4703 printk(KERN_CONT "%lu*%lukB ",
4704 nr[order], K(1UL) << order);
377e4f16
RV
4705 if (nr[order])
4706 show_migration_types(types[order]);
4707 }
1f84a18f 4708 printk(KERN_CONT "= %lukB\n", K(total));
1da177e4
LT
4709 }
4710
949f7ec5
DR
4711 hugetlb_show_meminfo();
4712
11fb9989 4713 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
e6f3602d 4714
1da177e4
LT
4715 show_swap_cache_info();
4716}
4717
19770b32
MG
4718static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4719{
4720 zoneref->zone = zone;
4721 zoneref->zone_idx = zone_idx(zone);
4722}
4723
1da177e4
LT
4724/*
4725 * Builds allocation fallback zone lists.
1a93205b
CL
4726 *
4727 * Add all populated zones of a node to the zonelist.
1da177e4 4728 */
f0c0b2b8 4729static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
bc732f1d 4730 int nr_zones)
1da177e4 4731{
1a93205b 4732 struct zone *zone;
bc732f1d 4733 enum zone_type zone_type = MAX_NR_ZONES;
02a68a5e
CL
4734
4735 do {
2f6726e5 4736 zone_type--;
070f8032 4737 zone = pgdat->node_zones + zone_type;
6aa303de 4738 if (managed_zone(zone)) {
dd1a239f
MG
4739 zoneref_set_zone(zone,
4740 &zonelist->_zonerefs[nr_zones++]);
070f8032 4741 check_highest_zone(zone_type);
1da177e4 4742 }
2f6726e5 4743 } while (zone_type);
bc732f1d 4744
070f8032 4745 return nr_zones;
1da177e4
LT
4746}
4747
f0c0b2b8
KH
4748
4749/*
4750 * zonelist_order:
4751 * 0 = automatic detection of better ordering.
4752 * 1 = order by ([node] distance, -zonetype)
4753 * 2 = order by (-zonetype, [node] distance)
4754 *
4755 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
4756 * the same zonelist. So only NUMA can configure this param.
4757 */
4758#define ZONELIST_ORDER_DEFAULT 0
4759#define ZONELIST_ORDER_NODE 1
4760#define ZONELIST_ORDER_ZONE 2
4761
4762/* zonelist order in the kernel.
4763 * set_zonelist_order() will set this to NODE or ZONE.
4764 */
4765static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4766static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4767
4768
1da177e4 4769#ifdef CONFIG_NUMA
f0c0b2b8
KH
4770/* The value user specified ....changed by config */
4771static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4772/* string for sysctl */
4773#define NUMA_ZONELIST_ORDER_LEN 16
4774char numa_zonelist_order[16] = "default";
4775
4776/*
4777 * interface for configuring zonelist ordering.
4778 * command line option "numa_zonelist_order"
4779 * = "[dD]efault" - default, automatic configuration.
4780 * = "[nN]ode" - order by node locality, then by zone within node
4781 * = "[zZ]one" - order by zone, then by locality within zone
4782 */
4783
4784static int __parse_numa_zonelist_order(char *s)
4785{
4786 if (*s == 'd' || *s == 'D') {
4787 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4788 } else if (*s == 'n' || *s == 'N') {
4789 user_zonelist_order = ZONELIST_ORDER_NODE;
4790 } else if (*s == 'z' || *s == 'Z') {
4791 user_zonelist_order = ZONELIST_ORDER_ZONE;
4792 } else {
1170532b 4793 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
f0c0b2b8
KH
4794 return -EINVAL;
4795 }
4796 return 0;
4797}
4798
4799static __init int setup_numa_zonelist_order(char *s)
4800{
ecb256f8
VL
4801 int ret;
4802
4803 if (!s)
4804 return 0;
4805
4806 ret = __parse_numa_zonelist_order(s);
4807 if (ret == 0)
4808 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4809
4810 return ret;
f0c0b2b8
KH
4811}
4812early_param("numa_zonelist_order", setup_numa_zonelist_order);
4813
4814/*
4815 * sysctl handler for numa_zonelist_order
4816 */
cccad5b9 4817int numa_zonelist_order_handler(struct ctl_table *table, int write,
8d65af78 4818 void __user *buffer, size_t *length,
f0c0b2b8
KH
4819 loff_t *ppos)
4820{
4821 char saved_string[NUMA_ZONELIST_ORDER_LEN];
4822 int ret;
443c6f14 4823 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 4824
443c6f14 4825 mutex_lock(&zl_order_mutex);
dacbde09
CG
4826 if (write) {
4827 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4828 ret = -EINVAL;
4829 goto out;
4830 }
4831 strcpy(saved_string, (char *)table->data);
4832 }
8d65af78 4833 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 4834 if (ret)
443c6f14 4835 goto out;
f0c0b2b8
KH
4836 if (write) {
4837 int oldval = user_zonelist_order;
dacbde09
CG
4838
4839 ret = __parse_numa_zonelist_order((char *)table->data);
4840 if (ret) {
f0c0b2b8
KH
4841 /*
4842 * bogus value. restore saved string
4843 */
dacbde09 4844 strncpy((char *)table->data, saved_string,
f0c0b2b8
KH
4845 NUMA_ZONELIST_ORDER_LEN);
4846 user_zonelist_order = oldval;
4eaf3f64
HL
4847 } else if (oldval != user_zonelist_order) {
4848 mutex_lock(&zonelists_mutex);
9adb62a5 4849 build_all_zonelists(NULL, NULL);
4eaf3f64
HL
4850 mutex_unlock(&zonelists_mutex);
4851 }
f0c0b2b8 4852 }
443c6f14
AK
4853out:
4854 mutex_unlock(&zl_order_mutex);
4855 return ret;
f0c0b2b8
KH
4856}
4857
4858
62bc62a8 4859#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
4860static int node_load[MAX_NUMNODES];
4861
1da177e4 4862/**
4dc3b16b 4863 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
4864 * @node: node whose fallback list we're appending
4865 * @used_node_mask: nodemask_t of already used nodes
4866 *
4867 * We use a number of factors to determine which is the next node that should
4868 * appear on a given node's fallback list. The node should not have appeared
4869 * already in @node's fallback list, and it should be the next closest node
4870 * according to the distance array (which contains arbitrary distance values
4871 * from each node to each node in the system), and we should also prefer nodes
4872 * with no CPUs, since presumably they'll have very little allocation pressure
4873 * on them otherwise.
4874 * It returns -1 if no node is found.
4875 */
f0c0b2b8 4876static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 4877{
4cf808eb 4878 int n, val;
1da177e4 4879 int min_val = INT_MAX;
00ef2d2f 4880 int best_node = NUMA_NO_NODE;
a70f7302 4881 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 4882
4cf808eb
LT
4883 /* Use the local node if we haven't already */
4884 if (!node_isset(node, *used_node_mask)) {
4885 node_set(node, *used_node_mask);
4886 return node;
4887 }
1da177e4 4888
4b0ef1fe 4889 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
4890
4891 /* Don't want a node to appear more than once */
4892 if (node_isset(n, *used_node_mask))
4893 continue;
4894
1da177e4
LT
4895 /* Use the distance array to find the distance */
4896 val = node_distance(node, n);
4897
4cf808eb
LT
4898 /* Penalize nodes under us ("prefer the next node") */
4899 val += (n < node);
4900
1da177e4 4901 /* Give preference to headless and unused nodes */
a70f7302
RR
4902 tmp = cpumask_of_node(n);
4903 if (!cpumask_empty(tmp))
1da177e4
LT
4904 val += PENALTY_FOR_NODE_WITH_CPUS;
4905
4906 /* Slight preference for less loaded node */
4907 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4908 val += node_load[n];
4909
4910 if (val < min_val) {
4911 min_val = val;
4912 best_node = n;
4913 }
4914 }
4915
4916 if (best_node >= 0)
4917 node_set(best_node, *used_node_mask);
4918
4919 return best_node;
4920}
4921
f0c0b2b8
KH
4922
4923/*
4924 * Build zonelists ordered by node and zones within node.
4925 * This results in maximum locality--normal zone overflows into local
4926 * DMA zone, if any--but risks exhausting DMA zone.
4927 */
4928static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 4929{
f0c0b2b8 4930 int j;
1da177e4 4931 struct zonelist *zonelist;
f0c0b2b8 4932
c9634cf0 4933 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
dd1a239f 4934 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c 4935 ;
bc732f1d 4936 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
dd1a239f
MG
4937 zonelist->_zonerefs[j].zone = NULL;
4938 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
4939}
4940
523b9458
CL
4941/*
4942 * Build gfp_thisnode zonelists
4943 */
4944static void build_thisnode_zonelists(pg_data_t *pgdat)
4945{
523b9458
CL
4946 int j;
4947 struct zonelist *zonelist;
4948
c9634cf0 4949 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
bc732f1d 4950 j = build_zonelists_node(pgdat, zonelist, 0);
dd1a239f
MG
4951 zonelist->_zonerefs[j].zone = NULL;
4952 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
4953}
4954
f0c0b2b8
KH
4955/*
4956 * Build zonelists ordered by zone and nodes within zones.
4957 * This results in conserving DMA zone[s] until all Normal memory is
 4958 * exhausted, but results in overflowing to a remote node while memory
 4959 * may still exist in the local DMA zone.
4960 */
4961static int node_order[MAX_NUMNODES];
4962
4963static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4964{
f0c0b2b8
KH
4965 int pos, j, node;
4966 int zone_type; /* needs to be signed */
4967 struct zone *z;
4968 struct zonelist *zonelist;
4969
c9634cf0 4970 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
54a6eb5c
MG
4971 pos = 0;
4972 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4973 for (j = 0; j < nr_nodes; j++) {
4974 node = node_order[j];
4975 z = &NODE_DATA(node)->node_zones[zone_type];
6aa303de 4976 if (managed_zone(z)) {
dd1a239f
MG
4977 zoneref_set_zone(z,
4978 &zonelist->_zonerefs[pos++]);
54a6eb5c 4979 check_highest_zone(zone_type);
f0c0b2b8
KH
4980 }
4981 }
f0c0b2b8 4982 }
dd1a239f
MG
4983 zonelist->_zonerefs[pos].zone = NULL;
4984 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
4985}
4986
3193913c
MG
4987#if defined(CONFIG_64BIT)
4988/*
4989 * Devices that require DMA32/DMA are relatively rare and do not justify a
4990 * penalty to every machine in case the specialised case applies. Default
4991 * to Node-ordering on 64-bit NUMA machines
4992 */
4993static int default_zonelist_order(void)
4994{
4995 return ZONELIST_ORDER_NODE;
4996}
4997#else
4998/*
4999 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
5000 * by the kernel. If processes running on node 0 deplete the low memory zone
 5001 * then reclaim will occur more frequently, increasing stalls and potentially
 5002 * making it easier to OOM if a large percentage of the zone is under writeback or
5003 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
5004 * Hence, default to zone ordering on 32-bit.
5005 */
f0c0b2b8
KH
5006static int default_zonelist_order(void)
5007{
f0c0b2b8
KH
5008 return ZONELIST_ORDER_ZONE;
5009}
3193913c 5010#endif /* CONFIG_64BIT */
f0c0b2b8
KH
5011
5012static void set_zonelist_order(void)
5013{
5014 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
5015 current_zonelist_order = default_zonelist_order();
5016 else
5017 current_zonelist_order = user_zonelist_order;
5018}
5019
5020static void build_zonelists(pg_data_t *pgdat)
5021{
c00eb15a 5022 int i, node, load;
1da177e4 5023 nodemask_t used_mask;
f0c0b2b8
KH
5024 int local_node, prev_node;
5025 struct zonelist *zonelist;
d00181b9 5026 unsigned int order = current_zonelist_order;
1da177e4
LT
5027
5028 /* initialize zonelists */
523b9458 5029 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 5030 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
5031 zonelist->_zonerefs[0].zone = NULL;
5032 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
5033 }
5034
5035 /* NUMA-aware ordering of nodes */
5036 local_node = pgdat->node_id;
62bc62a8 5037 load = nr_online_nodes;
1da177e4
LT
5038 prev_node = local_node;
5039 nodes_clear(used_mask);
f0c0b2b8 5040
f0c0b2b8 5041 memset(node_order, 0, sizeof(node_order));
c00eb15a 5042 i = 0;
f0c0b2b8 5043
1da177e4
LT
5044 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5045 /*
5046 * We don't want to pressure a particular node.
5047 * So adding penalty to the first node in same
5048 * distance group to make it round-robin.
5049 */
957f822a
DR
5050 if (node_distance(local_node, node) !=
5051 node_distance(local_node, prev_node))
f0c0b2b8
KH
5052 node_load[node] = load;
5053
1da177e4
LT
5054 prev_node = node;
5055 load--;
f0c0b2b8
KH
5056 if (order == ZONELIST_ORDER_NODE)
5057 build_zonelists_in_node_order(pgdat, node);
5058 else
c00eb15a 5059 node_order[i++] = node; /* remember order */
f0c0b2b8 5060 }
1da177e4 5061
f0c0b2b8
KH
5062 if (order == ZONELIST_ORDER_ZONE) {
5063 /* calculate node order -- i.e., DMA last! */
c00eb15a 5064 build_zonelists_in_zone_order(pgdat, i);
1da177e4 5065 }
523b9458
CL
5066
5067 build_thisnode_zonelists(pgdat);
1da177e4
LT
5068}
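/*
 * Editor's note (illustrative, not part of the source): on a system where
 * all remote nodes are equidistant, the "prefer the next node" penalty plus
 * the node_load round-robin above produce a simple rotated fallback order.
 * E.g. with four such nodes, node 2's ZONELIST_FALLBACK comes out as
 * 2, 3, 0, 1 -- the same N+1-modulo rotation that the !CONFIG_NUMA variant
 * further below builds explicitly.
 */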
5069
7aac7898
LS
5070#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5071/*
5072 * Return node id of node used for "local" allocations.
5073 * I.e., first node id of first zone in arg node's generic zonelist.
5074 * Used for initializing percpu 'numa_mem', which is used primarily
5075 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5076 */
5077int local_memory_node(int node)
5078{
c33d6c06 5079 struct zoneref *z;
7aac7898 5080
c33d6c06 5081 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
7aac7898 5082 gfp_zone(GFP_KERNEL),
c33d6c06
MG
5083 NULL);
5084 return z->zone->node;
7aac7898
LS
5085}
5086#endif
f0c0b2b8 5087
6423aa81
JK
5088static void setup_min_unmapped_ratio(void);
5089static void setup_min_slab_ratio(void);
1da177e4
LT
5090#else /* CONFIG_NUMA */
5091
f0c0b2b8
KH
5092static void set_zonelist_order(void)
5093{
5094 current_zonelist_order = ZONELIST_ORDER_ZONE;
5095}
5096
5097static void build_zonelists(pg_data_t *pgdat)
1da177e4 5098{
19655d34 5099 int node, local_node;
54a6eb5c
MG
5100 enum zone_type j;
5101 struct zonelist *zonelist;
1da177e4
LT
5102
5103 local_node = pgdat->node_id;
1da177e4 5104
c9634cf0 5105 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
bc732f1d 5106 j = build_zonelists_node(pgdat, zonelist, 0);
1da177e4 5107
54a6eb5c
MG
5108 /*
5109 * Now we build the zonelist so that it contains the zones
5110 * of all the other nodes.
5111 * We don't want to pressure a particular node, so when
5112 * building the zones for node N, we make sure that the
5113 * zones coming right after the local ones are those from
5114 * node N+1 (modulo N)
5115 */
5116 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5117 if (!node_online(node))
5118 continue;
bc732f1d 5119 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
1da177e4 5120 }
54a6eb5c
MG
5121 for (node = 0; node < local_node; node++) {
5122 if (!node_online(node))
5123 continue;
bc732f1d 5124 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
54a6eb5c
MG
5125 }
5126
dd1a239f
MG
5127 zonelist->_zonerefs[j].zone = NULL;
5128 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
5129}
5130
5131#endif /* CONFIG_NUMA */
5132
99dcc3e5
CL
5133/*
5134 * Boot pageset table. One per cpu which is going to be used for all
5135 * zones and all nodes. The parameters will be set in such a way
5136 * that an item put on a list will immediately be handed over to
5137 * the buddy list. This is safe since pageset manipulation is done
5138 * with interrupts disabled.
5139 *
5140 * The boot_pagesets must be kept even after bootup is complete for
5141 * unused processors and/or zones. They do play a role for bootstrapping
5142 * hotplugged processors.
5143 *
5144 * zoneinfo_show() and maybe other functions do
5145 * not check if the processor is online before following the pageset pointer.
5146 * Other parts of the kernel may not check if the zone is available.
5147 */
5148static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5149static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
385386cf 5150static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
1f522509 5151static void setup_zone_pageset(struct zone *zone);
99dcc3e5 5152
4eaf3f64
HL
5153/*
5154 * Global mutex to protect against size modification of zonelists
5155 * as well as to serialize pageset setup for the new populated zone.
5156 */
5157DEFINE_MUTEX(zonelists_mutex);
5158
9b1a4d38 5159/* return values int ....just for stop_machine() */
4ed7e022 5160static int __build_all_zonelists(void *data)
1da177e4 5161{
6811378e 5162 int nid;
99dcc3e5 5163 int cpu;
9adb62a5 5164 pg_data_t *self = data;
9276b1bc 5165
7f9cfb31
BL
5166#ifdef CONFIG_NUMA
5167 memset(node_load, 0, sizeof(node_load));
5168#endif
9adb62a5
JL
5169
5170 if (self && !node_online(self->node_id)) {
5171 build_zonelists(self);
9adb62a5
JL
5172 }
5173
9276b1bc 5174 for_each_online_node(nid) {
7ea1530a
CL
5175 pg_data_t *pgdat = NODE_DATA(nid);
5176
5177 build_zonelists(pgdat);
9276b1bc 5178 }
99dcc3e5
CL
5179
5180 /*
5181 * Initialize the boot_pagesets that are going to be used
5182 * for bootstrapping processors. The real pagesets for
5183 * each zone will be allocated later when the per cpu
5184 * allocator is available.
5185 *
5186 * boot_pagesets are used also for bootstrapping offline
5187 * cpus if the system is already booted because the pagesets
5188 * are needed to initialize allocators on a specific cpu too.
5189 * F.e. the percpu allocator needs the page allocator which
5190 * needs the percpu allocator in order to allocate its pagesets
5191 * (a chicken-egg dilemma).
5192 */
7aac7898 5193 for_each_possible_cpu(cpu) {
99dcc3e5
CL
5194 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5195
7aac7898
LS
5196#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5197 /*
5198 * We now know the "local memory node" for each node--
5199 * i.e., the node of the first zone in the generic zonelist.
5200 * Set up numa_mem percpu variable for on-line cpus. During
5201 * boot, only the boot cpu should be on-line; we'll init the
5202 * secondary cpus' numa_mem as they come on-line. During
5203 * node/memory hotplug, we'll fixup all on-line cpus.
5204 */
5205 if (cpu_online(cpu))
5206 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5207#endif
5208 }
5209
6811378e
YG
5210 return 0;
5211}
5212
061f67bc
RV
5213static noinline void __init
5214build_all_zonelists_init(void)
5215{
5216 __build_all_zonelists(NULL);
5217 mminit_verify_zonelist();
5218 cpuset_init_current_mems_allowed();
5219}
5220
4eaf3f64
HL
5221/*
5222 * Called with zonelists_mutex held always
5223 * unless system_state == SYSTEM_BOOTING.
061f67bc
RV
5224 *
5225 * __ref due to (1) call of __meminit annotated setup_zone_pageset
5226 * [we're only called with non-NULL zone through __meminit paths] and
5227 * (2) call of __init annotated helper build_all_zonelists_init
5228 * [protected by SYSTEM_BOOTING].
4eaf3f64 5229 */
9adb62a5 5230void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
6811378e 5231{
f0c0b2b8
KH
5232 set_zonelist_order();
5233
6811378e 5234 if (system_state == SYSTEM_BOOTING) {
061f67bc 5235 build_all_zonelists_init();
6811378e 5236 } else {
e9959f0f 5237#ifdef CONFIG_MEMORY_HOTPLUG
9adb62a5
JL
5238 if (zone)
5239 setup_zone_pageset(zone);
e9959f0f 5240#endif
dd1895e2
CS
5241 /* we have to stop all cpus to guarantee there is no user
5242 of zonelist */
9adb62a5 5243 stop_machine(__build_all_zonelists, pgdat, NULL);
6811378e
YG
5244 /* cpuset refresh routine should be here */
5245 }
bd1e22b8 5246 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
5247 /*
5248 * Disable grouping by mobility if the number of pages in the
5249 * system is too low to allow the mechanism to work. It would be
5250 * more accurate, but expensive to check per-zone. This check is
5251 * made on memory-hotadd so a system can start with mobility
5252 * disabled and enable it later
5253 */
d9c23400 5254 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
5255 page_group_by_mobility_disabled = 1;
5256 else
5257 page_group_by_mobility_disabled = 0;
5258
756a025f
JP
5259 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
5260 nr_online_nodes,
5261 zonelist_order_name[current_zonelist_order],
5262 page_group_by_mobility_disabled ? "off" : "on",
5263 vm_total_pages);
f0c0b2b8 5264#ifdef CONFIG_NUMA
f88dfff5 5265 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
f0c0b2b8 5266#endif
1da177e4
LT
5267}
5268
1da177e4
LT
5269/*
5270 * Initially all pages are reserved - free ones are freed
5271 * up by free_all_bootmem() once the early boot process is
5272 * done. Non-atomic initialization, single-pass.
5273 */
c09b4240 5274void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 5275 unsigned long start_pfn, enum memmap_context context)
1da177e4 5276{
4b94ffdc 5277 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
29751f69 5278 unsigned long end_pfn = start_pfn + size;
4b94ffdc 5279 pg_data_t *pgdat = NODE_DATA(nid);
29751f69 5280 unsigned long pfn;
3a80a7fa 5281 unsigned long nr_initialised = 0;
342332e6
TI
5282#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5283 struct memblock_region *r = NULL, *tmp;
5284#endif
1da177e4 5285
22b31eec
HD
5286 if (highest_memmap_pfn < end_pfn - 1)
5287 highest_memmap_pfn = end_pfn - 1;
5288
4b94ffdc
DW
5289 /*
5290 * Honor reservation requested by the driver for this ZONE_DEVICE
5291 * memory
5292 */
5293 if (altmap && start_pfn == altmap->base_pfn)
5294 start_pfn += altmap->reserve;
5295
cbe8dd4a 5296 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02 5297 /*
b72d0ffb
AM
5298 * There can be holes in boot-time mem_map[]s handed to this
5299 * function. They do not exist on hotplugged memory.
a2f3aa02 5300 */
b72d0ffb
AM
5301 if (context != MEMMAP_EARLY)
5302 goto not_early;
5303
b92df1de
PB
5304 if (!early_pfn_valid(pfn)) {
5305#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5306 /*
5307 * Skip to the pfn preceding the next valid one (or
5308 * end_pfn), such that we hit a valid pfn (or end_pfn)
5309 * on our next iteration of the loop.
5310 */
5311 pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
5312#endif
b72d0ffb 5313 continue;
b92df1de 5314 }
b72d0ffb
AM
5315 if (!early_pfn_in_nid(pfn, nid))
5316 continue;
5317 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5318 break;
342332e6
TI
5319
5320#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
b72d0ffb
AM
5321 /*
5322 * Check given memblock attribute by firmware which can affect
5323 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
5324 * mirrored, it's an overlapped memmap init. skip it.
5325 */
5326 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5327 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5328 for_each_memblock(memory, tmp)
5329 if (pfn < memblock_region_memory_end_pfn(tmp))
5330 break;
5331 r = tmp;
5332 }
5333 if (pfn >= memblock_region_memory_base_pfn(r) &&
5334 memblock_is_mirror(r)) {
5335 /* already initialized as NORMAL */
5336 pfn = memblock_region_memory_end_pfn(r);
5337 continue;
342332e6 5338 }
a2f3aa02 5339 }
b72d0ffb 5340#endif
ac5d2539 5341
b72d0ffb 5342not_early:
ac5d2539
MG
5343 /*
5344 * Mark the block movable so that blocks are reserved for
5345 * movable at startup. This will force kernel allocations
5346 * to reserve their blocks rather than leaking throughout
5347 * the address space during boot when many long-lived
974a786e 5348 * kernel allocations are made.
ac5d2539
MG
5349 *
 5350 * The pageblock bitmap is created for the zone's valid pfn range, but
 5351 * the memmap can be created for invalid pages (for alignment), so
 5352 * check here that set_pageblock_migratetype() is not called against
 5353 * a pfn outside the zone.
5354 */
5355 if (!(pfn & (pageblock_nr_pages - 1))) {
5356 struct page *page = pfn_to_page(pfn);
5357
5358 __init_single_page(page, pfn, zone, nid);
5359 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5360 } else {
5361 __init_single_pfn(pfn, zone, nid);
5362 }
1da177e4
LT
5363 }
5364}
5365
1e548deb 5366static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 5367{
7aeb09f9 5368 unsigned int order, t;
b2a0ac88
MG
5369 for_each_migratetype_order(order, t) {
5370 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
5371 zone->free_area[order].nr_free = 0;
5372 }
5373}
5374
5375#ifndef __HAVE_ARCH_MEMMAP_INIT
5376#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 5377 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
5378#endif
5379
7cd2b0a3 5380static int zone_batchsize(struct zone *zone)
e7c8d5c9 5381{
3a6be87f 5382#ifdef CONFIG_MMU
e7c8d5c9
CL
5383 int batch;
5384
5385 /*
 5386 * The per-cpu-pages pools are set to around 1/1000th of the
ba56e91c 5387 * size of the zone, but no more than half a megabyte.
e7c8d5c9
CL
5388 *
5389 * OK, so we don't know how big the cache is. So guess.
5390 */
b40da049 5391 batch = zone->managed_pages / 1024;
ba56e91c
SR
5392 if (batch * PAGE_SIZE > 512 * 1024)
5393 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
5394 batch /= 4; /* We effectively *= 4 below */
5395 if (batch < 1)
5396 batch = 1;
5397
5398 /*
0ceaacc9
NP
5399 * Clamp the batch to a 2^n - 1 value. Having a power
5400 * of 2 value was found to be more likely to have
5401 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 5402 *
0ceaacc9
NP
5403 * For example if 2 tasks are alternately allocating
5404 * batches of pages, one task can end up with a lot
5405 * of pages of one half of the possible page colors
5406 * and the other with pages of the other colors.
e7c8d5c9 5407 */
9155203a 5408 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 5409
e7c8d5c9 5410 return batch;
3a6be87f
DH
5411
5412#else
5413 /* The deferral and batching of frees should be suppressed under NOMMU
5414 * conditions.
5415 *
5416 * The problem is that NOMMU needs to be able to allocate large chunks
5417 * of contiguous memory as there's no hardware page translation to
5418 * assemble apparent contiguous memory from discontiguous pages.
5419 *
5420 * Queueing large contiguous runs of pages for batching, however,
5421 * causes the pages to actually be freed in smaller chunks. As there
5422 * can be a significant delay between the individual batches being
5423 * recycled, this leads to the once large chunks of space being
5424 * fragmented and becoming unavailable for high-order allocations.
5425 */
5426 return 0;
5427#endif
e7c8d5c9
CL
5428}
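/*
 * Editor's worked example (assumes PAGE_SIZE == 4096; not part of the
 * kernel source): for a zone with 1GiB managed (262144 pages) the code
 * above computes
 *	batch = 262144 / 1024 = 256;	  1MiB worth, over the 512KiB cap
 *	batch = (512 * 1024) / 4096 = 128;
 *	batch /= 4;			  -> 32
 *	batch = rounddown_pow_of_two(32 + 16) - 1 = 31;
 * so each CPU moves pages to and from the buddy lists 31 at a time.
 */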
5429
8d7a8fa9
CS
5430/*
5431 * pcp->high and pcp->batch values are related and dependent on one another:
 5432 * ->batch must never be higher than ->high.
5433 * The following function updates them in a safe manner without read side
5434 * locking.
5435 *
5436 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 5437 * those fields changing asynchronously (according to the above rule).
5438 *
5439 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5440 * outside of boot time (or some other assurance that no concurrent updaters
5441 * exist).
5442 */
5443static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5444 unsigned long batch)
5445{
5446 /* start with a fail safe value for batch */
5447 pcp->batch = 1;
5448 smp_wmb();
5449
5450 /* Update high, then batch, in order */
5451 pcp->high = high;
5452 smp_wmb();
5453
5454 pcp->batch = batch;
5455}
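/*
 * Editor's note (illustrative): the two-step update above keeps the
 * documented invariant pcp->batch <= pcp->high visible at every instant.
 * E.g. shrinking from (high=186, batch=31) to (high=24, batch=6) by writing
 * high first would briefly expose (high=24, batch=31), i.e. batch > high;
 * dropping batch to the failsafe value 1 first means every intermediate
 * state a lock-free reader can observe still satisfies batch <= high.
 */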
5456
3664033c 5457/* a companion to pageset_set_high() */
4008bab7
CS
5458static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5459{
8d7a8fa9 5460 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4008bab7
CS
5461}
5462
88c90dbc 5463static void pageset_init(struct per_cpu_pageset *p)
2caaad41
CL
5464{
5465 struct per_cpu_pages *pcp;
5f8dcc21 5466 int migratetype;
2caaad41 5467
1c6fe946
MD
5468 memset(p, 0, sizeof(*p));
5469
3dfa5721 5470 pcp = &p->pcp;
2caaad41 5471 pcp->count = 0;
5f8dcc21
MG
5472 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5473 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
5474}
5475
88c90dbc
CS
5476static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5477{
5478 pageset_init(p);
5479 pageset_set_batch(p, batch);
5480}
5481
8ad4b1fb 5482/*
3664033c 5483 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
8ad4b1fb
RS
5484 * to the value high for the pageset p.
5485 */
3664033c 5486static void pageset_set_high(struct per_cpu_pageset *p,
8ad4b1fb
RS
5487 unsigned long high)
5488{
8d7a8fa9
CS
5489 unsigned long batch = max(1UL, high / 4);
5490 if ((high / 4) > (PAGE_SHIFT * 8))
5491 batch = PAGE_SHIFT * 8;
8ad4b1fb 5492
8d7a8fa9 5493 pageset_update(&p->pcp, high, batch);
8ad4b1fb
RS
5494}
5495
7cd2b0a3
DR
5496static void pageset_set_high_and_batch(struct zone *zone,
5497 struct per_cpu_pageset *pcp)
56cef2b8 5498{
56cef2b8 5499 if (percpu_pagelist_fraction)
3664033c 5500 pageset_set_high(pcp,
56cef2b8
CS
5501 (zone->managed_pages /
5502 percpu_pagelist_fraction));
5503 else
5504 pageset_set_batch(pcp, zone_batchsize(zone));
5505}
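/*
 * Editor's worked example (assumes PAGE_SIZE == 4096, PAGE_SHIFT == 12):
 * with percpu_pagelist_fraction set to 8 on a zone managing 262144 pages,
 * pageset_set_high() is given high = 262144 / 8 = 32768; high / 4 = 8192
 * exceeds PAGE_SHIFT * 8 = 96, so batch is clamped to 96 and
 * pageset_update(&pcp->pcp, 32768, 96) is applied.  With the fraction
 * unset, the zone_batchsize() sizing above is used instead.
 */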
5506
169f6c19
CS
5507static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5508{
5509 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5510
5511 pageset_init(pcp);
5512 pageset_set_high_and_batch(zone, pcp);
5513}
5514
4ed7e022 5515static void __meminit setup_zone_pageset(struct zone *zone)
319774e2
WF
5516{
5517 int cpu;
319774e2 5518 zone->pageset = alloc_percpu(struct per_cpu_pageset);
56cef2b8
CS
5519 for_each_possible_cpu(cpu)
5520 zone_pageset_init(zone, cpu);
319774e2
WF
5521}
5522
2caaad41 5523/*
99dcc3e5
CL
5524 * Allocate per cpu pagesets and initialize them.
5525 * Before this call only boot pagesets were available.
e7c8d5c9 5526 */
99dcc3e5 5527void __init setup_per_cpu_pageset(void)
e7c8d5c9 5528{
b4911ea2 5529 struct pglist_data *pgdat;
99dcc3e5 5530 struct zone *zone;
e7c8d5c9 5531
319774e2
WF
5532 for_each_populated_zone(zone)
5533 setup_zone_pageset(zone);
b4911ea2
MG
5534
5535 for_each_online_pgdat(pgdat)
5536 pgdat->per_cpu_nodestats =
5537 alloc_percpu(struct per_cpu_nodestat);
e7c8d5c9
CL
5538}
5539
c09b4240 5540static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 5541{
99dcc3e5
CL
5542 /*
5543 * per cpu subsystem is not up at this point. The following code
5544 * relies on the ability of the linker to provide the
5545 * offset of a (static) per cpu variable into the per cpu area.
5546 */
5547 zone->pageset = &boot_pageset;
ed8ece2e 5548
b38a8725 5549 if (populated_zone(zone))
99dcc3e5
CL
5550 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
5551 zone->name, zone->present_pages,
5552 zone_batchsize(zone));
ed8ece2e
DH
5553}
5554
dc0bbf3b 5555void __meminit init_currently_empty_zone(struct zone *zone,
718127cc 5556 unsigned long zone_start_pfn,
b171e409 5557 unsigned long size)
ed8ece2e
DH
5558{
5559 struct pglist_data *pgdat = zone->zone_pgdat;
9dcb8b68 5560
ed8ece2e
DH
5561 pgdat->nr_zones = zone_idx(zone) + 1;
5562
ed8ece2e
DH
5563 zone->zone_start_pfn = zone_start_pfn;
5564
708614e6
MG
5565 mminit_dprintk(MMINIT_TRACE, "memmap_init",
5566 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5567 pgdat->node_id,
5568 (unsigned long)zone_idx(zone),
5569 zone_start_pfn, (zone_start_pfn + size));
5570
1e548deb 5571 zone_init_free_lists(zone);
9dcb8b68 5572 zone->initialized = 1;
ed8ece2e
DH
5573}
5574
0ee332c1 5575#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d 5576#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
8a942fde 5577
c713216d
MG
5578/*
5579 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
c713216d 5580 */
8a942fde
MG
5581int __meminit __early_pfn_to_nid(unsigned long pfn,
5582 struct mminit_pfnnid_cache *state)
c713216d 5583{
c13291a5 5584 unsigned long start_pfn, end_pfn;
e76b63f8 5585 int nid;
7c243c71 5586
8a942fde
MG
5587 if (state->last_start <= pfn && pfn < state->last_end)
5588 return state->last_nid;
c713216d 5589
e76b63f8
YL
5590 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5591 if (nid != -1) {
8a942fde
MG
5592 state->last_start = start_pfn;
5593 state->last_end = end_pfn;
5594 state->last_nid = nid;
e76b63f8
YL
5595 }
5596
5597 return nid;
c713216d
MG
5598}
5599#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5600
c713216d 5601/**
6782832e 5602 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
88ca3b94 5603 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
6782832e 5604 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
c713216d 5605 *
7d018176
ZZ
5606 * If an architecture guarantees that all ranges registered contain no holes
 5607 * and may be freed, this function may be used instead of calling
5608 * memblock_free_early_nid() manually.
c713216d 5609 */
c13291a5 5610void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
cc289894 5611{
c13291a5
TH
5612 unsigned long start_pfn, end_pfn;
5613 int i, this_nid;
edbe7d23 5614
c13291a5
TH
5615 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5616 start_pfn = min(start_pfn, max_low_pfn);
5617 end_pfn = min(end_pfn, max_low_pfn);
edbe7d23 5618
c13291a5 5619 if (start_pfn < end_pfn)
6782832e
SS
5620 memblock_free_early_nid(PFN_PHYS(start_pfn),
5621 (end_pfn - start_pfn) << PAGE_SHIFT,
5622 this_nid);
edbe7d23 5623 }
edbe7d23 5624}
edbe7d23 5625
c713216d
MG
5626/**
5627 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 5628 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d 5629 *
7d018176
ZZ
5630 * If an architecture guarantees that all ranges registered contain no holes and may
5631 * be freed, this function may be used instead of calling memory_present() manually.
c713216d
MG
5632 */
5633void __init sparse_memory_present_with_active_regions(int nid)
5634{
c13291a5
TH
5635 unsigned long start_pfn, end_pfn;
5636 int i, this_nid;
c713216d 5637
c13291a5
TH
5638 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5639 memory_present(this_nid, start_pfn, end_pfn);
c713216d
MG
5640}
5641
5642/**
5643 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
5644 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5645 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5646 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
5647 *
5648 * It returns the start and end page frame of a node based on information
7d018176 5649 * provided by memblock_set_node(). If called for a node
c713216d 5650 * with no available memory, a warning is printed and the start and end
88ca3b94 5651 * PFNs will be 0.
c713216d 5652 */
a3142c8e 5653void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
5654 unsigned long *start_pfn, unsigned long *end_pfn)
5655{
c13291a5 5656 unsigned long this_start_pfn, this_end_pfn;
c713216d 5657 int i;
c13291a5 5658
c713216d
MG
5659 *start_pfn = -1UL;
5660 *end_pfn = 0;
5661
c13291a5
TH
5662 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5663 *start_pfn = min(*start_pfn, this_start_pfn);
5664 *end_pfn = max(*end_pfn, this_end_pfn);
c713216d
MG
5665 }
5666
633c0666 5667 if (*start_pfn == -1UL)
c713216d 5668 *start_pfn = 0;
c713216d
MG
5669}
5670
2a1e274a
MG
5671/*
5672 * This finds a zone that can be used for ZONE_MOVABLE pages. The
5673 * assumption is made that zones within a node are ordered in monotonic
5674 * increasing memory addresses so that the "highest" populated zone is used
5675 */
b69a7288 5676static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
5677{
5678 int zone_index;
5679 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5680 if (zone_index == ZONE_MOVABLE)
5681 continue;
5682
5683 if (arch_zone_highest_possible_pfn[zone_index] >
5684 arch_zone_lowest_possible_pfn[zone_index])
5685 break;
5686 }
5687
5688 VM_BUG_ON(zone_index == -1);
5689 movable_zone = zone_index;
5690}
5691
5692/*
5693 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
25985edc 5694 * because it is sized independent of architecture. Unlike the other zones,
2a1e274a
MG
5695 * the starting point for ZONE_MOVABLE is not fixed. It may be different
5696 * in each node depending on the size of each node and how evenly kernelcore
5697 * is distributed. This helper function adjusts the zone ranges
5698 * provided by the architecture for a given node by using the end of the
5699 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 5700 * zones within a node are in order of monotonically increasing memory addresses
5701 */
b69a7288 5702static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
5703 unsigned long zone_type,
5704 unsigned long node_start_pfn,
5705 unsigned long node_end_pfn,
5706 unsigned long *zone_start_pfn,
5707 unsigned long *zone_end_pfn)
5708{
5709 /* Only adjust if ZONE_MOVABLE is on this node */
5710 if (zone_movable_pfn[nid]) {
5711 /* Size ZONE_MOVABLE */
5712 if (zone_type == ZONE_MOVABLE) {
5713 *zone_start_pfn = zone_movable_pfn[nid];
5714 *zone_end_pfn = min(node_end_pfn,
5715 arch_zone_highest_possible_pfn[movable_zone]);
5716
e506b996
XQ
5717 /* Adjust for ZONE_MOVABLE starting within this range */
5718 } else if (!mirrored_kernelcore &&
5719 *zone_start_pfn < zone_movable_pfn[nid] &&
5720 *zone_end_pfn > zone_movable_pfn[nid]) {
5721 *zone_end_pfn = zone_movable_pfn[nid];
5722
2a1e274a
MG
5723 /* Check if this whole range is within ZONE_MOVABLE */
5724 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5725 *zone_start_pfn = *zone_end_pfn;
5726 }
5727}
5728
c713216d
MG
5729/*
5730 * Return the number of pages a zone spans in a node, including holes
5731 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5732 */
6ea6e688 5733static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d 5734 unsigned long zone_type,
7960aedd
ZY
5735 unsigned long node_start_pfn,
5736 unsigned long node_end_pfn,
d91749c1
TI
5737 unsigned long *zone_start_pfn,
5738 unsigned long *zone_end_pfn,
c713216d
MG
5739 unsigned long *ignored)
5740{
b5685e92 5741 /* When hot-adding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
5742 if (!node_start_pfn && !node_end_pfn)
5743 return 0;
5744
7960aedd 5745 /* Get the start and end of the zone */
d91749c1
TI
5746 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5747 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
5748 adjust_zone_range_for_zone_movable(nid, zone_type,
5749 node_start_pfn, node_end_pfn,
d91749c1 5750 zone_start_pfn, zone_end_pfn);
c713216d
MG
5751
5752 /* Check that this node has pages within the zone's required range */
d91749c1 5753 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
c713216d
MG
5754 return 0;
5755
5756 /* Move the zone boundaries inside the node if necessary */
d91749c1
TI
5757 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5758 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
c713216d
MG
5759
5760 /* Return the spanned pages */
d91749c1 5761 return *zone_end_pfn - *zone_start_pfn;
c713216d
MG
5762}
5763
5764/*
5765 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 5766 * then all holes in the requested range will be accounted for.
c713216d 5767 */
32996250 5768unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
5769 unsigned long range_start_pfn,
5770 unsigned long range_end_pfn)
5771{
96e907d1
TH
5772 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5773 unsigned long start_pfn, end_pfn;
5774 int i;
c713216d 5775
96e907d1
TH
5776 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5777 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5778 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5779 nr_absent -= end_pfn - start_pfn;
c713216d 5780 }
96e907d1 5781 return nr_absent;
c713216d
MG
5782}
5783
5784/**
5785 * absent_pages_in_range - Return number of page frames in holes within a range
5786 * @start_pfn: The start PFN to start searching for holes
5787 * @end_pfn: The end PFN to stop searching for holes
5788 *
88ca3b94 5789 * It returns the number of page frames in memory holes within a range.
c713216d
MG
5790 */
5791unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5792 unsigned long end_pfn)
5793{
5794 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5795}
5796
5797/* Return the number of page frames in holes in a zone on a node */
6ea6e688 5798static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d 5799 unsigned long zone_type,
7960aedd
ZY
5800 unsigned long node_start_pfn,
5801 unsigned long node_end_pfn,
c713216d
MG
5802 unsigned long *ignored)
5803{
96e907d1
TH
5804 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5805 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
9c7cd687 5806 unsigned long zone_start_pfn, zone_end_pfn;
342332e6 5807 unsigned long nr_absent;
9c7cd687 5808
b5685e92 5809 /* When hot-adding a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
5810 if (!node_start_pfn && !node_end_pfn)
5811 return 0;
5812
96e907d1
TH
5813 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5814 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
9c7cd687 5815
2a1e274a
MG
5816 adjust_zone_range_for_zone_movable(nid, zone_type,
5817 node_start_pfn, node_end_pfn,
5818 &zone_start_pfn, &zone_end_pfn);
342332e6
TI
5819 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5820
5821 /*
5822 * ZONE_MOVABLE handling.
5823 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5824 * and vice versa.
5825 */
e506b996
XQ
5826 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5827 unsigned long start_pfn, end_pfn;
5828 struct memblock_region *r;
5829
5830 for_each_memblock(memory, r) {
5831 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5832 zone_start_pfn, zone_end_pfn);
5833 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5834 zone_start_pfn, zone_end_pfn);
5835
5836 if (zone_type == ZONE_MOVABLE &&
5837 memblock_is_mirror(r))
5838 nr_absent += end_pfn - start_pfn;
5839
5840 if (zone_type == ZONE_NORMAL &&
5841 !memblock_is_mirror(r))
5842 nr_absent += end_pfn - start_pfn;
342332e6
TI
5843 }
5844 }
5845
5846 return nr_absent;
c713216d 5847}
0e0b864e 5848
0ee332c1 5849#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6ea6e688 5850static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d 5851 unsigned long zone_type,
7960aedd
ZY
5852 unsigned long node_start_pfn,
5853 unsigned long node_end_pfn,
d91749c1
TI
5854 unsigned long *zone_start_pfn,
5855 unsigned long *zone_end_pfn,
c713216d
MG
5856 unsigned long *zones_size)
5857{
d91749c1
TI
5858 unsigned int zone;
5859
5860 *zone_start_pfn = node_start_pfn;
5861 for (zone = 0; zone < zone_type; zone++)
5862 *zone_start_pfn += zones_size[zone];
5863
5864 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5865
c713216d
MG
5866 return zones_size[zone_type];
5867}
5868
6ea6e688 5869static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d 5870 unsigned long zone_type,
7960aedd
ZY
5871 unsigned long node_start_pfn,
5872 unsigned long node_end_pfn,
c713216d
MG
5873 unsigned long *zholes_size)
5874{
5875 if (!zholes_size)
5876 return 0;
5877
5878 return zholes_size[zone_type];
5879}
20e6926d 5880
0ee332c1 5881#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5882
a3142c8e 5883static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
7960aedd
ZY
5884 unsigned long node_start_pfn,
5885 unsigned long node_end_pfn,
5886 unsigned long *zones_size,
5887 unsigned long *zholes_size)
c713216d 5888{
febd5949 5889 unsigned long realtotalpages = 0, totalpages = 0;
c713216d
MG
5890 enum zone_type i;
5891
febd5949
GZ
5892 for (i = 0; i < MAX_NR_ZONES; i++) {
5893 struct zone *zone = pgdat->node_zones + i;
d91749c1 5894 unsigned long zone_start_pfn, zone_end_pfn;
febd5949 5895 unsigned long size, real_size;
c713216d 5896
febd5949
GZ
5897 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5898 node_start_pfn,
5899 node_end_pfn,
d91749c1
TI
5900 &zone_start_pfn,
5901 &zone_end_pfn,
febd5949
GZ
5902 zones_size);
5903 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
7960aedd
ZY
5904 node_start_pfn, node_end_pfn,
5905 zholes_size);
d91749c1
TI
5906 if (size)
5907 zone->zone_start_pfn = zone_start_pfn;
5908 else
5909 zone->zone_start_pfn = 0;
febd5949
GZ
5910 zone->spanned_pages = size;
5911 zone->present_pages = real_size;
5912
5913 totalpages += size;
5914 realtotalpages += real_size;
5915 }
5916
5917 pgdat->node_spanned_pages = totalpages;
c713216d
MG
5918 pgdat->node_present_pages = realtotalpages;
5919 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5920 realtotalpages);
5921}
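/*
 * Editor's worked example (illustrative): a node whose only zone spans
 * pfns 0..262144 but contains a 16384-pfn firmware hole ends up with
 * zone->spanned_pages = 262144 and zone->present_pages = 245760; the node
 * totals printed above are simply the sums of these per-zone values.
 */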
5922
835c134e
MG
5923#ifndef CONFIG_SPARSEMEM
5924/*
5925 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
5926 * Start by making sure zonesize is a multiple of pageblock_order by rounding
5927 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
5928 * round what is now in bits to nearest long in bits, then return it in
5929 * bytes.
5930 */
7c45512d 5931static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
835c134e
MG
5932{
5933 unsigned long usemapsize;
5934
7c45512d 5935 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
d9c23400
MG
5936 usemapsize = roundup(zonesize, pageblock_nr_pages);
5937 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
5938 usemapsize *= NR_PAGEBLOCK_BITS;
5939 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5940
5941 return usemapsize / 8;
5942}
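/*
 * Editor's worked example (assumes 4KiB pages, pageblock_order == 9 so
 * pageblock_nr_pages == 512, and NR_PAGEBLOCK_BITS == 4): a 1GiB zone
 * (262144 pages) starting on a pageblock boundary covers 512 pageblocks,
 * needing 512 * 4 = 2048 bits; rounded up to whole unsigned longs that is
 * still 2048 bits, so usemap_size() returns 2048 / 8 = 256 bytes.
 */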
5943
5944static void __init setup_usemap(struct pglist_data *pgdat,
7c45512d
LT
5945 struct zone *zone,
5946 unsigned long zone_start_pfn,
5947 unsigned long zonesize)
835c134e 5948{
7c45512d 5949 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
835c134e 5950 zone->pageblock_flags = NULL;
58a01a45 5951 if (usemapsize)
6782832e
SS
5952 zone->pageblock_flags =
5953 memblock_virt_alloc_node_nopanic(usemapsize,
5954 pgdat->node_id);
835c134e
MG
5955}
5956#else
7c45512d
LT
5957static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5958 unsigned long zone_start_pfn, unsigned long zonesize) {}
835c134e
MG
5959#endif /* CONFIG_SPARSEMEM */
5960
d9c23400 5961#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c 5962
d9c23400 5963/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
15ca220e 5964void __paginginit set_pageblock_order(void)
d9c23400 5965{
955c1cd7
AM
5966 unsigned int order;
5967
d9c23400
MG
5968 /* Check that pageblock_nr_pages has not already been setup */
5969 if (pageblock_order)
5970 return;
5971
955c1cd7
AM
5972 if (HPAGE_SHIFT > PAGE_SHIFT)
5973 order = HUGETLB_PAGE_ORDER;
5974 else
5975 order = MAX_ORDER - 1;
5976
d9c23400
MG
5977 /*
5978 * Assume the largest contiguous order of interest is a huge page.
955c1cd7
AM
5979 * This value may be variable depending on boot parameters on IA64 and
5980 * powerpc.
d9c23400
MG
5981 */
5982 pageblock_order = order;
5983}
5984#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5985
ba72cb8c
MG
5986/*
5987 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
955c1cd7
AM
5988 * is unused as pageblock_order is set at compile-time. See
5989 * include/linux/pageblock-flags.h for the values of pageblock_order based on
5990 * the kernel config
ba72cb8c 5991 */
15ca220e 5992void __paginginit set_pageblock_order(void)
ba72cb8c 5993{
ba72cb8c 5994}
d9c23400
MG
5995
5996#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5997
01cefaef
JL
5998static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5999 unsigned long present_pages)
6000{
6001 unsigned long pages = spanned_pages;
6002
6003 /*
6004 * Provide a more accurate estimation if there are holes within
6005 * the zone and SPARSEMEM is in use. If there are holes within the
6006 * zone, each populated memory region may cost us one or two extra
 6007 * memmap pages due to alignment because the memmap pages for each
89d790ab 6008 * populated region may not be naturally aligned on a page boundary.
01cefaef
JL
6009 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6010 */
6011 if (spanned_pages > present_pages + (present_pages >> 4) &&
6012 IS_ENABLED(CONFIG_SPARSEMEM))
6013 pages = present_pages;
6014
6015 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6016}
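/*
 * Editor's worked example (assumes sizeof(struct page) == 64 and 4KiB
 * pages): a fully populated 1GiB zone (spanned == present == 262144) needs
 * 262144 * 64 bytes = 16MiB of memmap, so calc_memmap_size() returns 4096
 * pages.  If SPARSEMEM is enabled and the zone is mostly holes (spanned
 * well above present), present_pages is used instead so the estimate is
 * not wildly inflated.
 */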
6017
1da177e4
LT
6018/*
6019 * Set up the zone data structures:
6020 * - mark all pages reserved
6021 * - mark all memory queues empty
6022 * - clear the memory bitmaps
6527af5d
MK
6023 *
6024 * NOTE: pgdat should get zeroed by caller.
1da177e4 6025 */
7f3eb55b 6026static void __paginginit free_area_init_core(struct pglist_data *pgdat)
1da177e4 6027{
2f1b6248 6028 enum zone_type j;
ed8ece2e 6029 int nid = pgdat->node_id;
1da177e4 6030
208d54e5 6031 pgdat_resize_init(pgdat);
8177a420
AA
6032#ifdef CONFIG_NUMA_BALANCING
6033 spin_lock_init(&pgdat->numabalancing_migrate_lock);
6034 pgdat->numabalancing_migrate_nr_pages = 0;
6035 pgdat->numabalancing_migrate_next_window = jiffies;
a3d0a918
KS
6036#endif
6037#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6038 spin_lock_init(&pgdat->split_queue_lock);
6039 INIT_LIST_HEAD(&pgdat->split_queue);
6040 pgdat->split_queue_len = 0;
8177a420 6041#endif
1da177e4 6042 init_waitqueue_head(&pgdat->kswapd_wait);
5515061d 6043 init_waitqueue_head(&pgdat->pfmemalloc_wait);
698b1b30
VB
6044#ifdef CONFIG_COMPACTION
6045 init_waitqueue_head(&pgdat->kcompactd_wait);
6046#endif
eefa864b 6047 pgdat_page_ext_init(pgdat);
a52633d8 6048 spin_lock_init(&pgdat->lru_lock);
a9dd0a83 6049 lruvec_init(node_lruvec(pgdat));
5f63b720 6050
385386cf
JW
6051 pgdat->per_cpu_nodestats = &boot_nodestats;
6052
1da177e4
LT
6053 for (j = 0; j < MAX_NR_ZONES; j++) {
6054 struct zone *zone = pgdat->node_zones + j;
9feedc9d 6055 unsigned long size, realsize, freesize, memmap_pages;
d91749c1 6056 unsigned long zone_start_pfn = zone->zone_start_pfn;
1da177e4 6057
febd5949
GZ
6058 size = zone->spanned_pages;
6059 realsize = freesize = zone->present_pages;
1da177e4 6060
0e0b864e 6061 /*
9feedc9d 6062 * Adjust freesize so that it accounts for how much memory
0e0b864e
MG
6063 * is used by this zone for memmap. This affects the watermark
6064 * and per-cpu initialisations
6065 */
01cefaef 6066 memmap_pages = calc_memmap_size(size, realsize);
ba914f48
ZH
6067 if (!is_highmem_idx(j)) {
6068 if (freesize >= memmap_pages) {
6069 freesize -= memmap_pages;
6070 if (memmap_pages)
6071 printk(KERN_DEBUG
6072 " %s zone: %lu pages used for memmap\n",
6073 zone_names[j], memmap_pages);
6074 } else
1170532b 6075 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
ba914f48
ZH
6076 zone_names[j], memmap_pages, freesize);
6077 }
0e0b864e 6078
6267276f 6079 /* Account for reserved pages */
9feedc9d
JL
6080 if (j == 0 && freesize > dma_reserve) {
6081 freesize -= dma_reserve;
d903ef9f 6082 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 6083 zone_names[0], dma_reserve);
0e0b864e
MG
6084 }
6085
98d2b0eb 6086 if (!is_highmem_idx(j))
9feedc9d 6087 nr_kernel_pages += freesize;
01cefaef
JL
6088 /* Charge for highmem memmap if there are enough kernel pages */
6089 else if (nr_kernel_pages > memmap_pages * 2)
6090 nr_kernel_pages -= memmap_pages;
9feedc9d 6091 nr_all_pages += freesize;
1da177e4 6092
9feedc9d
JL
6093 /*
 6094 * Set an approximate value for lowmem here; it will be adjusted
6095 * when the bootmem allocator frees pages into the buddy system.
6096 * And all highmem pages will be managed by the buddy system.
6097 */
6098 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
9614634f 6099#ifdef CONFIG_NUMA
d5f541ed 6100 zone->node = nid;
9614634f 6101#endif
1da177e4 6102 zone->name = zone_names[j];
a52633d8 6103 zone->zone_pgdat = pgdat;
1da177e4 6104 spin_lock_init(&zone->lock);
bdc8cb98 6105 zone_seqlock_init(zone);
ed8ece2e 6106 zone_pcp_init(zone);
81c0a2bb 6107
1da177e4
LT
6108 if (!size)
6109 continue;
6110
955c1cd7 6111 set_pageblock_order();
7c45512d 6112 setup_usemap(pgdat, zone, zone_start_pfn, size);
dc0bbf3b 6113 init_currently_empty_zone(zone, zone_start_pfn, size);
76cdd58e 6114 memmap_init(size, nid, j, zone_start_pfn);
1da177e4
LT
6115 }
6116}
6117
bd721ea7 6118static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 6119{
b0aeba74 6120 unsigned long __maybe_unused start = 0;
a1c34a3b
LA
6121 unsigned long __maybe_unused offset = 0;
6122
1da177e4
LT
6123 /* Skip empty nodes */
6124 if (!pgdat->node_spanned_pages)
6125 return;
6126
d41dee36 6127#ifdef CONFIG_FLAT_NODE_MEM_MAP
b0aeba74
TL
6128 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6129 offset = pgdat->node_start_pfn - start;
1da177e4
LT
6130 /* ia64 gets its own node_mem_map, before this, without bootmem */
6131 if (!pgdat->node_mem_map) {
b0aeba74 6132 unsigned long size, end;
d41dee36
AW
6133 struct page *map;
6134
e984bb43
BP
6135 /*
6136 * The zone's endpoints aren't required to be MAX_ORDER
6137 * aligned but the node_mem_map endpoints must be in order
6138 * for the buddy allocator to function correctly.
6139 */
108bcc96 6140 end = pgdat_end_pfn(pgdat);
e984bb43
BP
6141 end = ALIGN(end, MAX_ORDER_NR_PAGES);
6142 size = (end - start) * sizeof(struct page);
6f167ec7
DH
6143 map = alloc_remap(pgdat->node_id, size);
6144 if (!map)
6782832e
SS
6145 map = memblock_virt_alloc_node_nopanic(size,
6146 pgdat->node_id);
a1c34a3b 6147 pgdat->node_mem_map = map + offset;
1da177e4 6148 }
12d810c1 6149#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
6150 /*
6151 * With no DISCONTIG, the global mem_map is just set as node 0's
6152 */
c713216d 6153 if (pgdat == NODE_DATA(0)) {
1da177e4 6154 mem_map = NODE_DATA(0)->node_mem_map;
a1c34a3b 6155#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
c713216d 6156 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
a1c34a3b 6157 mem_map -= offset;
0ee332c1 6158#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 6159 }
1da177e4 6160#endif
d41dee36 6161#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
6162}
6163
9109fb7b
JW
6164void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
6165 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 6166{
9109fb7b 6167 pg_data_t *pgdat = NODE_DATA(nid);
7960aedd
ZY
6168 unsigned long start_pfn = 0;
6169 unsigned long end_pfn = 0;
9109fb7b 6170
88fdf75d 6171 /* pg_data_t should be reset to zero when it's allocated */
38087d9b 6172 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
88fdf75d 6173
1da177e4
LT
6174 pgdat->node_id = nid;
6175 pgdat->node_start_pfn = node_start_pfn;
75ef7184 6176 pgdat->per_cpu_nodestats = NULL;
7960aedd
ZY
6177#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6178 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8d29e18a 6179 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
4ada0c5a
ZL
6180 (u64)start_pfn << PAGE_SHIFT,
6181 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
d91749c1
TI
6182#else
6183 start_pfn = node_start_pfn;
7960aedd
ZY
6184#endif
6185 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6186 zones_size, zholes_size);
1da177e4
LT
6187
6188 alloc_node_mem_map(pgdat);
e8c27ac9
YL
6189#ifdef CONFIG_FLAT_NODE_MEM_MAP
6190 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
6191 nid, (unsigned long)pgdat,
6192 (unsigned long)pgdat->node_mem_map);
6193#endif
1da177e4 6194
864b9a39 6195 reset_deferred_meminit(pgdat);
7f3eb55b 6196 free_area_init_core(pgdat);
1da177e4
LT
6197}
6198
0ee332c1 6199#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
418508c1
MS
6200
6201#if MAX_NUMNODES > 1
6202/*
6203 * Figure out the number of possible node ids.
6204 */
f9872caf 6205void __init setup_nr_node_ids(void)
418508c1 6206{
904a9553 6207 unsigned int highest;
418508c1 6208
904a9553 6209 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
418508c1
MS
6210 nr_node_ids = highest + 1;
6211}
418508c1
MS
6212#endif
6213
1e01979c
TH
6214/**
6215 * node_map_pfn_alignment - determine the maximum internode alignment
6216 *
6217 * This function should be called after node map is populated and sorted.
6218 * It calculates the maximum power of two alignment which can distinguish
6219 * all the nodes.
6220 *
6221 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6222 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 6223 * nodes are shifted by 256MiB, 256MiB is returned. Note that if only the last node is
6224 * shifted, 1GiB is enough and this function will indicate so.
6225 *
6226 * This is used to test whether pfn -> nid mapping of the chosen memory
6227 * model has fine enough granularity to avoid incorrect mapping for the
6228 * populated node map.
6229 *
6230 * Returns the determined alignment in pfn's. 0 if there is no alignment
6231 * requirement (single node).
6232 */
6233unsigned long __init node_map_pfn_alignment(void)
6234{
6235 unsigned long accl_mask = 0, last_end = 0;
c13291a5 6236 unsigned long start, end, mask;
1e01979c 6237 int last_nid = -1;
c13291a5 6238 int i, nid;
1e01979c 6239
c13291a5 6240 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1e01979c
TH
6241 if (!start || last_nid < 0 || last_nid == nid) {
6242 last_nid = nid;
6243 last_end = end;
6244 continue;
6245 }
6246
6247 /*
6248 * Start with a mask granular enough to pin-point to the
6249 * start pfn and tick off bits one-by-one until it becomes
6250 * too coarse to separate the current node from the last.
6251 */
6252 mask = ~((1 << __ffs(start)) - 1);
6253 while (mask && last_end <= (start & (mask << 1)))
6254 mask <<= 1;
6255
6256 /* accumulate all internode masks */
6257 accl_mask |= mask;
6258 }
6259
6260 /* convert mask to number of pages */
6261 return ~accl_mask + 1;
6262}
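/*
 * Editor's worked example (assumes 4KiB pages): two nodes, node 0 covering
 * pfns [0, 65536) and node 1 starting at pfn 65536 (256MiB).  For node 1,
 * mask = ~((1 << __ffs(65536)) - 1) keeps the low 16 bits clear; since
 * last_end (65536) is not <= (65536 & (mask << 1)) == 0, the mask is not
 * widened.  accl_mask ends up with only the low 16 bits clear, so
 * ~accl_mask + 1 == 65536 pfns, i.e. the 256MiB alignment mentioned in the
 * comment above.
 */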
6263
a6af2bc3 6264/* Find the lowest pfn for a node */
b69a7288 6265static unsigned long __init find_min_pfn_for_node(int nid)
c713216d 6266{
a6af2bc3 6267 unsigned long min_pfn = ULONG_MAX;
c13291a5
TH
6268 unsigned long start_pfn;
6269 int i;
1abbfb41 6270
c13291a5
TH
6271 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6272 min_pfn = min(min_pfn, start_pfn);
c713216d 6273
a6af2bc3 6274 if (min_pfn == ULONG_MAX) {
1170532b 6275 pr_warn("Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
6276 return 0;
6277 }
6278
6279 return min_pfn;
c713216d
MG
6280}
6281
6282/**
6283 * find_min_pfn_with_active_regions - Find the minimum PFN registered
6284 *
6285 * It returns the minimum PFN based on information provided via
7d018176 6286 * memblock_set_node().
c713216d
MG
6287 */
6288unsigned long __init find_min_pfn_with_active_regions(void)
6289{
6290 return find_min_pfn_for_node(MAX_NUMNODES);
6291}
6292
37b07e41
LS
6293/*
6294 * early_calculate_totalpages()
6295 * Sum pages in active regions for movable zone.
4b0ef1fe 6296 * Populate N_MEMORY for calculating usable_nodes.
37b07e41 6297 */
484f51f8 6298static unsigned long __init early_calculate_totalpages(void)
7e63efef 6299{
7e63efef 6300 unsigned long totalpages = 0;
c13291a5
TH
6301 unsigned long start_pfn, end_pfn;
6302 int i, nid;
6303
6304 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6305 unsigned long pages = end_pfn - start_pfn;
7e63efef 6306
37b07e41
LS
6307 totalpages += pages;
6308 if (pages)
4b0ef1fe 6309 node_set_state(nid, N_MEMORY);
37b07e41 6310 }
b8af2941 6311 return totalpages;
7e63efef
MG
6312}
6313
2a1e274a
MG
6314/*
6315 * Find the PFN the Movable zone begins in each node. Kernel memory
6316 * is spread evenly between nodes as long as the nodes have enough
6317 * memory. When they don't, some nodes will have more kernelcore than
6318 * others
6319 */
b224ef85 6320static void __init find_zone_movable_pfns_for_nodes(void)
2a1e274a
MG
6321{
6322 int i, nid;
6323 unsigned long usable_startpfn;
6324 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd 6325 /* save the state before borrow the nodemask */
4b0ef1fe 6326 nodemask_t saved_node_state = node_states[N_MEMORY];
37b07e41 6327 unsigned long totalpages = early_calculate_totalpages();
4b0ef1fe 6328 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
136199f0 6329 struct memblock_region *r;
b2f3eebe
TC
6330
6331 /* Need to find movable_zone earlier when movable_node is specified. */
6332 find_usable_zone_for_movable();
6333
6334 /*
6335 * If movable_node is specified, ignore kernelcore and movablecore
6336 * options.
6337 */
6338 if (movable_node_is_enabled()) {
136199f0
EM
6339 for_each_memblock(memory, r) {
6340 if (!memblock_is_hotpluggable(r))
b2f3eebe
TC
6341 continue;
6342
136199f0 6343 nid = r->nid;
b2f3eebe 6344
136199f0 6345 usable_startpfn = PFN_DOWN(r->base);
b2f3eebe
TC
6346 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6347 min(usable_startpfn, zone_movable_pfn[nid]) :
6348 usable_startpfn;
6349 }
6350
6351 goto out2;
6352 }
2a1e274a 6353
342332e6
TI
6354 /*
6355 * If kernelcore=mirror is specified, ignore movablecore option
6356 */
6357 if (mirrored_kernelcore) {
6358 bool mem_below_4gb_not_mirrored = false;
6359
6360 for_each_memblock(memory, r) {
6361 if (memblock_is_mirror(r))
6362 continue;
6363
6364 nid = r->nid;
6365
6366 usable_startpfn = memblock_region_memory_base_pfn(r);
6367
6368 if (usable_startpfn < 0x100000) {
6369 mem_below_4gb_not_mirrored = true;
6370 continue;
6371 }
6372
6373 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6374 min(usable_startpfn, zone_movable_pfn[nid]) :
6375 usable_startpfn;
6376 }
6377
6378 if (mem_below_4gb_not_mirrored)
6379 pr_warn("This configuration results in unmirrored kernel memory.");
6380
6381 goto out2;
6382 }
6383
7e63efef 6384 /*
b2f3eebe 6385 * If movablecore=nn[KMG] was specified, calculate what size of
7e63efef
MG
6386 * kernelcore that corresponds so that memory usable for
6387 * any allocation type is evenly spread. If both kernelcore
6388 * and movablecore are specified, then the value of kernelcore
6389 * will be used for required_kernelcore if it's greater than
6390 * what movablecore would have allowed.
6391 */
6392 if (required_movablecore) {
7e63efef
MG
6393 unsigned long corepages;
6394
6395 /*
6396 * Round-up so that ZONE_MOVABLE is at least as large as what
6397 * was requested by the user
6398 */
6399 required_movablecore =
6400 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
9fd745d4 6401 required_movablecore = min(totalpages, required_movablecore);
7e63efef
MG
6402 corepages = totalpages - required_movablecore;
6403
6404 required_kernelcore = max(required_kernelcore, corepages);
6405 }
6406
bde304bd
XQ
6407 /*
6408 * If kernelcore was not specified or kernelcore size is larger
6409 * than totalpages, there is no ZONE_MOVABLE.
6410 */
6411 if (!required_kernelcore || required_kernelcore >= totalpages)
66918dcd 6412 goto out;
2a1e274a
MG
6413
6414 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
2a1e274a
MG
6415 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6416
6417restart:
6418 /* Spread kernelcore memory as evenly as possible throughout nodes */
6419 kernelcore_node = required_kernelcore / usable_nodes;
4b0ef1fe 6420 for_each_node_state(nid, N_MEMORY) {
c13291a5
TH
6421 unsigned long start_pfn, end_pfn;
6422
2a1e274a
MG
6423 /*
6424 * Recalculate kernelcore_node if the division per node
6425 * now exceeds what is necessary to satisfy the requested
6426 * amount of memory for the kernel
6427 */
6428 if (required_kernelcore < kernelcore_node)
6429 kernelcore_node = required_kernelcore / usable_nodes;
6430
6431 /*
6432 * As the map is walked, we track how much memory is usable
6433 * by the kernel using kernelcore_remaining. When it is
6434 * 0, the rest of the node is usable by ZONE_MOVABLE
6435 */
6436 kernelcore_remaining = kernelcore_node;
6437
6438 /* Go through each range of PFNs within this node */
c13291a5 6439 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2a1e274a
MG
6440 unsigned long size_pages;
6441
c13291a5 6442 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
2a1e274a
MG
6443 if (start_pfn >= end_pfn)
6444 continue;
6445
6446 /* Account for what is only usable for kernelcore */
6447 if (start_pfn < usable_startpfn) {
6448 unsigned long kernel_pages;
6449 kernel_pages = min(end_pfn, usable_startpfn)
6450 - start_pfn;
6451
6452 kernelcore_remaining -= min(kernel_pages,
6453 kernelcore_remaining);
6454 required_kernelcore -= min(kernel_pages,
6455 required_kernelcore);
6456
6457 /* Continue if range is now fully accounted */
6458 if (end_pfn <= usable_startpfn) {
6459
6460 /*
6461 * Push zone_movable_pfn to the end so
6462 * that if we have to rebalance
6463 * kernelcore across nodes, we will
6464 * not double account here
6465 */
6466 zone_movable_pfn[nid] = end_pfn;
6467 continue;
6468 }
6469 start_pfn = usable_startpfn;
6470 }
6471
6472 /*
6473 * The usable PFN range for ZONE_MOVABLE is from
6474 * start_pfn->end_pfn. Calculate size_pages as the
6475 * number of pages used as kernelcore
6476 */
6477 size_pages = end_pfn - start_pfn;
6478 if (size_pages > kernelcore_remaining)
6479 size_pages = kernelcore_remaining;
6480 zone_movable_pfn[nid] = start_pfn + size_pages;
6481
6482 /*
6483 * Some kernelcore has been met, update counts and
6484 * break if the kernelcore for this node has been
b8af2941 6485 * satisfied
2a1e274a
MG
6486 */
6487 required_kernelcore -= min(required_kernelcore,
6488 size_pages);
6489 kernelcore_remaining -= size_pages;
6490 if (!kernelcore_remaining)
6491 break;
6492 }
6493 }
6494
6495 /*
6496 * If there is still required_kernelcore, we do another pass with one
6497 * less node in the count. This will push zone_movable_pfn[nid] further
6498 * along on the nodes that still have memory until kernelcore is
b8af2941 6499 * satisfied
2a1e274a
MG
6500 */
6501 usable_nodes--;
6502 if (usable_nodes && required_kernelcore > usable_nodes)
6503 goto restart;
6504
b2f3eebe 6505out2:
2a1e274a
MG
6506 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
6507 for (nid = 0; nid < MAX_NUMNODES; nid++)
6508 zone_movable_pfn[nid] =
6509 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd 6510
20e6926d 6511out:
66918dcd 6512 /* restore the node_state */
4b0ef1fe 6513 node_states[N_MEMORY] = saved_node_state;
2a1e274a
MG
6514}
6515
4b0ef1fe
LJ
6516/* Any regular or high memory on that node? */
6517static void check_for_memory(pg_data_t *pgdat, int nid)
37b07e41 6518{
37b07e41
LS
6519 enum zone_type zone_type;
6520
4b0ef1fe
LJ
6521 if (N_MEMORY == N_NORMAL_MEMORY)
6522 return;
6523
6524 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
37b07e41 6525 struct zone *zone = &pgdat->node_zones[zone_type];
b38a8725 6526 if (populated_zone(zone)) {
4b0ef1fe
LJ
6527 node_set_state(nid, N_HIGH_MEMORY);
6528 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6529 zone_type <= ZONE_NORMAL)
6530 node_set_state(nid, N_NORMAL_MEMORY);
d0048b0e
BL
6531 break;
6532 }
37b07e41 6533 }
37b07e41
LS
6534}
6535
c713216d
MG
6536/**
6537 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 6538 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
6539 *
6540 * This will call free_area_init_node() for each active node in the system.
7d018176 6541 * Using the page ranges provided by memblock_set_node(), the size of each
c713216d
MG
6542 * zone in each node and their holes are calculated. If the maximum PFNs
6543 * of two adjacent zones match, the higher zone is assumed to be empty.
6544 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
6545 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
6546 * starts where the previous one ended. For example, ZONE_DMA32 starts
6547 * at arch_max_dma_pfn.
6548 */
6549void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6550{
c13291a5
TH
6551 unsigned long start_pfn, end_pfn;
6552 int i, nid;
a6af2bc3 6553
c713216d
MG
6554 /* Record where the zone boundaries are */
6555 memset(arch_zone_lowest_possible_pfn, 0,
6556 sizeof(arch_zone_lowest_possible_pfn));
6557 memset(arch_zone_highest_possible_pfn, 0,
6558 sizeof(arch_zone_highest_possible_pfn));
90cae1fe
OH
6559
6560 start_pfn = find_min_pfn_with_active_regions();
6561
6562 for (i = 0; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
6563 if (i == ZONE_MOVABLE)
6564 continue;
90cae1fe
OH
6565
6566 end_pfn = max(max_zone_pfn[i], start_pfn);
6567 arch_zone_lowest_possible_pfn[i] = start_pfn;
6568 arch_zone_highest_possible_pfn[i] = end_pfn;
6569
6570 start_pfn = end_pfn;
c713216d 6571 }
2a1e274a
MG
6572
6573 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
6574 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
b224ef85 6575 find_zone_movable_pfns_for_nodes();
c713216d 6576
c713216d 6577 /* Print out the zone ranges */
f88dfff5 6578 pr_info("Zone ranges:\n");
2a1e274a
MG
6579 for (i = 0; i < MAX_NR_ZONES; i++) {
6580 if (i == ZONE_MOVABLE)
6581 continue;
f88dfff5 6582 pr_info(" %-8s ", zone_names[i]);
72f0ba02
DR
6583 if (arch_zone_lowest_possible_pfn[i] ==
6584 arch_zone_highest_possible_pfn[i])
f88dfff5 6585 pr_cont("empty\n");
72f0ba02 6586 else
8d29e18a
JG
6587 pr_cont("[mem %#018Lx-%#018Lx]\n",
6588 (u64)arch_zone_lowest_possible_pfn[i]
6589 << PAGE_SHIFT,
6590 ((u64)arch_zone_highest_possible_pfn[i]
a62e2f4f 6591 << PAGE_SHIFT) - 1);
2a1e274a
MG
6592 }
6593
6594 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
f88dfff5 6595 pr_info("Movable zone start for each node\n");
2a1e274a
MG
6596 for (i = 0; i < MAX_NUMNODES; i++) {
6597 if (zone_movable_pfn[i])
8d29e18a
JG
6598 pr_info(" Node %d: %#018Lx\n", i,
6599 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
2a1e274a 6600 }
c713216d 6601
f2d52fe5 6602 /* Print out the early node map */
f88dfff5 6603 pr_info("Early memory node ranges\n");
c13291a5 6604 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
8d29e18a
JG
6605 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6606 (u64)start_pfn << PAGE_SHIFT,
6607 ((u64)end_pfn << PAGE_SHIFT) - 1);
c713216d
MG
6608
6609 /* Initialise every node */
708614e6 6610 mminit_verify_pageflags_layout();
8ef82866 6611 setup_nr_node_ids();
c713216d
MG
6612 for_each_online_node(nid) {
6613 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 6614 free_area_init_node(nid, NULL,
c713216d 6615 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
6616
6617 /* Any memory on that node */
6618 if (pgdat->node_present_pages)
4b0ef1fe
LJ
6619 node_set_state(nid, N_MEMORY);
6620 check_for_memory(pgdat, nid);
c713216d
MG
6621 }
6622}
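/*
 * Illustrative sketch (not part of this file): architecture code normally
 * fills a max_zone_pfn[] array with the highest PFN each zone may span and
 * hands it to free_area_init_nodes().  The zone indices used and the
 * max_low_pfn/max_pfn parameters below are stand-ins for whatever the
 * architecture actually tracks.
 */
static void __init example_zone_sizes_init(unsigned long max_low_pfn,
					   unsigned long max_pfn)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif
	free_area_init_nodes(max_zone_pfns);
}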
2a1e274a 6623
7e63efef 6624static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
6625{
6626 unsigned long long coremem;
6627 if (!p)
6628 return -EINVAL;
6629
6630 coremem = memparse(p, &p);
7e63efef 6631 *core = coremem >> PAGE_SHIFT;
2a1e274a 6632
7e63efef 6633 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
6634 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6635
6636 return 0;
6637}
ed7ed365 6638
7e63efef
MG
6639/*
6640 * kernelcore=size sets the amount of memory to use for allocations that
6641 * cannot be reclaimed or migrated.
6642 */
6643static int __init cmdline_parse_kernelcore(char *p)
6644{
342332e6
TI
6645 /* parse kernelcore=mirror */
6646 if (parse_option_str(p, "mirror")) {
6647 mirrored_kernelcore = true;
6648 return 0;
6649 }
6650
7e63efef
MG
6651 return cmdline_parse_core(p, &required_kernelcore);
6652}
6653
6654/*
6655 * movablecore=size sets the amount of memory to use for allocations that
6656 * can be reclaimed or migrated.
6657 */
6658static int __init cmdline_parse_movablecore(char *p)
6659{
6660 return cmdline_parse_core(p, &required_movablecore);
6661}
6662
ed7ed365 6663early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 6664early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 6665
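/*
 * Illustrative examples only: with the parameters above, a boot command
 * line might carve out movable memory like
 *
 *     kernelcore=512M          (roughly 512M kept usable for unmovable
 *                               kernel allocations, spread across nodes;
 *                               the rest becomes ZONE_MOVABLE)
 *     movablecore=2G           (at least 2G of ZONE_MOVABLE)
 *     kernelcore=mirror        (keep kernel allocations in mirrored memory)
 *
 * The sizes are placeholders; both options accept the usual [KMG] suffixes
 * handled by memparse() in cmdline_parse_core() above.
 */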
0ee332c1 6666#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 6667
c3d5f5f0
JL
6668void adjust_managed_page_count(struct page *page, long count)
6669{
6670 spin_lock(&managed_page_count_lock);
6671 page_zone(page)->managed_pages += count;
6672 totalram_pages += count;
3dcc0571
JL
6673#ifdef CONFIG_HIGHMEM
6674 if (PageHighMem(page))
6675 totalhigh_pages += count;
6676#endif
c3d5f5f0
JL
6677 spin_unlock(&managed_page_count_lock);
6678}
3dcc0571 6679EXPORT_SYMBOL(adjust_managed_page_count);
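/*
 * Illustrative sketch (not part of this file): a balloon-style driver that
 * steals a page from the system would pair the allocation with an
 * adjust_managed_page_count() update so zone watermarks and the totals
 * reported in /proc/meminfo stay accurate.  example_balloon_steal_page()
 * is a made-up helper.
 */
static void example_balloon_steal_page(struct page *page)
{
	/* ... page has already been handed to the hypervisor ... */
	adjust_managed_page_count(page, -1);
}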
c3d5f5f0 6680
11199692 6681unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
69afade7 6682{
11199692
JL
6683 void *pos;
6684 unsigned long pages = 0;
69afade7 6685
11199692
JL
6686 start = (void *)PAGE_ALIGN((unsigned long)start);
6687 end = (void *)((unsigned long)end & PAGE_MASK);
6688 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
dbe67df4 6689 if ((unsigned int)poison <= 0xFF)
11199692
JL
6690 memset(pos, poison, PAGE_SIZE);
6691 free_reserved_page(virt_to_page(pos));
69afade7
JL
6692 }
6693
6694 if (pages && s)
adb1fe9a
JP
6695 pr_info("Freeing %s memory: %ldK\n",
6696 s, pages << (PAGE_SHIFT - 10));
69afade7
JL
6697
6698 return pages;
6699}
11199692 6700EXPORT_SYMBOL(free_reserved_area);
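/*
 * Illustrative sketch (not part of this file): an architecture's
 * free_initmem() can return the .init sections to the buddy allocator with
 * free_reserved_area().  The section symbols come from <asm/sections.h>;
 * a poison value of -1 (outside 0x00-0xFF) skips the memset above.
 */
static void __init example_free_initmem(void)
{
	free_reserved_area(&__init_begin, &__init_end, -1, "unused kernel");
}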
69afade7 6701
cfa11e08
JL
6702#ifdef CONFIG_HIGHMEM
6703void free_highmem_page(struct page *page)
6704{
6705 __free_reserved_page(page);
6706 totalram_pages++;
7b4b2a0d 6707 page_zone(page)->managed_pages++;
cfa11e08
JL
6708 totalhigh_pages++;
6709}
6710#endif
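/*
 * Illustrative sketch (not part of this file): a CONFIG_HIGHMEM
 * architecture's mem_init() typically walks its highmem PFN range and
 * feeds every valid page to free_highmem_page(); the pfn bounds here are
 * placeholders.
 */
#ifdef CONFIG_HIGHMEM
static void __init example_free_highpages(unsigned long highstart_pfn,
					   unsigned long highend_pfn)
{
	unsigned long pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		if (pfn_valid(pfn))
			free_highmem_page(pfn_to_page(pfn));
}
#endif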
6711
7ee3d4e8
JL
6712
6713void __init mem_init_print_info(const char *str)
6714{
6715 unsigned long physpages, codesize, datasize, rosize, bss_size;
6716 unsigned long init_code_size, init_data_size;
6717
6718 physpages = get_num_physpages();
6719 codesize = _etext - _stext;
6720 datasize = _edata - _sdata;
6721 rosize = __end_rodata - __start_rodata;
6722 bss_size = __bss_stop - __bss_start;
6723 init_data_size = __init_end - __init_begin;
6724 init_code_size = _einittext - _sinittext;
6725
6726 /*
6727 * Detect special cases and adjust section sizes accordingly:
6728 * 1) .init.* may be embedded into .data sections
6729 * 2) .init.text.* may be out of [__init_begin, __init_end],
6730 * please refer to arch/tile/kernel/vmlinux.lds.S.
6731 * 3) .rodata.* may be embedded into .text or .data sections.
6732 */
6733#define adj_init_size(start, end, size, pos, adj) \
b8af2941
PK
6734 do { \
6735 if (start <= pos && pos < end && size > adj) \
6736 size -= adj; \
6737 } while (0)
7ee3d4e8
JL
6738
6739 adj_init_size(__init_begin, __init_end, init_data_size,
6740 _sinittext, init_code_size);
6741 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6742 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6743 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6744 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6745
6746#undef adj_init_size
6747
756a025f 6748 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7ee3d4e8 6749#ifdef CONFIG_HIGHMEM
756a025f 6750 ", %luK highmem"
7ee3d4e8 6751#endif
756a025f
JP
6752 "%s%s)\n",
6753 nr_free_pages() << (PAGE_SHIFT - 10),
6754 physpages << (PAGE_SHIFT - 10),
6755 codesize >> 10, datasize >> 10, rosize >> 10,
6756 (init_data_size + init_code_size) >> 10, bss_size >> 10,
6757 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6758 totalcma_pages << (PAGE_SHIFT - 10),
7ee3d4e8 6759#ifdef CONFIG_HIGHMEM
756a025f 6760 totalhigh_pages << (PAGE_SHIFT - 10),
7ee3d4e8 6761#endif
756a025f 6762 str ? ", " : "", str ? str : "");
7ee3d4e8
JL
6763}
6764
0e0b864e 6765/**
88ca3b94
RD
6766 * set_dma_reserve - set the specified number of pages reserved in the first zone
6767 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e 6768 *
013110a7 6769 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
0e0b864e
MG
6770 * In the DMA zone, a significant percentage may be consumed by kernel image
6771 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
6772 * function may optionally be used to account for unfreeable pages in the
6773 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6774 * smaller per-cpu batchsize.
0e0b864e
MG
6775 */
6776void __init set_dma_reserve(unsigned long new_dma_reserve)
6777{
6778 dma_reserve = new_dma_reserve;
6779}
6780
1da177e4
LT
6781void __init free_area_init(unsigned long *zones_size)
6782{
9109fb7b 6783 free_area_init_node(0, zones_size,
1da177e4
LT
6784 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6785}
1da177e4 6786
005fd4bb 6787static int page_alloc_cpu_dead(unsigned int cpu)
1da177e4 6788{
1da177e4 6789
005fd4bb
SAS
6790 lru_add_drain_cpu(cpu);
6791 drain_pages(cpu);
9f8f2172 6792
005fd4bb
SAS
6793 /*
6794 * Spill the event counters of the dead processor
6795 * into the current processor's event counters.
6796 * This artificially elevates the count of the current
6797 * processor.
6798 */
6799 vm_events_fold_cpu(cpu);
9f8f2172 6800
005fd4bb
SAS
6801 /*
6802 * Zero the differential counters of the dead processor
6803 * so that the vm statistics are consistent.
6804 *
6805 * This is only okay since the processor is dead and cannot
6806 * race with what we are doing.
6807 */
6808 cpu_vm_stats_fold(cpu);
6809 return 0;
1da177e4 6810}
1da177e4
LT
6811
6812void __init page_alloc_init(void)
6813{
005fd4bb
SAS
6814 int ret;
6815
6816 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
6817 "mm/page_alloc:dead", NULL,
6818 page_alloc_cpu_dead);
6819 WARN_ON(ret < 0);
1da177e4
LT
6820}
6821
cb45b0e9 6822/*
34b10060 6823 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
cb45b0e9
HA
6824 * or min_free_kbytes changes.
6825 */
6826static void calculate_totalreserve_pages(void)
6827{
6828 struct pglist_data *pgdat;
6829 unsigned long reserve_pages = 0;
2f6726e5 6830 enum zone_type i, j;
cb45b0e9
HA
6831
6832 for_each_online_pgdat(pgdat) {
281e3726
MG
6833
6834 pgdat->totalreserve_pages = 0;
6835
cb45b0e9
HA
6836 for (i = 0; i < MAX_NR_ZONES; i++) {
6837 struct zone *zone = pgdat->node_zones + i;
3484b2de 6838 long max = 0;
cb45b0e9
HA
6839
6840 /* Find valid and maximum lowmem_reserve in the zone */
6841 for (j = i; j < MAX_NR_ZONES; j++) {
6842 if (zone->lowmem_reserve[j] > max)
6843 max = zone->lowmem_reserve[j];
6844 }
6845
41858966
MG
6846 /* we treat the high watermark as reserved pages. */
6847 max += high_wmark_pages(zone);
cb45b0e9 6848
b40da049
JL
6849 if (max > zone->managed_pages)
6850 max = zone->managed_pages;
a8d01437 6851
281e3726 6852 pgdat->totalreserve_pages += max;
a8d01437 6853
cb45b0e9
HA
6854 reserve_pages += max;
6855 }
6856 }
6857 totalreserve_pages = reserve_pages;
6858}
6859
1da177e4
LT
6860/*
6861 * setup_per_zone_lowmem_reserve - called whenever
34b10060 6862 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
1da177e4
LT
6863 * has a correct pages reserved value, so an adequate number of
6864 * pages are left in the zone after a successful __alloc_pages().
6865 */
6866static void setup_per_zone_lowmem_reserve(void)
6867{
6868 struct pglist_data *pgdat;
2f6726e5 6869 enum zone_type j, idx;
1da177e4 6870
ec936fc5 6871 for_each_online_pgdat(pgdat) {
1da177e4
LT
6872 for (j = 0; j < MAX_NR_ZONES; j++) {
6873 struct zone *zone = pgdat->node_zones + j;
b40da049 6874 unsigned long managed_pages = zone->managed_pages;
1da177e4
LT
6875
6876 zone->lowmem_reserve[j] = 0;
6877
2f6726e5
CL
6878 idx = j;
6879 while (idx) {
1da177e4
LT
6880 struct zone *lower_zone;
6881
2f6726e5
CL
6882 idx--;
6883
1da177e4
LT
6884 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6885 sysctl_lowmem_reserve_ratio[idx] = 1;
6886
6887 lower_zone = pgdat->node_zones + idx;
b40da049 6888 lower_zone->lowmem_reserve[j] = managed_pages /
1da177e4 6889 sysctl_lowmem_reserve_ratio[idx];
b40da049 6890 managed_pages += lower_zone->managed_pages;
1da177e4
LT
6891 }
6892 }
6893 }
cb45b0e9
HA
6894
6895 /* update totalreserve_pages */
6896 calculate_totalreserve_pages();
1da177e4
LT
6897}
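/*
 * Worked example (numbers are illustrative only): with a
 * sysctl_lowmem_reserve_ratio of 256 for ZONE_DMA (the default) and a
 * single node whose ZONE_NORMAL manages 1,000,000 pages, the loop above
 * leaves
 *
 *     ZONE_DMA->lowmem_reserve[ZONE_NORMAL] = 1,000,000 / 256 =~ 3,906 pages
 *
 * i.e. allocations that could have been satisfied from ZONE_NORMAL may not
 * push ZONE_DMA below roughly 3,906 free pages.
 */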
6898
cfd3da1e 6899static void __setup_per_zone_wmarks(void)
1da177e4
LT
6900{
6901 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6902 unsigned long lowmem_pages = 0;
6903 struct zone *zone;
6904 unsigned long flags;
6905
6906 /* Calculate total number of !ZONE_HIGHMEM pages */
6907 for_each_zone(zone) {
6908 if (!is_highmem(zone))
b40da049 6909 lowmem_pages += zone->managed_pages;
1da177e4
LT
6910 }
6911
6912 for_each_zone(zone) {
ac924c60
AM
6913 u64 tmp;
6914
1125b4e3 6915 spin_lock_irqsave(&zone->lock, flags);
b40da049 6916 tmp = (u64)pages_min * zone->managed_pages;
ac924c60 6917 do_div(tmp, lowmem_pages);
1da177e4
LT
6918 if (is_highmem(zone)) {
6919 /*
669ed175
NP
6920 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6921 * need highmem pages, so cap pages_min to a small
6922 * value here.
6923 *
41858966 6924 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
42ff2703 6925 * deltas control asynchronous page reclaim, and so should
669ed175 6926 * not be capped for highmem.
1da177e4 6927 */
90ae8d67 6928 unsigned long min_pages;
1da177e4 6929
b40da049 6930 min_pages = zone->managed_pages / 1024;
90ae8d67 6931 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
41858966 6932 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 6933 } else {
669ed175
NP
6934 /*
6935 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
6936 * proportionate to the zone's size.
6937 */
41858966 6938 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
6939 }
6940
795ae7a0
JW
6941 /*
6942 * Set the kswapd watermarks distance according to the
6943 * scale factor in proportion to available memory, but
6944 * ensure a minimum size on small systems.
6945 */
6946 tmp = max_t(u64, tmp >> 2,
6947 mult_frac(zone->managed_pages,
6948 watermark_scale_factor, 10000));
6949
6950 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6951 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
49f223a9 6952
1125b4e3 6953 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 6954 }
cb45b0e9
HA
6955
6956 /* update totalreserve_pages */
6957 calculate_totalreserve_pages();
1da177e4
LT
6958}
6959
cfd3da1e
MG
6960/**
6961 * setup_per_zone_wmarks - called when min_free_kbytes changes
6962 * or when memory is hot-{added|removed}
6963 *
6964 * Ensures that the watermark[min,low,high] values for each zone are set
6965 * correctly with respect to min_free_kbytes.
6966 */
6967void setup_per_zone_wmarks(void)
6968{
6969 mutex_lock(&zonelists_mutex);
6970 __setup_per_zone_wmarks();
6971 mutex_unlock(&zonelists_mutex);
6972}
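/*
 * Worked example (illustrative numbers): assume min_free_kbytes = 65536 and
 * 4K pages, so pages_min = 65536 >> 2 = 16384 pages.  A !highmem zone that
 * manages 2,000,000 pages, half of all lowmem, gets
 *
 *     WMARK_MIN  = 16384 / 2                                 = 8192 pages
 *     delta      = max(8192 >> 2, 2,000,000 * 10 / 10000)    = 2048 pages
 *     WMARK_LOW  = 8192 + 2048                               = 10240 pages
 *     WMARK_HIGH = 8192 + 2 * 2048                           = 12288 pages
 *
 * assuming the default watermark_scale_factor of 10.
 */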
6973
1da177e4
LT
6974/*
6975 * Initialise min_free_kbytes.
6976 *
6977 * For small machines we want it small (128k min). For large machines
6978 * we want it large (64MB max). But it is not linear, because network
6979 * bandwidth does not increase linearly with machine size. We use
6980 *
b8af2941 6981 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
1da177e4
LT
6982 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6983 *
6984 * which yields
6985 *
6986 * 16MB: 512k
6987 * 32MB: 724k
6988 * 64MB: 1024k
6989 * 128MB: 1448k
6990 * 256MB: 2048k
6991 * 512MB: 2896k
6992 * 1024MB: 4096k
6993 * 2048MB: 5792k
6994 * 4096MB: 8192k
6995 * 8192MB: 11584k
6996 * 16384MB: 16384k
6997 */
1b79acc9 6998int __meminit init_per_zone_wmark_min(void)
1da177e4
LT
6999{
7000 unsigned long lowmem_kbytes;
5f12733e 7001 int new_min_free_kbytes;
1da177e4
LT
7002
7003 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5f12733e
MH
7004 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7005
7006 if (new_min_free_kbytes > user_min_free_kbytes) {
7007 min_free_kbytes = new_min_free_kbytes;
7008 if (min_free_kbytes < 128)
7009 min_free_kbytes = 128;
7010 if (min_free_kbytes > 65536)
7011 min_free_kbytes = 65536;
7012 } else {
7013 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7014 new_min_free_kbytes, user_min_free_kbytes);
7015 }
bc75d33f 7016 setup_per_zone_wmarks();
a6cccdc3 7017 refresh_zone_stat_thresholds();
1da177e4 7018 setup_per_zone_lowmem_reserve();
6423aa81
JK
7019
7020#ifdef CONFIG_NUMA
7021 setup_min_unmapped_ratio();
7022 setup_min_slab_ratio();
7023#endif
7024
1da177e4
LT
7025 return 0;
7026}
bc22af74 7027core_initcall(init_per_zone_wmark_min)
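/*
 * Worked example (illustrative): on a machine with roughly 4GB of lowmem,
 * lowmem_kbytes =~ 4,194,304 and
 *
 *     new_min_free_kbytes = sqrt(4,194,304 * 16) = sqrt(67,108,864) = 8192
 *
 * which matches the "4096MB: 8192k" row of the table above, before the
 * 128..65536 clamping is applied.
 */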
1da177e4
LT
7028
7029/*
b8af2941 7030 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax() so
1da177e4
LT
7031 * that we can call two helper functions whenever min_free_kbytes
7032 * changes.
7033 */
cccad5b9 7034int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8d65af78 7035 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 7036{
da8c757b
HP
7037 int rc;
7038
7039 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7040 if (rc)
7041 return rc;
7042
5f12733e
MH
7043 if (write) {
7044 user_min_free_kbytes = min_free_kbytes;
bc75d33f 7045 setup_per_zone_wmarks();
5f12733e 7046 }
1da177e4
LT
7047 return 0;
7048}
7049
795ae7a0
JW
7050int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7051 void __user *buffer, size_t *length, loff_t *ppos)
7052{
7053 int rc;
7054
7055 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7056 if (rc)
7057 return rc;
7058
7059 if (write)
7060 setup_per_zone_wmarks();
7061
7062 return 0;
7063}
7064
9614634f 7065#ifdef CONFIG_NUMA
6423aa81 7066static void setup_min_unmapped_ratio(void)
9614634f 7067{
6423aa81 7068 pg_data_t *pgdat;
9614634f 7069 struct zone *zone;
9614634f 7070
a5f5f91d 7071 for_each_online_pgdat(pgdat)
81cbcbc2 7072 pgdat->min_unmapped_pages = 0;
a5f5f91d 7073
9614634f 7074 for_each_zone(zone)
a5f5f91d 7075 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
9614634f 7076 sysctl_min_unmapped_ratio) / 100;
9614634f 7077}
0ff38490 7078
6423aa81
JK
7079
7080int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 7081 void __user *buffer, size_t *length, loff_t *ppos)
0ff38490 7082{
0ff38490
CL
7083 int rc;
7084
8d65af78 7085 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
7086 if (rc)
7087 return rc;
7088
6423aa81
JK
7089 setup_min_unmapped_ratio();
7090
7091 return 0;
7092}
7093
7094static void setup_min_slab_ratio(void)
7095{
7096 pg_data_t *pgdat;
7097 struct zone *zone;
7098
a5f5f91d
MG
7099 for_each_online_pgdat(pgdat)
7100 pgdat->min_slab_pages = 0;
7101
0ff38490 7102 for_each_zone(zone)
a5f5f91d 7103 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
0ff38490 7104 sysctl_min_slab_ratio) / 100;
6423aa81
JK
7105}
7106
7107int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7108 void __user *buffer, size_t *length, loff_t *ppos)
7109{
7110 int rc;
7111
7112 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7113 if (rc)
7114 return rc;
7115
7116 setup_min_slab_ratio();
7117
0ff38490
CL
7118 return 0;
7119}
9614634f
CL
7120#endif
7121
1da177e4
LT
7122/*
7123 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7124 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
7125 * whenever sysctl_lowmem_reserve_ratio changes.
7126 *
7127 * The reserve ratio obviously has absolutely no relation with the
41858966 7128 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
7129 * as a function of the boot-time zone sizes.
7130 */
cccad5b9 7131int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 7132 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 7133{
8d65af78 7134 proc_dointvec_minmax(table, write, buffer, length, ppos);
1da177e4
LT
7135 setup_per_zone_lowmem_reserve();
7136 return 0;
7137}
7138
8ad4b1fb
RS
7139/*
7140 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
b8af2941
PK
7141 * cpu. It is the fraction of total pages in each zone that a hot per cpu
7142 * pagelist can have before it gets flushed back to the buddy allocator.
8ad4b1fb 7143 */
cccad5b9 7144int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8d65af78 7145 void __user *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
7146{
7147 struct zone *zone;
7cd2b0a3 7148 int old_percpu_pagelist_fraction;
8ad4b1fb
RS
7149 int ret;
7150
7cd2b0a3
DR
7151 mutex_lock(&pcp_batch_high_lock);
7152 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7153
8d65af78 7154 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7cd2b0a3
DR
7155 if (!write || ret < 0)
7156 goto out;
7157
7158 /* Sanity checking to avoid pcp imbalance */
7159 if (percpu_pagelist_fraction &&
7160 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7161 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7162 ret = -EINVAL;
7163 goto out;
7164 }
7165
7166 /* No change? */
7167 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7168 goto out;
c8e251fa 7169
364df0eb 7170 for_each_populated_zone(zone) {
7cd2b0a3
DR
7171 unsigned int cpu;
7172
22a7f12b 7173 for_each_possible_cpu(cpu)
7cd2b0a3
DR
7174 pageset_set_high_and_batch(zone,
7175 per_cpu_ptr(zone->pageset, cpu));
8ad4b1fb 7176 }
7cd2b0a3 7177out:
c8e251fa 7178 mutex_unlock(&pcp_batch_high_lock);
7cd2b0a3 7179 return ret;
8ad4b1fb
RS
7180}
7181
a9919c79 7182#ifdef CONFIG_NUMA
f034b5d4 7183int hashdist = HASHDIST_DEFAULT;
1da177e4 7184
1da177e4
LT
7185static int __init set_hashdist(char *str)
7186{
7187 if (!str)
7188 return 0;
7189 hashdist = simple_strtoul(str, &str, 0);
7190 return 1;
7191}
7192__setup("hashdist=", set_hashdist);
7193#endif
7194
f6f34b43
SD
7195#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7196/*
7197 * Returns the number of pages that arch has reserved but
7198 * is not known to alloc_large_system_hash().
7199 */
7200static unsigned long __init arch_reserved_kernel_pages(void)
7201{
7202 return 0;
7203}
7204#endif
7205
9017217b
PT
7206/*
7207 * Adaptive scale is meant to reduce sizes of hash tables on large memory
7208 * machines. As memory size is increased the scale is also increased but at
7209 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
7210 * quadruples the scale is increased by one, which means the size of hash table
7211 * only doubles, instead of quadrupling as well.
7212 * Because 32-bit systems cannot have large physical memory, where this scaling
7213 * makes sense, it is disabled on such platforms.
7214 */
7215#if __BITS_PER_LONG > 32
7216#define ADAPT_SCALE_BASE (64ul << 30)
7217#define ADAPT_SCALE_SHIFT 2
7218#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
7219#endif
7220
1da177e4
LT
7221/*
7222 * allocate a large system hash table from bootmem
7223 * - it is assumed that the hash table must contain an exact power-of-2
7224 * quantity of entries
7225 * - limit is the number of hash buckets, not the total allocation size
7226 */
7227void *__init alloc_large_system_hash(const char *tablename,
7228 unsigned long bucketsize,
7229 unsigned long numentries,
7230 int scale,
7231 int flags,
7232 unsigned int *_hash_shift,
7233 unsigned int *_hash_mask,
31fe62b9
TB
7234 unsigned long low_limit,
7235 unsigned long high_limit)
1da177e4 7236{
31fe62b9 7237 unsigned long long max = high_limit;
1da177e4
LT
7238 unsigned long log2qty, size;
7239 void *table = NULL;
3749a8f0 7240 gfp_t gfp_flags;
1da177e4
LT
7241
7242 /* allow the kernel cmdline to have a say */
7243 if (!numentries) {
7244 /* round applicable memory size up to nearest megabyte */
04903664 7245 numentries = nr_kernel_pages;
f6f34b43 7246 numentries -= arch_reserved_kernel_pages();
a7e83318
JZ
7247
7248 /* It isn't necessary when PAGE_SIZE >= 1MB */
7249 if (PAGE_SHIFT < 20)
7250 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
1da177e4 7251
9017217b
PT
7252#if __BITS_PER_LONG > 32
7253 if (!high_limit) {
7254 unsigned long adapt;
7255
7256 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
7257 adapt <<= ADAPT_SCALE_SHIFT)
7258 scale++;
7259 }
7260#endif
7261
1da177e4
LT
7262 /* limit to 1 bucket per 2^scale bytes of low memory */
7263 if (scale > PAGE_SHIFT)
7264 numentries >>= (scale - PAGE_SHIFT);
7265 else
7266 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
7267
7268 /* Make sure we've got at least a 0-order allocation.. */
2c85f51d
JB
7269 if (unlikely(flags & HASH_SMALL)) {
7270 /* Makes no sense without HASH_EARLY */
7271 WARN_ON(!(flags & HASH_EARLY));
7272 if (!(numentries >> *_hash_shift)) {
7273 numentries = 1UL << *_hash_shift;
7274 BUG_ON(!numentries);
7275 }
7276 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 7277 numentries = PAGE_SIZE / bucketsize;
1da177e4 7278 }
6e692ed3 7279 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
7280
7281 /* limit allocation size to 1/16 total memory by default */
7282 if (max == 0) {
7283 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7284 do_div(max, bucketsize);
7285 }
074b8517 7286 max = min(max, 0x80000000ULL);
1da177e4 7287
31fe62b9
TB
7288 if (numentries < low_limit)
7289 numentries = low_limit;
1da177e4
LT
7290 if (numentries > max)
7291 numentries = max;
7292
f0d1b0b3 7293 log2qty = ilog2(numentries);
1da177e4 7294
3749a8f0
PT
7295 /*
7296 * memblock allocator returns zeroed memory already, so HASH_ZERO is
7297 * currently not used when HASH_EARLY is specified.
7298 */
7299 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
1da177e4
LT
7300 do {
7301 size = bucketsize << log2qty;
7302 if (flags & HASH_EARLY)
6782832e 7303 table = memblock_virt_alloc_nopanic(size, 0);
1da177e4 7304 else if (hashdist)
3749a8f0 7305 table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
1da177e4 7306 else {
1037b83b
ED
7307 /*
7308 * If bucketsize is not a power-of-two, we may free
a1dd268c
MG
7309 * some pages at the end of the hash table, which
7310 * alloc_pages_exact() does automatically
1037b83b 7311 */
264ef8a9 7312 if (get_order(size) < MAX_ORDER) {
3749a8f0
PT
7313 table = alloc_pages_exact(size, gfp_flags);
7314 kmemleak_alloc(table, size, 1, gfp_flags);
264ef8a9 7315 }
1da177e4
LT
7316 }
7317 } while (!table && size > PAGE_SIZE && --log2qty);
7318
7319 if (!table)
7320 panic("Failed to allocate %s hash table\n", tablename);
7321
1170532b
JP
7322 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7323 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
1da177e4
LT
7324
7325 if (_hash_shift)
7326 *_hash_shift = log2qty;
7327 if (_hash_mask)
7328 *_hash_mask = (1 << log2qty) - 1;
7329
7330 return table;
7331}
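/*
 * Illustrative sketch (not part of this file): a subsystem sizing its own
 * hash table at boot would call alloc_large_system_hash() roughly like
 * this.  Every identifier prefixed with "example_" is made up; the flags
 * and out-parameters mirror the prototype above.  numentries = 0 asks for
 * a size derived from the amount of memory, and scale = 14 means about one
 * bucket per 16KB of low memory.
 */
static struct hlist_head *example_hash;
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

static void __init example_hash_init(void)
{
	example_hash = alloc_large_system_hash("example-cache",
					       sizeof(struct hlist_head),
					       0,
					       14,
					       HASH_EARLY | HASH_ZERO,
					       &example_hash_shift,
					       &example_hash_mask,
					       0, 0);
}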
a117e66e 7332
a5d76b54 7333/*
80934513
MK
7334 * This function checks whether the pageblock includes unmovable pages or not.
7335 * If @count is not zero, it is okay to include fewer than @count unmovable pages.
7336 *
b8af2941 7337 * PageLRU check without isolation or lru_lock could race so that
0efadf48
YX
7338 * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
7339 * check without lock_page may also miss some movable non-lru pages under
7340 * race conditions, so this function cannot be expected to be exact.
a5d76b54 7341 */
b023f468
WC
7342bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7343 bool skip_hwpoisoned_pages)
49ac8255
KH
7344{
7345 unsigned long pfn, iter, found;
47118af0
MN
7346 int mt;
7347
49ac8255
KH
7348 /*
7349 * To avoid noisy data, lru_add_drain_all() should be called first.
80934513 7350 * If the zone is ZONE_MOVABLE, it never contains unmovable pages.
49ac8255
KH
7351 */
7352 if (zone_idx(zone) == ZONE_MOVABLE)
80934513 7353 return false;
47118af0
MN
7354 mt = get_pageblock_migratetype(page);
7355 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
80934513 7356 return false;
49ac8255
KH
7357
7358 pfn = page_to_pfn(page);
7359 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7360 unsigned long check = pfn + iter;
7361
29723fcc 7362 if (!pfn_valid_within(check))
49ac8255 7363 continue;
29723fcc 7364
49ac8255 7365 page = pfn_to_page(check);
c8721bbb
NH
7366
7367 /*
7368 * Hugepages are not in LRU lists, but they're movable.
7369 * We need not scan over tail pages because we don't
7370 * handle each tail page individually in migration.
7371 */
7372 if (PageHuge(page)) {
7373 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7374 continue;
7375 }
7376
97d255c8
MK
7377 /*
7378 * We can't use page_count without pinning the page
7379 * because another CPU might free the compound page.
7380 * This check already skips compound tails of THP
0139aa7b 7381 * because their page->_refcount is zero at all times.
97d255c8 7382 */
fe896d18 7383 if (!page_ref_count(page)) {
49ac8255
KH
7384 if (PageBuddy(page))
7385 iter += (1 << page_order(page)) - 1;
7386 continue;
7387 }
97d255c8 7388
b023f468
WC
7389 /*
7390 * The HWPoisoned page may not be in the buddy system, and
7391 * page_count() is not 0.
7392 */
7393 if (skip_hwpoisoned_pages && PageHWPoison(page))
7394 continue;
7395
0efadf48
YX
7396 if (__PageMovable(page))
7397 continue;
7398
49ac8255
KH
7399 if (!PageLRU(page))
7400 found++;
7401 /*
6b4f7799
JW
7402 * If there are RECLAIMABLE pages, we need to check
7403 * them. But for now, memory offline itself doesn't call
7404 * shrink_node_slabs(), and this still needs to be fixed.
49ac8255
KH
7405 */
7406 /*
7407 * If the page is not RAM, page_count() should be 0.
7408 * We don't need any further checks: this is a _used_, non-movable page.
7409 *
7410 * The problematic thing here is PG_reserved pages. PG_reserved
7411 * is set on both memory hole pages and _used_ kernel
7412 * pages at boot.
7413 */
7414 if (found > count)
80934513 7415 return true;
49ac8255 7416 }
80934513 7417 return false;
49ac8255
KH
7418}
7419
7420bool is_pageblock_removable_nolock(struct page *page)
7421{
656a0706
MH
7422 struct zone *zone;
7423 unsigned long pfn;
687875fb
MH
7424
7425 /*
7426 * We have to be careful here because we are iterating over memory
7427 * sections which are not zone aware so we might end up outside of
7428 * the zone but still within the section.
656a0706
MH
7429 * We have to take care about the node as well. If the node is offline
7430 * its NODE_DATA will be NULL - see page_zone.
687875fb 7431 */
656a0706
MH
7432 if (!node_online(page_to_nid(page)))
7433 return false;
7434
7435 zone = page_zone(page);
7436 pfn = page_to_pfn(page);
108bcc96 7437 if (!zone_spans_pfn(zone, pfn))
687875fb
MH
7438 return false;
7439
b023f468 7440 return !has_unmovable_pages(zone, page, 0, true);
a5d76b54 7441}
0c0e6195 7442
080fe206 7443#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
041d3a8c
MN
7444
7445static unsigned long pfn_max_align_down(unsigned long pfn)
7446{
7447 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7448 pageblock_nr_pages) - 1);
7449}
7450
7451static unsigned long pfn_max_align_up(unsigned long pfn)
7452{
7453 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7454 pageblock_nr_pages));
7455}
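/*
 * Worked example (illustrative): with MAX_ORDER_NR_PAGES = 1024 and
 * pageblock_nr_pages = 512, the larger alignment (1024 pages) wins, so
 * pfn_max_align_down(5000) = 4096 and pfn_max_align_up(5000) = 5120.
 */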
7456
041d3a8c 7457/* [start, end) must belong to a single zone. */
bb13ffeb
MG
7458static int __alloc_contig_migrate_range(struct compact_control *cc,
7459 unsigned long start, unsigned long end)
041d3a8c
MN
7460{
7461 /* This function is based on compact_zone() from compaction.c. */
beb51eaa 7462 unsigned long nr_reclaimed;
041d3a8c
MN
7463 unsigned long pfn = start;
7464 unsigned int tries = 0;
7465 int ret = 0;
7466
be49a6e1 7467 migrate_prep();
041d3a8c 7468
bb13ffeb 7469 while (pfn < end || !list_empty(&cc->migratepages)) {
041d3a8c
MN
7470 if (fatal_signal_pending(current)) {
7471 ret = -EINTR;
7472 break;
7473 }
7474
bb13ffeb
MG
7475 if (list_empty(&cc->migratepages)) {
7476 cc->nr_migratepages = 0;
edc2ca61 7477 pfn = isolate_migratepages_range(cc, pfn, end);
041d3a8c
MN
7478 if (!pfn) {
7479 ret = -EINTR;
7480 break;
7481 }
7482 tries = 0;
7483 } else if (++tries == 5) {
7484 ret = ret < 0 ? ret : -EBUSY;
7485 break;
7486 }
7487
beb51eaa
MK
7488 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7489 &cc->migratepages);
7490 cc->nr_migratepages -= nr_reclaimed;
02c6de8d 7491
9c620e2b 7492 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
e0b9daeb 7493 NULL, 0, cc->mode, MR_CMA);
041d3a8c 7494 }
2a6f5124
SP
7495 if (ret < 0) {
7496 putback_movable_pages(&cc->migratepages);
7497 return ret;
7498 }
7499 return 0;
041d3a8c
MN
7500}
7501
7502/**
7503 * alloc_contig_range() -- tries to allocate given range of pages
7504 * @start: start PFN to allocate
7505 * @end: one-past-the-last PFN to allocate
0815f3d8
MN
7506 * @migratetype: migratetype of the underlying pageblocks (either
7507 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
7508 * in range must have the same migratetype and it must
7509 * be either of the two.
ca96b625 7510 * @gfp_mask: GFP mask to use during compaction
041d3a8c
MN
7511 *
7512 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
7513 * aligned, however it's the caller's responsibility to guarantee that
7514 * we are the only thread that changes migrate type of pageblocks the
7515 * pages fall in.
7516 *
7517 * The PFN range must belong to a single zone.
7518 *
7519 * Returns zero on success or negative error code. On success all
7520 * pages which PFN is in [start, end) are allocated for the caller and
7521 * need to be freed with free_contig_range().
7522 */
0815f3d8 7523int alloc_contig_range(unsigned long start, unsigned long end,
ca96b625 7524 unsigned migratetype, gfp_t gfp_mask)
041d3a8c 7525{
041d3a8c 7526 unsigned long outer_start, outer_end;
d00181b9
KS
7527 unsigned int order;
7528 int ret = 0;
041d3a8c 7529
bb13ffeb
MG
7530 struct compact_control cc = {
7531 .nr_migratepages = 0,
7532 .order = -1,
7533 .zone = page_zone(pfn_to_page(start)),
e0b9daeb 7534 .mode = MIGRATE_SYNC,
bb13ffeb 7535 .ignore_skip_hint = true,
7dea19f9 7536 .gfp_mask = current_gfp_context(gfp_mask),
bb13ffeb
MG
7537 };
7538 INIT_LIST_HEAD(&cc.migratepages);
7539
041d3a8c
MN
7540 /*
7541 * What we do here is we mark all pageblocks in range as
7542 * MIGRATE_ISOLATE. Because pageblock and max order pages may
7543 * have different sizes, and due to the way page allocator
7544 * works, we align the range to the biggest of the two so
7545 * that page allocator won't try to merge buddies from
7546 * different pageblocks and change MIGRATE_ISOLATE to some
7547 * other migration type.
7548 *
7549 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7550 * migrate the pages from an unaligned range (ie. pages that
7551 * we are interested in). This will put all the pages in
7552 * range back to page allocator as MIGRATE_ISOLATE.
7553 *
7554 * When this is done, we take the pages in range from page
7555 * allocator removing them from the buddy system. This way
7556 * page allocator will never consider using them.
7557 *
7558 * This lets us mark the pageblocks back as
7559 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7560 * aligned range but not in the unaligned, original range are
7561 * put back to page allocator so that buddy can use them.
7562 */
7563
7564 ret = start_isolate_page_range(pfn_max_align_down(start),
b023f468
WC
7565 pfn_max_align_up(end), migratetype,
7566 false);
041d3a8c 7567 if (ret)
86a595f9 7568 return ret;
041d3a8c 7569
8ef5849f
JK
7570 /*
7571 * In case of -EBUSY, we'd like to know which page causes the problem.
7572 * So, just fall through. We will check it in test_pages_isolated().
7573 */
bb13ffeb 7574 ret = __alloc_contig_migrate_range(&cc, start, end);
8ef5849f 7575 if (ret && ret != -EBUSY)
041d3a8c
MN
7576 goto done;
7577
7578 /*
7579 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
7580 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
7581 * more, all pages in [start, end) are free in page allocator.
7582 * What we are going to do is to allocate all pages from
7583 * [start, end) (that is remove them from page allocator).
7584 *
7585 * The only problem is that pages at the beginning and at the
7586 * end of the interesting range may not be aligned with pages that
7587 * page allocator holds, ie. they can be part of higher order
7588 * pages. Because of this, we reserve the bigger range and
7589 * once this is done free the pages we are not interested in.
7590 *
7591 * We don't have to hold zone->lock here because the pages are
7592 * isolated thus they won't get removed from buddy.
7593 */
7594
7595 lru_add_drain_all();
510f5507 7596 drain_all_pages(cc.zone);
041d3a8c
MN
7597
7598 order = 0;
7599 outer_start = start;
7600 while (!PageBuddy(pfn_to_page(outer_start))) {
7601 if (++order >= MAX_ORDER) {
8ef5849f
JK
7602 outer_start = start;
7603 break;
041d3a8c
MN
7604 }
7605 outer_start &= ~0UL << order;
7606 }
7607
8ef5849f
JK
7608 if (outer_start != start) {
7609 order = page_order(pfn_to_page(outer_start));
7610
7611 /*
7612 * outer_start page could be small order buddy page and
7613 * it doesn't include start page. Adjust outer_start
7614 * in this case to report failed page properly
7615 * on tracepoint in test_pages_isolated()
7616 */
7617 if (outer_start + (1UL << order) <= start)
7618 outer_start = start;
7619 }
7620
041d3a8c 7621 /* Make sure the range is really isolated. */
b023f468 7622 if (test_pages_isolated(outer_start, end, false)) {
dae803e1
MN
7623 pr_info("%s: [%lx, %lx) PFNs busy\n",
7624 __func__, outer_start, end);
041d3a8c
MN
7625 ret = -EBUSY;
7626 goto done;
7627 }
7628
49f223a9 7629 /* Grab isolated pages from freelists. */
bb13ffeb 7630 outer_end = isolate_freepages_range(&cc, outer_start, end);
041d3a8c
MN
7631 if (!outer_end) {
7632 ret = -EBUSY;
7633 goto done;
7634 }
7635
7636 /* Free head and tail (if any) */
7637 if (start != outer_start)
7638 free_contig_range(outer_start, start - outer_start);
7639 if (end != outer_end)
7640 free_contig_range(end, outer_end - end);
7641
7642done:
7643 undo_isolate_page_range(pfn_max_align_down(start),
0815f3d8 7644 pfn_max_align_up(end), migratetype);
041d3a8c
MN
7645 return ret;
7646}
7647
7648void free_contig_range(unsigned long pfn, unsigned nr_pages)
7649{
bcc2b02f
MS
7650 unsigned int count = 0;
7651
7652 for (; nr_pages--; pfn++) {
7653 struct page *page = pfn_to_page(pfn);
7654
7655 count += page_count(page) != 1;
7656 __free_page(page);
7657 }
7658 WARN(count != 0, "%d pages are still in use!\n", count);
041d3a8c
MN
7659}
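/*
 * Illustrative sketch (not part of this file): a caller that has reserved a
 * physically contiguous PFN window whose pageblocks are MIGRATE_MOVABLE
 * could grab and release nr pages of it like this; example_base_pfn is a
 * placeholder for the start of that window.
 */
static struct page *example_grab_contig(unsigned long example_base_pfn,
					unsigned long nr)
{
	if (alloc_contig_range(example_base_pfn, example_base_pfn + nr,
			       MIGRATE_MOVABLE, GFP_KERNEL))
		return NULL;
	return pfn_to_page(example_base_pfn);
}

static void example_release_contig(struct page *page, unsigned long nr)
{
	free_contig_range(page_to_pfn(page), nr);
}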
7660#endif
7661
4ed7e022 7662#ifdef CONFIG_MEMORY_HOTPLUG
0a647f38
CS
7663/*
7664 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7665 * page high values need to be recalculated.
7666 */
4ed7e022
JL
7667void __meminit zone_pcp_update(struct zone *zone)
7668{
0a647f38 7669 unsigned cpu;
c8e251fa 7670 mutex_lock(&pcp_batch_high_lock);
0a647f38 7671 for_each_possible_cpu(cpu)
169f6c19
CS
7672 pageset_set_high_and_batch(zone,
7673 per_cpu_ptr(zone->pageset, cpu));
c8e251fa 7674 mutex_unlock(&pcp_batch_high_lock);
4ed7e022
JL
7675}
7676#endif
7677
340175b7
JL
7678void zone_pcp_reset(struct zone *zone)
7679{
7680 unsigned long flags;
5a883813
MK
7681 int cpu;
7682 struct per_cpu_pageset *pset;
340175b7
JL
7683
7684 /* avoid races with drain_pages() */
7685 local_irq_save(flags);
7686 if (zone->pageset != &boot_pageset) {
5a883813
MK
7687 for_each_online_cpu(cpu) {
7688 pset = per_cpu_ptr(zone->pageset, cpu);
7689 drain_zonestat(zone, pset);
7690 }
340175b7
JL
7691 free_percpu(zone->pageset);
7692 zone->pageset = &boot_pageset;
7693 }
7694 local_irq_restore(flags);
7695}
7696
6dcd73d7 7697#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195 7698/*
b9eb6319
JK
7699 * All pages in the range must be in a single zone and isolated
7700 * before calling this.
0c0e6195
KH
7701 */
7702void
7703__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7704{
7705 struct page *page;
7706 struct zone *zone;
7aeb09f9 7707 unsigned int order, i;
0c0e6195
KH
7708 unsigned long pfn;
7709 unsigned long flags;
7710 /* find the first valid pfn */
7711 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7712 if (pfn_valid(pfn))
7713 break;
7714 if (pfn == end_pfn)
7715 return;
2d070eab 7716 offline_mem_sections(pfn, end_pfn);
0c0e6195
KH
7717 zone = page_zone(pfn_to_page(pfn));
7718 spin_lock_irqsave(&zone->lock, flags);
7719 pfn = start_pfn;
7720 while (pfn < end_pfn) {
7721 if (!pfn_valid(pfn)) {
7722 pfn++;
7723 continue;
7724 }
7725 page = pfn_to_page(pfn);
b023f468
WC
7726 /*
7727 * The HWPoisoned page may not be in the buddy system, and
7728 * page_count() is not 0.
7729 */
7730 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7731 pfn++;
7732 SetPageReserved(page);
7733 continue;
7734 }
7735
0c0e6195
KH
7736 BUG_ON(page_count(page));
7737 BUG_ON(!PageBuddy(page));
7738 order = page_order(page);
7739#ifdef CONFIG_DEBUG_VM
1170532b
JP
7740 pr_info("remove from free list %lx %d %lx\n",
7741 pfn, 1 << order, end_pfn);
0c0e6195
KH
7742#endif
7743 list_del(&page->lru);
7744 rmv_page_order(page);
7745 zone->free_area[order].nr_free--;
0c0e6195
KH
7746 for (i = 0; i < (1 << order); i++)
7747 SetPageReserved((page+i));
7748 pfn += (1 << order);
7749 }
7750 spin_unlock_irqrestore(&zone->lock, flags);
7751}
7752#endif
8d22ba1b 7753
8d22ba1b
WF
7754bool is_free_buddy_page(struct page *page)
7755{
7756 struct zone *zone = page_zone(page);
7757 unsigned long pfn = page_to_pfn(page);
7758 unsigned long flags;
7aeb09f9 7759 unsigned int order;
8d22ba1b
WF
7760
7761 spin_lock_irqsave(&zone->lock, flags);
7762 for (order = 0; order < MAX_ORDER; order++) {
7763 struct page *page_head = page - (pfn & ((1 << order) - 1));
7764
7765 if (PageBuddy(page_head) && page_order(page_head) >= order)
7766 break;
7767 }
7768 spin_unlock_irqrestore(&zone->lock, flags);
7769
7770 return order < MAX_ORDER;
7771}