]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - mm/page_alloc.c
mm, swap: avoid lock swap_avail_lock when held cluster lock
[mirror_ubuntu-focal-kernel.git] / mm / page_alloc.c
CommitLineData
1da177e4
LT
1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list, the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
10ed273f 22#include <linux/jiffies.h>
1da177e4 23#include <linux/bootmem.h>
edbe7d23 24#include <linux/memblock.h>
1da177e4 25#include <linux/compiler.h>
9f158333 26#include <linux/kernel.h>
b1eeab67 27#include <linux/kmemcheck.h>
b8c73fc2 28#include <linux/kasan.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/suspend.h>
31#include <linux/pagevec.h>
32#include <linux/blkdev.h>
33#include <linux/slab.h>
a238ab5b 34#include <linux/ratelimit.h>
5a3135c2 35#include <linux/oom.h>
1da177e4
LT
36#include <linux/notifier.h>
37#include <linux/topology.h>
38#include <linux/sysctl.h>
39#include <linux/cpu.h>
40#include <linux/cpuset.h>
bdc8cb98 41#include <linux/memory_hotplug.h>
1da177e4
LT
42#include <linux/nodemask.h>
43#include <linux/vmalloc.h>
a6cccdc3 44#include <linux/vmstat.h>
4be38e35 45#include <linux/mempolicy.h>
4b94ffdc 46#include <linux/memremap.h>
6811378e 47#include <linux/stop_machine.h>
c713216d
MG
48#include <linux/sort.h>
49#include <linux/pfn.h>
3fcfab16 50#include <linux/backing-dev.h>
933e312e 51#include <linux/fault-inject.h>
a5d76b54 52#include <linux/page-isolation.h>
eefa864b 53#include <linux/page_ext.h>
3ac7fe5a 54#include <linux/debugobjects.h>
dbb1f81c 55#include <linux/kmemleak.h>
56de7263 56#include <linux/compaction.h>
0d3d062a 57#include <trace/events/kmem.h>
d379f01d 58#include <trace/events/oom.h>
268bb0ce 59#include <linux/prefetch.h>
6e543d57 60#include <linux/mm_inline.h>
041d3a8c 61#include <linux/migrate.h>
949f7ec5 62#include <linux/hugetlb.h>
8bd75c77 63#include <linux/sched/rt.h>
5b3cc15a 64#include <linux/sched/mm.h>
48c96a36 65#include <linux/page_owner.h>
0e1cc95b 66#include <linux/kthread.h>
4949148a 67#include <linux/memcontrol.h>
1da177e4 68
7ee3d4e8 69#include <asm/sections.h>
1da177e4 70#include <asm/tlbflush.h>
ac924c60 71#include <asm/div64.h>
1da177e4
LT
72#include "internal.h"
73
c8e251fa
CS
74/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
75static DEFINE_MUTEX(pcp_batch_high_lock);
7cd2b0a3 76#define MIN_PERCPU_PAGELIST_FRACTION (8)
c8e251fa 77
72812019
LS
78#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
79DEFINE_PER_CPU(int, numa_node);
80EXPORT_PER_CPU_SYMBOL(numa_node);
81#endif
82
7aac7898
LS
83#ifdef CONFIG_HAVE_MEMORYLESS_NODES
84/*
85 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
86 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
87 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
88 * defined in <linux/topology.h>.
89 */
90DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
91EXPORT_PER_CPU_SYMBOL(_numa_mem_);
ad2c8144 92int _node_numa_mem_[MAX_NUMNODES];
7aac7898
LS
93#endif
94
bd233f53
MG
95/* work_structs for global per-cpu drains */
96DEFINE_MUTEX(pcpu_drain_mutex);
97DEFINE_PER_CPU(struct work_struct, pcpu_drain);
98
38addce8 99#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
58bea414 100volatile unsigned long latent_entropy __latent_entropy;
38addce8
ER
101EXPORT_SYMBOL(latent_entropy);
102#endif
103
1da177e4 104/*
13808910 105 * Array of node states.
1da177e4 106 */
13808910
CL
107nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
108 [N_POSSIBLE] = NODE_MASK_ALL,
109 [N_ONLINE] = { { [0] = 1UL } },
110#ifndef CONFIG_NUMA
111 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
112#ifdef CONFIG_HIGHMEM
113 [N_HIGH_MEMORY] = { { [0] = 1UL } },
20b2f52b
LJ
114#endif
115#ifdef CONFIG_MOVABLE_NODE
116 [N_MEMORY] = { { [0] = 1UL } },
13808910
CL
117#endif
118 [N_CPU] = { { [0] = 1UL } },
119#endif /* NUMA */
120};
121EXPORT_SYMBOL(node_states);
122
c3d5f5f0
JL
123/* Protect totalram_pages and zone->managed_pages */
124static DEFINE_SPINLOCK(managed_page_count_lock);
125
6c231b7b 126unsigned long totalram_pages __read_mostly;
cb45b0e9 127unsigned long totalreserve_pages __read_mostly;
e48322ab 128unsigned long totalcma_pages __read_mostly;
ab8fabd4 129
1b76b02f 130int percpu_pagelist_fraction;
dcce284a 131gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
1da177e4 132
bb14c2c7
VB
133/*
134 * A cached value of the page's pageblock's migratetype, used when the page is
135 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
136 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
137 * Also the migratetype set in the page does not necessarily match the pcplist
138 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
139 * other index - this ensures that it will be put on the correct CMA freelist.
140 */
141static inline int get_pcppage_migratetype(struct page *page)
142{
143 return page->index;
144}
145
146static inline void set_pcppage_migratetype(struct page *page, int migratetype)
147{
148 page->index = migratetype;
149}
150
452aa699
RW
151#ifdef CONFIG_PM_SLEEP
152/*
153 * The following functions are used by the suspend/hibernate code to temporarily
154 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
155 * while devices are suspended. To avoid races with the suspend/hibernate code,
156 * they should always be called with pm_mutex held (gfp_allowed_mask also should
157 * only be modified with pm_mutex held, unless the suspend/hibernate code is
158 * guaranteed not to run in parallel with that modification).
159 */
c9e664f1
RW
160
161static gfp_t saved_gfp_mask;
162
163void pm_restore_gfp_mask(void)
452aa699
RW
164{
165 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
166 if (saved_gfp_mask) {
167 gfp_allowed_mask = saved_gfp_mask;
168 saved_gfp_mask = 0;
169 }
452aa699
RW
170}
171
c9e664f1 172void pm_restrict_gfp_mask(void)
452aa699 173{
452aa699 174 WARN_ON(!mutex_is_locked(&pm_mutex));
c9e664f1
RW
175 WARN_ON(saved_gfp_mask);
176 saved_gfp_mask = gfp_allowed_mask;
d0164adc 177 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
452aa699 178}
f90ac398
MG
179
180bool pm_suspended_storage(void)
181{
d0164adc 182 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
f90ac398
MG
183 return false;
184 return true;
185}
452aa699
RW
186#endif /* CONFIG_PM_SLEEP */
187
d9c23400 188#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
d00181b9 189unsigned int pageblock_order __read_mostly;
d9c23400
MG
190#endif
191
d98c7a09 192static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 193
1da177e4
LT
194/*
195 * results with 256, 32 in the lowmem_reserve sysctl:
196 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
197 * 1G machine -> (16M dma, 784M normal, 224M high)
198 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
199 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
84109e15 200 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
a2f1b424
AK
201 *
202 * TBD: should special case ZONE_DMA32 machines here - in those we normally
203 * don't need any ZONE_NORMAL reservation
1da177e4 204 */
2f1b6248 205int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 206#ifdef CONFIG_ZONE_DMA
2f1b6248 207 256,
4b51d669 208#endif
fb0e7942 209#ifdef CONFIG_ZONE_DMA32
2f1b6248 210 256,
fb0e7942 211#endif
e53ef38d 212#ifdef CONFIG_HIGHMEM
2a1e274a 213 32,
e53ef38d 214#endif
2a1e274a 215 32,
2f1b6248 216};
1da177e4
LT
217
218EXPORT_SYMBOL(totalram_pages);
1da177e4 219
15ad7cdc 220static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 221#ifdef CONFIG_ZONE_DMA
2f1b6248 222 "DMA",
4b51d669 223#endif
fb0e7942 224#ifdef CONFIG_ZONE_DMA32
2f1b6248 225 "DMA32",
fb0e7942 226#endif
2f1b6248 227 "Normal",
e53ef38d 228#ifdef CONFIG_HIGHMEM
2a1e274a 229 "HighMem",
e53ef38d 230#endif
2a1e274a 231 "Movable",
033fbae9
DW
232#ifdef CONFIG_ZONE_DEVICE
233 "Device",
234#endif
2f1b6248
CL
235};
236
60f30350
VB
237char * const migratetype_names[MIGRATE_TYPES] = {
238 "Unmovable",
239 "Movable",
240 "Reclaimable",
241 "HighAtomic",
242#ifdef CONFIG_CMA
243 "CMA",
244#endif
245#ifdef CONFIG_MEMORY_ISOLATION
246 "Isolate",
247#endif
248};
249
f1e61557
KS
250compound_page_dtor * const compound_page_dtors[] = {
251 NULL,
252 free_compound_page,
253#ifdef CONFIG_HUGETLB_PAGE
254 free_huge_page,
255#endif
9a982250
KS
256#ifdef CONFIG_TRANSPARENT_HUGEPAGE
257 free_transhuge_page,
258#endif
f1e61557
KS
259};
260
1da177e4 261int min_free_kbytes = 1024;
42aa83cb 262int user_min_free_kbytes = -1;
795ae7a0 263int watermark_scale_factor = 10;
1da177e4 264
2c85f51d
JB
265static unsigned long __meminitdata nr_kernel_pages;
266static unsigned long __meminitdata nr_all_pages;
a3142c8e 267static unsigned long __meminitdata dma_reserve;
1da177e4 268
0ee332c1
TH
269#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
270static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
271static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
272static unsigned long __initdata required_kernelcore;
273static unsigned long __initdata required_movablecore;
274static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
342332e6 275static bool mirrored_kernelcore;
0ee332c1
TH
276
277/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
278int movable_zone;
279EXPORT_SYMBOL(movable_zone);
280#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 281
418508c1
MS
282#if MAX_NUMNODES > 1
283int nr_node_ids __read_mostly = MAX_NUMNODES;
62bc62a8 284int nr_online_nodes __read_mostly = 1;
418508c1 285EXPORT_SYMBOL(nr_node_ids);
62bc62a8 286EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
287#endif
288
9ef9acb0
MG
289int page_group_by_mobility_disabled __read_mostly;
290
3a80a7fa
MG
291#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
292static inline void reset_deferred_meminit(pg_data_t *pgdat)
293{
294 pgdat->first_deferred_pfn = ULONG_MAX;
295}
296
297/* Returns true if the struct page for the pfn is uninitialised */
0e1cc95b 298static inline bool __meminit early_page_uninitialised(unsigned long pfn)
3a80a7fa 299{
ef70b6f4
MG
300 int nid = early_pfn_to_nid(pfn);
301
302 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
3a80a7fa
MG
303 return true;
304
305 return false;
306}
307
308/*
309 * Returns false when the remaining initialisation should be deferred until
310 * later in the boot cycle when it can be parallelised.
311 */
312static inline bool update_defer_init(pg_data_t *pgdat,
313 unsigned long pfn, unsigned long zone_end,
314 unsigned long *nr_initialised)
315{
987b3095
LZ
316 unsigned long max_initialise;
317
3a80a7fa
MG
318 /* Always populate low zones for address-contrained allocations */
319 if (zone_end < pgdat_end_pfn(pgdat))
320 return true;
987b3095
LZ
321 /*
322 * Initialise at least 2G of a node but also take into account that
323 * two large system hashes that can take up 1GB for 0.25TB/node.
324 */
325 max_initialise = max(2UL << (30 - PAGE_SHIFT),
326 (pgdat->node_spanned_pages >> 8));
3a80a7fa 327
3a80a7fa 328 (*nr_initialised)++;
987b3095 329 if ((*nr_initialised > max_initialise) &&
3a80a7fa
MG
330 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
331 pgdat->first_deferred_pfn = pfn;
332 return false;
333 }
334
335 return true;
336}
337#else
338static inline void reset_deferred_meminit(pg_data_t *pgdat)
339{
340}
341
342static inline bool early_page_uninitialised(unsigned long pfn)
343{
344 return false;
345}
346
347static inline bool update_defer_init(pg_data_t *pgdat,
348 unsigned long pfn, unsigned long zone_end,
349 unsigned long *nr_initialised)
350{
351 return true;
352}
353#endif
354
0b423ca2
MG
355/* Return a pointer to the bitmap storing bits affecting a block of pages */
356static inline unsigned long *get_pageblock_bitmap(struct page *page,
357 unsigned long pfn)
358{
359#ifdef CONFIG_SPARSEMEM
360 return __pfn_to_section(pfn)->pageblock_flags;
361#else
362 return page_zone(page)->pageblock_flags;
363#endif /* CONFIG_SPARSEMEM */
364}
365
366static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
367{
368#ifdef CONFIG_SPARSEMEM
369 pfn &= (PAGES_PER_SECTION-1);
370 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
371#else
372 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
373 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
374#endif /* CONFIG_SPARSEMEM */
375}
376
377/**
378 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
379 * @page: The page within the block of interest
380 * @pfn: The target page frame number
381 * @end_bitidx: The last bit of interest to retrieve
382 * @mask: mask of bits that the caller is interested in
383 *
384 * Return: pageblock_bits flags
385 */
386static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
387 unsigned long pfn,
388 unsigned long end_bitidx,
389 unsigned long mask)
390{
391 unsigned long *bitmap;
392 unsigned long bitidx, word_bitidx;
393 unsigned long word;
394
395 bitmap = get_pageblock_bitmap(page, pfn);
396 bitidx = pfn_to_bitidx(page, pfn);
397 word_bitidx = bitidx / BITS_PER_LONG;
398 bitidx &= (BITS_PER_LONG-1);
399
400 word = bitmap[word_bitidx];
401 bitidx += end_bitidx;
402 return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
403}
404
405unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
406 unsigned long end_bitidx,
407 unsigned long mask)
408{
409 return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
410}
411
412static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
413{
414 return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
415}
416
417/**
418 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
419 * @page: The page within the block of interest
420 * @flags: The flags to set
421 * @pfn: The target page frame number
422 * @end_bitidx: The last bit of interest
423 * @mask: mask of bits that the caller is interested in
424 */
425void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
426 unsigned long pfn,
427 unsigned long end_bitidx,
428 unsigned long mask)
429{
430 unsigned long *bitmap;
431 unsigned long bitidx, word_bitidx;
432 unsigned long old_word, word;
433
434 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
435
436 bitmap = get_pageblock_bitmap(page, pfn);
437 bitidx = pfn_to_bitidx(page, pfn);
438 word_bitidx = bitidx / BITS_PER_LONG;
439 bitidx &= (BITS_PER_LONG-1);
440
441 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
442
443 bitidx += end_bitidx;
444 mask <<= (BITS_PER_LONG - bitidx - 1);
445 flags <<= (BITS_PER_LONG - bitidx - 1);
446
447 word = READ_ONCE(bitmap[word_bitidx]);
448 for (;;) {
449 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
450 if (word == old_word)
451 break;
452 word = old_word;
453 }
454}
3a80a7fa 455
ee6f509c 456void set_pageblock_migratetype(struct page *page, int migratetype)
b2a0ac88 457{
5d0f3f72
KM
458 if (unlikely(page_group_by_mobility_disabled &&
459 migratetype < MIGRATE_PCPTYPES))
49255c61
MG
460 migratetype = MIGRATE_UNMOVABLE;
461
b2a0ac88
MG
462 set_pageblock_flags_group(page, (unsigned long)migratetype,
463 PB_migrate, PB_migrate_end);
464}
465
13e7444b 466#ifdef CONFIG_DEBUG_VM
c6a57e19 467static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 468{
bdc8cb98
DH
469 int ret = 0;
470 unsigned seq;
471 unsigned long pfn = page_to_pfn(page);
b5e6a5a2 472 unsigned long sp, start_pfn;
c6a57e19 473
bdc8cb98
DH
474 do {
475 seq = zone_span_seqbegin(zone);
b5e6a5a2
CS
476 start_pfn = zone->zone_start_pfn;
477 sp = zone->spanned_pages;
108bcc96 478 if (!zone_spans_pfn(zone, pfn))
bdc8cb98
DH
479 ret = 1;
480 } while (zone_span_seqretry(zone, seq));
481
b5e6a5a2 482 if (ret)
613813e8
DH
483 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
484 pfn, zone_to_nid(zone), zone->name,
485 start_pfn, start_pfn + sp);
b5e6a5a2 486
bdc8cb98 487 return ret;
c6a57e19
DH
488}
489
490static int page_is_consistent(struct zone *zone, struct page *page)
491{
14e07298 492 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 493 return 0;
1da177e4 494 if (zone != page_zone(page))
c6a57e19
DH
495 return 0;
496
497 return 1;
498}
499/*
500 * Temporary debugging check for pages not lying within a given zone.
501 */
502static int bad_range(struct zone *zone, struct page *page)
503{
504 if (page_outside_zone_boundaries(zone, page))
1da177e4 505 return 1;
c6a57e19
DH
506 if (!page_is_consistent(zone, page))
507 return 1;
508
1da177e4
LT
509 return 0;
510}
13e7444b
NP
511#else
512static inline int bad_range(struct zone *zone, struct page *page)
513{
514 return 0;
515}
516#endif
517
d230dec1
KS
518static void bad_page(struct page *page, const char *reason,
519 unsigned long bad_flags)
1da177e4 520{
d936cf9b
HD
521 static unsigned long resume;
522 static unsigned long nr_shown;
523 static unsigned long nr_unshown;
524
525 /*
526 * Allow a burst of 60 reports, then keep quiet for that minute;
527 * or allow a steady drip of one report per second.
528 */
529 if (nr_shown == 60) {
530 if (time_before(jiffies, resume)) {
531 nr_unshown++;
532 goto out;
533 }
534 if (nr_unshown) {
ff8e8116 535 pr_alert(
1e9e6365 536 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
537 nr_unshown);
538 nr_unshown = 0;
539 }
540 nr_shown = 0;
541 }
542 if (nr_shown++ == 0)
543 resume = jiffies + 60 * HZ;
544
ff8e8116 545 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 546 current->comm, page_to_pfn(page));
ff8e8116
VB
547 __dump_page(page, reason);
548 bad_flags &= page->flags;
549 if (bad_flags)
550 pr_alert("bad because of flags: %#lx(%pGp)\n",
551 bad_flags, &bad_flags);
4e462112 552 dump_page_owner(page);
3dc14741 553
4f31888c 554 print_modules();
1da177e4 555 dump_stack();
d936cf9b 556out:
8cc3b392 557 /* Leave bad fields for debug, except PageBuddy could make trouble */
22b751c3 558 page_mapcount_reset(page); /* remove PageBuddy */
373d4d09 559 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1da177e4
LT
560}
561
1da177e4
LT
562/*
563 * Higher-order pages are called "compound pages". They are structured thusly:
564 *
1d798ca3 565 * The first PAGE_SIZE page is called the "head page" and have PG_head set.
1da177e4 566 *
1d798ca3
KS
567 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
568 * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
1da177e4 569 *
1d798ca3
KS
570 * The first tail page's ->compound_dtor holds the offset in array of compound
571 * page destructors. See compound_page_dtors.
1da177e4 572 *
1d798ca3 573 * The first tail page's ->compound_order holds the order of allocation.
41d78ba5 574 * This usage means that zero-order pages may not be compound.
1da177e4 575 */
d98c7a09 576
9a982250 577void free_compound_page(struct page *page)
d98c7a09 578{
d85f3385 579 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
580}
581
d00181b9 582void prep_compound_page(struct page *page, unsigned int order)
18229df5
AW
583{
584 int i;
585 int nr_pages = 1 << order;
586
f1e61557 587 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
18229df5
AW
588 set_compound_order(page, order);
589 __SetPageHead(page);
590 for (i = 1; i < nr_pages; i++) {
591 struct page *p = page + i;
58a84aa9 592 set_page_count(p, 0);
1c290f64 593 p->mapping = TAIL_MAPPING;
1d798ca3 594 set_compound_head(p, page);
18229df5 595 }
53f9263b 596 atomic_set(compound_mapcount_ptr(page), -1);
18229df5
AW
597}
598
c0a32fc5
SG
599#ifdef CONFIG_DEBUG_PAGEALLOC
600unsigned int _debug_guardpage_minorder;
ea6eabb0
CB
601bool _debug_pagealloc_enabled __read_mostly
602 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
505f6d22 603EXPORT_SYMBOL(_debug_pagealloc_enabled);
e30825f1
JK
604bool _debug_guardpage_enabled __read_mostly;
605
031bc574
JK
606static int __init early_debug_pagealloc(char *buf)
607{
608 if (!buf)
609 return -EINVAL;
2a138dc7 610 return kstrtobool(buf, &_debug_pagealloc_enabled);
031bc574
JK
611}
612early_param("debug_pagealloc", early_debug_pagealloc);
613
e30825f1
JK
614static bool need_debug_guardpage(void)
615{
031bc574
JK
616 /* If we don't use debug_pagealloc, we don't need guard page */
617 if (!debug_pagealloc_enabled())
618 return false;
619
f1c1e9f7
JK
620 if (!debug_guardpage_minorder())
621 return false;
622
e30825f1
JK
623 return true;
624}
625
626static void init_debug_guardpage(void)
627{
031bc574
JK
628 if (!debug_pagealloc_enabled())
629 return;
630
f1c1e9f7
JK
631 if (!debug_guardpage_minorder())
632 return;
633
e30825f1
JK
634 _debug_guardpage_enabled = true;
635}
636
637struct page_ext_operations debug_guardpage_ops = {
638 .need = need_debug_guardpage,
639 .init = init_debug_guardpage,
640};
c0a32fc5
SG
641
642static int __init debug_guardpage_minorder_setup(char *buf)
643{
644 unsigned long res;
645
646 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
1170532b 647 pr_err("Bad debug_guardpage_minorder value\n");
c0a32fc5
SG
648 return 0;
649 }
650 _debug_guardpage_minorder = res;
1170532b 651 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
c0a32fc5
SG
652 return 0;
653}
f1c1e9f7 654early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
c0a32fc5 655
acbc15a4 656static inline bool set_page_guard(struct zone *zone, struct page *page,
2847cf95 657 unsigned int order, int migratetype)
c0a32fc5 658{
e30825f1
JK
659 struct page_ext *page_ext;
660
661 if (!debug_guardpage_enabled())
acbc15a4
JK
662 return false;
663
664 if (order >= debug_guardpage_minorder())
665 return false;
e30825f1
JK
666
667 page_ext = lookup_page_ext(page);
f86e4271 668 if (unlikely(!page_ext))
acbc15a4 669 return false;
f86e4271 670
e30825f1
JK
671 __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
672
2847cf95
JK
673 INIT_LIST_HEAD(&page->lru);
674 set_page_private(page, order);
675 /* Guard pages are not available for any usage */
676 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
acbc15a4
JK
677
678 return true;
c0a32fc5
SG
679}
680
2847cf95
JK
681static inline void clear_page_guard(struct zone *zone, struct page *page,
682 unsigned int order, int migratetype)
c0a32fc5 683{
e30825f1
JK
684 struct page_ext *page_ext;
685
686 if (!debug_guardpage_enabled())
687 return;
688
689 page_ext = lookup_page_ext(page);
f86e4271
YS
690 if (unlikely(!page_ext))
691 return;
692
e30825f1
JK
693 __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
694
2847cf95
JK
695 set_page_private(page, 0);
696 if (!is_migrate_isolate(migratetype))
697 __mod_zone_freepage_state(zone, (1 << order), migratetype);
c0a32fc5
SG
698}
699#else
980ac167 700struct page_ext_operations debug_guardpage_ops;
acbc15a4
JK
701static inline bool set_page_guard(struct zone *zone, struct page *page,
702 unsigned int order, int migratetype) { return false; }
2847cf95
JK
703static inline void clear_page_guard(struct zone *zone, struct page *page,
704 unsigned int order, int migratetype) {}
c0a32fc5
SG
705#endif
706
7aeb09f9 707static inline void set_page_order(struct page *page, unsigned int order)
6aa3001b 708{
4c21e2f2 709 set_page_private(page, order);
676165a8 710 __SetPageBuddy(page);
1da177e4
LT
711}
712
713static inline void rmv_page_order(struct page *page)
714{
676165a8 715 __ClearPageBuddy(page);
4c21e2f2 716 set_page_private(page, 0);
1da177e4
LT
717}
718
1da177e4
LT
719/*
720 * This function checks whether a page is free && is the buddy
721 * we can do coalesce a page and its buddy if
13ad59df 722 * (a) the buddy is not in a hole (check before calling!) &&
676165a8 723 * (b) the buddy is in the buddy system &&
cb2b95e1
AW
724 * (c) a page and its buddy have the same order &&
725 * (d) a page and its buddy are in the same zone.
676165a8 726 *
cf6fe945
WSH
727 * For recording whether a page is in the buddy system, we set ->_mapcount
728 * PAGE_BUDDY_MAPCOUNT_VALUE.
729 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
730 * serialized by zone->lock.
1da177e4 731 *
676165a8 732 * For recording page's order, we use page_private(page).
1da177e4 733 */
cb2b95e1 734static inline int page_is_buddy(struct page *page, struct page *buddy,
7aeb09f9 735 unsigned int order)
1da177e4 736{
c0a32fc5 737 if (page_is_guard(buddy) && page_order(buddy) == order) {
d34c5fa0
MG
738 if (page_zone_id(page) != page_zone_id(buddy))
739 return 0;
740
4c5018ce
WY
741 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
742
c0a32fc5
SG
743 return 1;
744 }
745
cb2b95e1 746 if (PageBuddy(buddy) && page_order(buddy) == order) {
d34c5fa0
MG
747 /*
748 * zone check is done late to avoid uselessly
749 * calculating zone/node ids for pages that could
750 * never merge.
751 */
752 if (page_zone_id(page) != page_zone_id(buddy))
753 return 0;
754
4c5018ce
WY
755 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
756
6aa3001b 757 return 1;
676165a8 758 }
6aa3001b 759 return 0;
1da177e4
LT
760}
761
762/*
763 * Freeing function for a buddy system allocator.
764 *
765 * The concept of a buddy system is to maintain direct-mapped table
766 * (containing bit values) for memory blocks of various "orders".
767 * The bottom level table contains the map for the smallest allocatable
768 * units of memory (here, pages), and each level above it describes
769 * pairs of units from the levels below, hence, "buddies".
770 * At a high level, all that happens here is marking the table entry
771 * at the bottom level available, and propagating the changes upward
772 * as necessary, plus some accounting needed to play nicely with other
773 * parts of the VM system.
774 * At each level, we keep a list of pages, which are heads of continuous
cf6fe945
WSH
775 * free pages of length of (1 << order) and marked with _mapcount
776 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
777 * field.
1da177e4 778 * So when we are allocating or freeing one, we can derive the state of the
5f63b720
MN
779 * other. That is, if we allocate a small block, and both were
780 * free, the remainder of the region must be split into blocks.
1da177e4 781 * If a block is freed, and its buddy is also free, then this
5f63b720 782 * triggers coalescing into a block of larger size.
1da177e4 783 *
6d49e352 784 * -- nyc
1da177e4
LT
785 */
786
48db57f8 787static inline void __free_one_page(struct page *page,
dc4b0caf 788 unsigned long pfn,
ed0ae21d
MG
789 struct zone *zone, unsigned int order,
790 int migratetype)
1da177e4 791{
76741e77
VB
792 unsigned long combined_pfn;
793 unsigned long uninitialized_var(buddy_pfn);
6dda9d55 794 struct page *buddy;
d9dddbf5
VB
795 unsigned int max_order;
796
797 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
1da177e4 798
d29bb978 799 VM_BUG_ON(!zone_is_initialized(zone));
6e9f0d58 800 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1da177e4 801
ed0ae21d 802 VM_BUG_ON(migratetype == -1);
d9dddbf5 803 if (likely(!is_migrate_isolate(migratetype)))
8f82b55d 804 __mod_zone_freepage_state(zone, 1 << order, migratetype);
ed0ae21d 805
76741e77 806 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
309381fe 807 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1da177e4 808
d9dddbf5 809continue_merging:
3c605096 810 while (order < max_order - 1) {
76741e77
VB
811 buddy_pfn = __find_buddy_pfn(pfn, order);
812 buddy = page + (buddy_pfn - pfn);
13ad59df
VB
813
814 if (!pfn_valid_within(buddy_pfn))
815 goto done_merging;
cb2b95e1 816 if (!page_is_buddy(page, buddy, order))
d9dddbf5 817 goto done_merging;
c0a32fc5
SG
818 /*
819 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
820 * merge with it and move up one order.
821 */
822 if (page_is_guard(buddy)) {
2847cf95 823 clear_page_guard(zone, buddy, order, migratetype);
c0a32fc5
SG
824 } else {
825 list_del(&buddy->lru);
826 zone->free_area[order].nr_free--;
827 rmv_page_order(buddy);
828 }
76741e77
VB
829 combined_pfn = buddy_pfn & pfn;
830 page = page + (combined_pfn - pfn);
831 pfn = combined_pfn;
1da177e4
LT
832 order++;
833 }
d9dddbf5
VB
834 if (max_order < MAX_ORDER) {
835 /* If we are here, it means order is >= pageblock_order.
836 * We want to prevent merge between freepages on isolate
837 * pageblock and normal pageblock. Without this, pageblock
838 * isolation could cause incorrect freepage or CMA accounting.
839 *
840 * We don't want to hit this code for the more frequent
841 * low-order merging.
842 */
843 if (unlikely(has_isolate_pageblock(zone))) {
844 int buddy_mt;
845
76741e77
VB
846 buddy_pfn = __find_buddy_pfn(pfn, order);
847 buddy = page + (buddy_pfn - pfn);
d9dddbf5
VB
848 buddy_mt = get_pageblock_migratetype(buddy);
849
850 if (migratetype != buddy_mt
851 && (is_migrate_isolate(migratetype) ||
852 is_migrate_isolate(buddy_mt)))
853 goto done_merging;
854 }
855 max_order++;
856 goto continue_merging;
857 }
858
859done_merging:
1da177e4 860 set_page_order(page, order);
6dda9d55
CZ
861
862 /*
863 * If this is not the largest possible page, check if the buddy
864 * of the next-highest order is free. If it is, it's possible
865 * that pages are being freed that will coalesce soon. In case,
866 * that is happening, add the free page to the tail of the list
867 * so it's less likely to be used soon and more likely to be merged
868 * as a higher order page
869 */
13ad59df 870 if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
6dda9d55 871 struct page *higher_page, *higher_buddy;
76741e77
VB
872 combined_pfn = buddy_pfn & pfn;
873 higher_page = page + (combined_pfn - pfn);
874 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
875 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
b4fb8f66
TL
876 if (pfn_valid_within(buddy_pfn) &&
877 page_is_buddy(higher_page, higher_buddy, order + 1)) {
6dda9d55
CZ
878 list_add_tail(&page->lru,
879 &zone->free_area[order].free_list[migratetype]);
880 goto out;
881 }
882 }
883
884 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
885out:
1da177e4
LT
886 zone->free_area[order].nr_free++;
887}
888
7bfec6f4
MG
889/*
890 * A bad page could be due to a number of fields. Instead of multiple branches,
891 * try and check multiple fields with one check. The caller must do a detailed
892 * check if necessary.
893 */
894static inline bool page_expected_state(struct page *page,
895 unsigned long check_flags)
896{
897 if (unlikely(atomic_read(&page->_mapcount) != -1))
898 return false;
899
900 if (unlikely((unsigned long)page->mapping |
901 page_ref_count(page) |
902#ifdef CONFIG_MEMCG
903 (unsigned long)page->mem_cgroup |
904#endif
905 (page->flags & check_flags)))
906 return false;
907
908 return true;
909}
910
bb552ac6 911static void free_pages_check_bad(struct page *page)
1da177e4 912{
7bfec6f4
MG
913 const char *bad_reason;
914 unsigned long bad_flags;
915
7bfec6f4
MG
916 bad_reason = NULL;
917 bad_flags = 0;
f0b791a3 918
53f9263b 919 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
920 bad_reason = "nonzero mapcount";
921 if (unlikely(page->mapping != NULL))
922 bad_reason = "non-NULL mapping";
fe896d18 923 if (unlikely(page_ref_count(page) != 0))
0139aa7b 924 bad_reason = "nonzero _refcount";
f0b791a3
DH
925 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
926 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
927 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
928 }
9edad6ea
JW
929#ifdef CONFIG_MEMCG
930 if (unlikely(page->mem_cgroup))
931 bad_reason = "page still charged to cgroup";
932#endif
7bfec6f4 933 bad_page(page, bad_reason, bad_flags);
bb552ac6
MG
934}
935
936static inline int free_pages_check(struct page *page)
937{
da838d4f 938 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
bb552ac6 939 return 0;
bb552ac6
MG
940
941 /* Something has gone sideways, find it */
942 free_pages_check_bad(page);
7bfec6f4 943 return 1;
1da177e4
LT
944}
945
4db7548c
MG
946static int free_tail_pages_check(struct page *head_page, struct page *page)
947{
948 int ret = 1;
949
950 /*
951 * We rely page->lru.next never has bit 0 set, unless the page
952 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
953 */
954 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
955
956 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
957 ret = 0;
958 goto out;
959 }
960 switch (page - head_page) {
961 case 1:
962 /* the first tail page: ->mapping is compound_mapcount() */
963 if (unlikely(compound_mapcount(page))) {
964 bad_page(page, "nonzero compound_mapcount", 0);
965 goto out;
966 }
967 break;
968 case 2:
969 /*
970 * the second tail page: ->mapping is
971 * page_deferred_list().next -- ignore value.
972 */
973 break;
974 default:
975 if (page->mapping != TAIL_MAPPING) {
976 bad_page(page, "corrupted mapping in tail page", 0);
977 goto out;
978 }
979 break;
980 }
981 if (unlikely(!PageTail(page))) {
982 bad_page(page, "PageTail not set", 0);
983 goto out;
984 }
985 if (unlikely(compound_head(page) != head_page)) {
986 bad_page(page, "compound_head not consistent", 0);
987 goto out;
988 }
989 ret = 0;
990out:
991 page->mapping = NULL;
992 clear_compound_head(page);
993 return ret;
994}
995
e2769dbd
MG
996static __always_inline bool free_pages_prepare(struct page *page,
997 unsigned int order, bool check_free)
4db7548c 998{
e2769dbd 999 int bad = 0;
4db7548c 1000
4db7548c
MG
1001 VM_BUG_ON_PAGE(PageTail(page), page);
1002
e2769dbd
MG
1003 trace_mm_page_free(page, order);
1004 kmemcheck_free_shadow(page, order);
e2769dbd
MG
1005
1006 /*
1007 * Check tail pages before head page information is cleared to
1008 * avoid checking PageCompound for order-0 pages.
1009 */
1010 if (unlikely(order)) {
1011 bool compound = PageCompound(page);
1012 int i;
1013
1014 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
4db7548c 1015
9a73f61b
KS
1016 if (compound)
1017 ClearPageDoubleMap(page);
e2769dbd
MG
1018 for (i = 1; i < (1 << order); i++) {
1019 if (compound)
1020 bad += free_tail_pages_check(page, page + i);
1021 if (unlikely(free_pages_check(page + i))) {
1022 bad++;
1023 continue;
1024 }
1025 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1026 }
1027 }
bda807d4 1028 if (PageMappingFlags(page))
4db7548c 1029 page->mapping = NULL;
c4159a75 1030 if (memcg_kmem_enabled() && PageKmemcg(page))
4949148a 1031 memcg_kmem_uncharge(page, order);
e2769dbd
MG
1032 if (check_free)
1033 bad += free_pages_check(page);
1034 if (bad)
1035 return false;
4db7548c 1036
e2769dbd
MG
1037 page_cpupid_reset_last(page);
1038 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1039 reset_page_owner(page, order);
4db7548c
MG
1040
1041 if (!PageHighMem(page)) {
1042 debug_check_no_locks_freed(page_address(page),
e2769dbd 1043 PAGE_SIZE << order);
4db7548c 1044 debug_check_no_obj_freed(page_address(page),
e2769dbd 1045 PAGE_SIZE << order);
4db7548c 1046 }
e2769dbd
MG
1047 arch_free_page(page, order);
1048 kernel_poison_pages(page, 1 << order, 0);
1049 kernel_map_pages(page, 1 << order, 0);
29b52de1 1050 kasan_free_pages(page, order);
4db7548c 1051
4db7548c
MG
1052 return true;
1053}
1054
e2769dbd
MG
1055#ifdef CONFIG_DEBUG_VM
1056static inline bool free_pcp_prepare(struct page *page)
1057{
1058 return free_pages_prepare(page, 0, true);
1059}
1060
1061static inline bool bulkfree_pcp_prepare(struct page *page)
1062{
1063 return false;
1064}
1065#else
1066static bool free_pcp_prepare(struct page *page)
1067{
1068 return free_pages_prepare(page, 0, false);
1069}
1070
4db7548c
MG
1071static bool bulkfree_pcp_prepare(struct page *page)
1072{
1073 return free_pages_check(page);
1074}
1075#endif /* CONFIG_DEBUG_VM */
1076
1da177e4 1077/*
5f8dcc21 1078 * Frees a number of pages from the PCP lists
1da177e4 1079 * Assumes all pages on list are in same zone, and of same order.
207f36ee 1080 * count is the number of pages to free.
1da177e4
LT
1081 *
1082 * If the zone was previously in an "all pages pinned" state then look to
1083 * see if this freeing clears that state.
1084 *
1085 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1086 * pinned" detection logic.
1087 */
5f8dcc21
MG
1088static void free_pcppages_bulk(struct zone *zone, int count,
1089 struct per_cpu_pages *pcp)
1da177e4 1090{
5f8dcc21 1091 int migratetype = 0;
a6f9edd6 1092 int batch_free = 0;
3777999d 1093 bool isolated_pageblocks;
5f8dcc21 1094
d34b0733 1095 spin_lock(&zone->lock);
3777999d 1096 isolated_pageblocks = has_isolate_pageblock(zone);
f2260e6b 1097
e5b31ac2 1098 while (count) {
48db57f8 1099 struct page *page;
5f8dcc21
MG
1100 struct list_head *list;
1101
1102 /*
a6f9edd6
MG
1103 * Remove pages from lists in a round-robin fashion. A
1104 * batch_free count is maintained that is incremented when an
1105 * empty list is encountered. This is so more pages are freed
1106 * off fuller lists instead of spinning excessively around empty
1107 * lists
5f8dcc21
MG
1108 */
1109 do {
a6f9edd6 1110 batch_free++;
5f8dcc21
MG
1111 if (++migratetype == MIGRATE_PCPTYPES)
1112 migratetype = 0;
1113 list = &pcp->lists[migratetype];
1114 } while (list_empty(list));
48db57f8 1115
1d16871d
NK
1116 /* This is the only non-empty list. Free them all. */
1117 if (batch_free == MIGRATE_PCPTYPES)
e5b31ac2 1118 batch_free = count;
1d16871d 1119
a6f9edd6 1120 do {
770c8aaa
BZ
1121 int mt; /* migratetype of the to-be-freed page */
1122
a16601c5 1123 page = list_last_entry(list, struct page, lru);
a6f9edd6
MG
1124 /* must delete as __free_one_page list manipulates */
1125 list_del(&page->lru);
aa016d14 1126
bb14c2c7 1127 mt = get_pcppage_migratetype(page);
aa016d14
VB
1128 /* MIGRATE_ISOLATE page should not go to pcplists */
1129 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1130 /* Pageblock could have been isolated meanwhile */
3777999d 1131 if (unlikely(isolated_pageblocks))
51bb1a40 1132 mt = get_pageblock_migratetype(page);
51bb1a40 1133
4db7548c
MG
1134 if (bulkfree_pcp_prepare(page))
1135 continue;
1136
dc4b0caf 1137 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
770c8aaa 1138 trace_mm_page_pcpu_drain(page, 0, mt);
e5b31ac2 1139 } while (--count && --batch_free && !list_empty(list));
1da177e4 1140 }
d34b0733 1141 spin_unlock(&zone->lock);
1da177e4
LT
1142}
1143
dc4b0caf
MG
1144static void free_one_page(struct zone *zone,
1145 struct page *page, unsigned long pfn,
7aeb09f9 1146 unsigned int order,
ed0ae21d 1147 int migratetype)
1da177e4 1148{
d34b0733 1149 spin_lock(&zone->lock);
ad53f92e
JK
1150 if (unlikely(has_isolate_pageblock(zone) ||
1151 is_migrate_isolate(migratetype))) {
1152 migratetype = get_pfnblock_migratetype(page, pfn);
ad53f92e 1153 }
dc4b0caf 1154 __free_one_page(page, pfn, zone, order, migratetype);
d34b0733 1155 spin_unlock(&zone->lock);
48db57f8
NP
1156}
1157
1e8ce83c
RH
1158static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1159 unsigned long zone, int nid)
1160{
1e8ce83c 1161 set_page_links(page, zone, nid, pfn);
1e8ce83c
RH
1162 init_page_count(page);
1163 page_mapcount_reset(page);
1164 page_cpupid_reset_last(page);
1e8ce83c 1165
1e8ce83c
RH
1166 INIT_LIST_HEAD(&page->lru);
1167#ifdef WANT_PAGE_VIRTUAL
1168 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1169 if (!is_highmem_idx(zone))
1170 set_page_address(page, __va(pfn << PAGE_SHIFT));
1171#endif
1172}
1173
1174static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1175 int nid)
1176{
1177 return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1178}
1179
7e18adb4
MG
1180#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1181static void init_reserved_page(unsigned long pfn)
1182{
1183 pg_data_t *pgdat;
1184 int nid, zid;
1185
1186 if (!early_page_uninitialised(pfn))
1187 return;
1188
1189 nid = early_pfn_to_nid(pfn);
1190 pgdat = NODE_DATA(nid);
1191
1192 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1193 struct zone *zone = &pgdat->node_zones[zid];
1194
1195 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1196 break;
1197 }
1198 __init_single_pfn(pfn, zid, nid);
1199}
1200#else
1201static inline void init_reserved_page(unsigned long pfn)
1202{
1203}
1204#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1205
92923ca3
NZ
1206/*
1207 * Initialised pages do not have PageReserved set. This function is
1208 * called for each range allocated by the bootmem allocator and
1209 * marks the pages PageReserved. The remaining valid pages are later
1210 * sent to the buddy page allocator.
1211 */
4b50bcc7 1212void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
92923ca3
NZ
1213{
1214 unsigned long start_pfn = PFN_DOWN(start);
1215 unsigned long end_pfn = PFN_UP(end);
1216
7e18adb4
MG
1217 for (; start_pfn < end_pfn; start_pfn++) {
1218 if (pfn_valid(start_pfn)) {
1219 struct page *page = pfn_to_page(start_pfn);
1220
1221 init_reserved_page(start_pfn);
1d798ca3
KS
1222
1223 /* Avoid false-positive PageTail() */
1224 INIT_LIST_HEAD(&page->lru);
1225
7e18adb4
MG
1226 SetPageReserved(page);
1227 }
1228 }
92923ca3
NZ
1229}
1230
ec95f53a
KM
1231static void __free_pages_ok(struct page *page, unsigned int order)
1232{
d34b0733 1233 unsigned long flags;
95e34412 1234 int migratetype;
dc4b0caf 1235 unsigned long pfn = page_to_pfn(page);
ec95f53a 1236
e2769dbd 1237 if (!free_pages_prepare(page, order, true))
ec95f53a
KM
1238 return;
1239
cfc47a28 1240 migratetype = get_pfnblock_migratetype(page, pfn);
d34b0733
MG
1241 local_irq_save(flags);
1242 __count_vm_events(PGFREE, 1 << order);
dc4b0caf 1243 free_one_page(page_zone(page), page, pfn, order, migratetype);
d34b0733 1244 local_irq_restore(flags);
1da177e4
LT
1245}
1246
949698a3 1247static void __init __free_pages_boot_core(struct page *page, unsigned int order)
a226f6c8 1248{
c3993076 1249 unsigned int nr_pages = 1 << order;
e2d0bd2b 1250 struct page *p = page;
c3993076 1251 unsigned int loop;
a226f6c8 1252
e2d0bd2b
YL
1253 prefetchw(p);
1254 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1255 prefetchw(p + 1);
c3993076
JW
1256 __ClearPageReserved(p);
1257 set_page_count(p, 0);
a226f6c8 1258 }
e2d0bd2b
YL
1259 __ClearPageReserved(p);
1260 set_page_count(p, 0);
c3993076 1261
e2d0bd2b 1262 page_zone(page)->managed_pages += nr_pages;
c3993076
JW
1263 set_page_refcounted(page);
1264 __free_pages(page, order);
a226f6c8
DH
1265}
1266
75a592a4
MG
1267#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1268 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
7ace9917 1269
75a592a4
MG
1270static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1271
1272int __meminit early_pfn_to_nid(unsigned long pfn)
1273{
7ace9917 1274 static DEFINE_SPINLOCK(early_pfn_lock);
75a592a4
MG
1275 int nid;
1276
7ace9917 1277 spin_lock(&early_pfn_lock);
75a592a4 1278 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
7ace9917 1279 if (nid < 0)
e4568d38 1280 nid = first_online_node;
7ace9917
MG
1281 spin_unlock(&early_pfn_lock);
1282
1283 return nid;
75a592a4
MG
1284}
1285#endif
1286
1287#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1288static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1289 struct mminit_pfnnid_cache *state)
1290{
1291 int nid;
1292
1293 nid = __early_pfn_to_nid(pfn, state);
1294 if (nid >= 0 && nid != node)
1295 return false;
1296 return true;
1297}
1298
1299/* Only safe to use early in boot when initialisation is single-threaded */
1300static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1301{
1302 return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1303}
1304
1305#else
1306
1307static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1308{
1309 return true;
1310}
1311static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
1312 struct mminit_pfnnid_cache *state)
1313{
1314 return true;
1315}
1316#endif
1317
1318
0e1cc95b 1319void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
3a80a7fa
MG
1320 unsigned int order)
1321{
1322 if (early_page_uninitialised(pfn))
1323 return;
949698a3 1324 return __free_pages_boot_core(page, order);
3a80a7fa
MG
1325}
1326
7cf91a98
JK
1327/*
1328 * Check that the whole (or subset of) a pageblock given by the interval of
1329 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1330 * with the migration of free compaction scanner. The scanners then need to
1331 * use only pfn_valid_within() check for arches that allow holes within
1332 * pageblocks.
1333 *
1334 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1335 *
1336 * It's possible on some configurations to have a setup like node0 node1 node0
1337 * i.e. it's possible that all pages within a zones range of pages do not
1338 * belong to a single zone. We assume that a border between node0 and node1
1339 * can occur within a single pageblock, but not a node0 node1 node0
1340 * interleaving within a single pageblock. It is therefore sufficient to check
1341 * the first and last page of a pageblock and avoid checking each individual
1342 * page in a pageblock.
1343 */
1344struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1345 unsigned long end_pfn, struct zone *zone)
1346{
1347 struct page *start_page;
1348 struct page *end_page;
1349
1350 /* end_pfn is one past the range we are checking */
1351 end_pfn--;
1352
1353 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1354 return NULL;
1355
1356 start_page = pfn_to_page(start_pfn);
1357
1358 if (page_zone(start_page) != zone)
1359 return NULL;
1360
1361 end_page = pfn_to_page(end_pfn);
1362
1363 /* This gives a shorter code than deriving page_zone(end_page) */
1364 if (page_zone_id(start_page) != page_zone_id(end_page))
1365 return NULL;
1366
1367 return start_page;
1368}
1369
1370void set_zone_contiguous(struct zone *zone)
1371{
1372 unsigned long block_start_pfn = zone->zone_start_pfn;
1373 unsigned long block_end_pfn;
1374
1375 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1376 for (; block_start_pfn < zone_end_pfn(zone);
1377 block_start_pfn = block_end_pfn,
1378 block_end_pfn += pageblock_nr_pages) {
1379
1380 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1381
1382 if (!__pageblock_pfn_to_page(block_start_pfn,
1383 block_end_pfn, zone))
1384 return;
1385 }
1386
1387 /* We confirm that there is no hole */
1388 zone->contiguous = true;
1389}
1390
1391void clear_zone_contiguous(struct zone *zone)
1392{
1393 zone->contiguous = false;
1394}
1395
7e18adb4 1396#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
0e1cc95b 1397static void __init deferred_free_range(struct page *page,
a4de83dd
MG
1398 unsigned long pfn, int nr_pages)
1399{
1400 int i;
1401
1402 if (!page)
1403 return;
1404
1405 /* Free a large naturally-aligned chunk if possible */
e780149b
XQ
1406 if (nr_pages == pageblock_nr_pages &&
1407 (pfn & (pageblock_nr_pages - 1)) == 0) {
ac5d2539 1408 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
e780149b 1409 __free_pages_boot_core(page, pageblock_order);
a4de83dd
MG
1410 return;
1411 }
1412
e780149b
XQ
1413 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1414 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1415 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
949698a3 1416 __free_pages_boot_core(page, 0);
e780149b 1417 }
a4de83dd
MG
1418}
1419
d3cd131d
NS
1420/* Completion tracking for deferred_init_memmap() threads */
1421static atomic_t pgdat_init_n_undone __initdata;
1422static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1423
1424static inline void __init pgdat_init_report_one_done(void)
1425{
1426 if (atomic_dec_and_test(&pgdat_init_n_undone))
1427 complete(&pgdat_init_all_done_comp);
1428}
0e1cc95b 1429
7e18adb4 1430/* Initialise remaining memory on a node */
0e1cc95b 1431static int __init deferred_init_memmap(void *data)
7e18adb4 1432{
0e1cc95b
MG
1433 pg_data_t *pgdat = data;
1434 int nid = pgdat->node_id;
7e18adb4
MG
1435 struct mminit_pfnnid_cache nid_init_state = { };
1436 unsigned long start = jiffies;
1437 unsigned long nr_pages = 0;
1438 unsigned long walk_start, walk_end;
1439 int i, zid;
1440 struct zone *zone;
7e18adb4 1441 unsigned long first_init_pfn = pgdat->first_deferred_pfn;
0e1cc95b 1442 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
7e18adb4 1443
0e1cc95b 1444 if (first_init_pfn == ULONG_MAX) {
d3cd131d 1445 pgdat_init_report_one_done();
0e1cc95b
MG
1446 return 0;
1447 }
1448
1449 /* Bind memory initialisation thread to a local node if possible */
1450 if (!cpumask_empty(cpumask))
1451 set_cpus_allowed_ptr(current, cpumask);
7e18adb4
MG
1452
1453 /* Sanity check boundaries */
1454 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1455 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1456 pgdat->first_deferred_pfn = ULONG_MAX;
1457
1458 /* Only the highest zone is deferred so find it */
1459 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1460 zone = pgdat->node_zones + zid;
1461 if (first_init_pfn < zone_end_pfn(zone))
1462 break;
1463 }
1464
1465 for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1466 unsigned long pfn, end_pfn;
54608c3f 1467 struct page *page = NULL;
a4de83dd
MG
1468 struct page *free_base_page = NULL;
1469 unsigned long free_base_pfn = 0;
1470 int nr_to_free = 0;
7e18adb4
MG
1471
1472 end_pfn = min(walk_end, zone_end_pfn(zone));
1473 pfn = first_init_pfn;
1474 if (pfn < walk_start)
1475 pfn = walk_start;
1476 if (pfn < zone->zone_start_pfn)
1477 pfn = zone->zone_start_pfn;
1478
1479 for (; pfn < end_pfn; pfn++) {
54608c3f 1480 if (!pfn_valid_within(pfn))
a4de83dd 1481 goto free_range;
7e18adb4 1482
54608c3f
MG
1483 /*
1484 * Ensure pfn_valid is checked every
e780149b 1485 * pageblock_nr_pages for memory holes
54608c3f 1486 */
e780149b 1487 if ((pfn & (pageblock_nr_pages - 1)) == 0) {
54608c3f
MG
1488 if (!pfn_valid(pfn)) {
1489 page = NULL;
a4de83dd 1490 goto free_range;
54608c3f
MG
1491 }
1492 }
1493
1494 if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1495 page = NULL;
a4de83dd 1496 goto free_range;
54608c3f
MG
1497 }
1498
1499 /* Minimise pfn page lookups and scheduler checks */
e780149b 1500 if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
54608c3f
MG
1501 page++;
1502 } else {
a4de83dd
MG
1503 nr_pages += nr_to_free;
1504 deferred_free_range(free_base_page,
1505 free_base_pfn, nr_to_free);
1506 free_base_page = NULL;
1507 free_base_pfn = nr_to_free = 0;
1508
54608c3f
MG
1509 page = pfn_to_page(pfn);
1510 cond_resched();
1511 }
7e18adb4
MG
1512
1513 if (page->flags) {
1514 VM_BUG_ON(page_zone(page) != zone);
a4de83dd 1515 goto free_range;
7e18adb4
MG
1516 }
1517
1518 __init_single_page(page, pfn, zid, nid);
a4de83dd
MG
1519 if (!free_base_page) {
1520 free_base_page = page;
1521 free_base_pfn = pfn;
1522 nr_to_free = 0;
1523 }
1524 nr_to_free++;
1525
1526 /* Where possible, batch up pages for a single free */
1527 continue;
1528free_range:
1529 /* Free the current block of pages to allocator */
1530 nr_pages += nr_to_free;
1531 deferred_free_range(free_base_page, free_base_pfn,
1532 nr_to_free);
1533 free_base_page = NULL;
1534 free_base_pfn = nr_to_free = 0;
7e18adb4 1535 }
e780149b
XQ
1536 /* Free the last block of pages to allocator */
1537 nr_pages += nr_to_free;
1538 deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
a4de83dd 1539
7e18adb4
MG
1540 first_init_pfn = max(end_pfn, first_init_pfn);
1541 }
1542
1543 /* Sanity check that the next zone really is unpopulated */
1544 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1545
0e1cc95b 1546 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
7e18adb4 1547 jiffies_to_msecs(jiffies - start));
d3cd131d
NS
1548
1549 pgdat_init_report_one_done();
0e1cc95b
MG
1550 return 0;
1551}
7cf91a98 1552#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
0e1cc95b
MG
1553
1554void __init page_alloc_init_late(void)
1555{
7cf91a98
JK
1556 struct zone *zone;
1557
1558#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
0e1cc95b
MG
1559 int nid;
1560
d3cd131d
NS
1561 /* There will be num_node_state(N_MEMORY) threads */
1562 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
0e1cc95b 1563 for_each_node_state(nid, N_MEMORY) {
0e1cc95b
MG
1564 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1565 }
1566
1567 /* Block until all are initialised */
d3cd131d 1568 wait_for_completion(&pgdat_init_all_done_comp);
4248b0da
MG
1569
1570 /* Reinit limits that are based on free pages after the kernel is up */
1571 files_maxfiles_init();
7cf91a98
JK
1572#endif
1573
1574 for_each_populated_zone(zone)
1575 set_zone_contiguous(zone);
7e18adb4 1576}
7e18adb4 1577
47118af0 1578#ifdef CONFIG_CMA
9cf510a5 1579/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
47118af0
MN
1580void __init init_cma_reserved_pageblock(struct page *page)
1581{
1582 unsigned i = pageblock_nr_pages;
1583 struct page *p = page;
1584
1585 do {
1586 __ClearPageReserved(p);
1587 set_page_count(p, 0);
1588 } while (++p, --i);
1589
47118af0 1590 set_pageblock_migratetype(page, MIGRATE_CMA);
dc78327c
MN
1591
1592 if (pageblock_order >= MAX_ORDER) {
1593 i = pageblock_nr_pages;
1594 p = page;
1595 do {
1596 set_page_refcounted(p);
1597 __free_pages(p, MAX_ORDER - 1);
1598 p += MAX_ORDER_NR_PAGES;
1599 } while (i -= MAX_ORDER_NR_PAGES);
1600 } else {
1601 set_page_refcounted(page);
1602 __free_pages(page, pageblock_order);
1603 }
1604
3dcc0571 1605 adjust_managed_page_count(page, pageblock_nr_pages);
47118af0
MN
1606}
1607#endif
1da177e4
LT
1608
1609/*
1610 * The order of subdivision here is critical for the IO subsystem.
1611 * Please do not alter this order without good reasons and regression
1612 * testing. Specifically, as large blocks of memory are subdivided,
1613 * the order in which smaller blocks are delivered depends on the order
1614 * they're subdivided in this function. This is the primary factor
1615 * influencing the order in which pages are delivered to the IO
1616 * subsystem according to empirical testing, and this is also justified
1617 * by considering the behavior of a buddy system containing a single
1618 * large block of memory acted on by a series of small allocations.
1619 * This behavior is a critical factor in sglist merging's success.
1620 *
6d49e352 1621 * -- nyc
1da177e4 1622 */
085cc7d5 1623static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
1624 int low, int high, struct free_area *area,
1625 int migratetype)
1da177e4
LT
1626{
1627 unsigned long size = 1 << high;
1628
1629 while (high > low) {
1630 area--;
1631 high--;
1632 size >>= 1;
309381fe 1633 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
c0a32fc5 1634
acbc15a4
JK
1635 /*
1636 * Mark as guard pages (or page), that will allow to
1637 * merge back to allocator when buddy will be freed.
1638 * Corresponding page table entries will not be touched,
1639 * pages will stay not present in virtual address space
1640 */
1641 if (set_page_guard(zone, &page[size], high, migratetype))
c0a32fc5 1642 continue;
acbc15a4 1643
b2a0ac88 1644 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
1645 area->nr_free++;
1646 set_page_order(&page[size], high);
1647 }
1da177e4
LT
1648}
1649
4e611801 1650static void check_new_page_bad(struct page *page)
1da177e4 1651{
4e611801
VB
1652 const char *bad_reason = NULL;
1653 unsigned long bad_flags = 0;
7bfec6f4 1654
53f9263b 1655 if (unlikely(atomic_read(&page->_mapcount) != -1))
f0b791a3
DH
1656 bad_reason = "nonzero mapcount";
1657 if (unlikely(page->mapping != NULL))
1658 bad_reason = "non-NULL mapping";
fe896d18 1659 if (unlikely(page_ref_count(page) != 0))
f0b791a3 1660 bad_reason = "nonzero _count";
f4c18e6f
NH
1661 if (unlikely(page->flags & __PG_HWPOISON)) {
1662 bad_reason = "HWPoisoned (hardware-corrupted)";
1663 bad_flags = __PG_HWPOISON;
e570f56c
NH
1664 /* Don't complain about hwpoisoned pages */
1665 page_mapcount_reset(page); /* remove PageBuddy */
1666 return;
f4c18e6f 1667 }
f0b791a3
DH
1668 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1669 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1670 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1671 }
9edad6ea
JW
1672#ifdef CONFIG_MEMCG
1673 if (unlikely(page->mem_cgroup))
1674 bad_reason = "page still charged to cgroup";
1675#endif
4e611801
VB
1676 bad_page(page, bad_reason, bad_flags);
1677}
1678
1679/*
1680 * This page is about to be returned from the page allocator
1681 */
1682static inline int check_new_page(struct page *page)
1683{
1684 if (likely(page_expected_state(page,
1685 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1686 return 0;
1687
1688 check_new_page_bad(page);
1689 return 1;
2a7684a2
WF
1690}
1691
1414c7f4
LA
1692static inline bool free_pages_prezeroed(bool poisoned)
1693{
1694 return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1695 page_poisoning_enabled() && poisoned;
1696}
1697
479f854a
MG
1698#ifdef CONFIG_DEBUG_VM
1699static bool check_pcp_refill(struct page *page)
1700{
1701 return false;
1702}
1703
1704static bool check_new_pcp(struct page *page)
1705{
1706 return check_new_page(page);
1707}
1708#else
1709static bool check_pcp_refill(struct page *page)
1710{
1711 return check_new_page(page);
1712}
1713static bool check_new_pcp(struct page *page)
1714{
1715 return false;
1716}
1717#endif /* CONFIG_DEBUG_VM */
1718
1719static bool check_new_pages(struct page *page, unsigned int order)
1720{
1721 int i;
1722 for (i = 0; i < (1 << order); i++) {
1723 struct page *p = page + i;
1724
1725 if (unlikely(check_new_page(p)))
1726 return true;
1727 }
1728
1729 return false;
1730}
1731
46f24fd8
JK
1732inline void post_alloc_hook(struct page *page, unsigned int order,
1733 gfp_t gfp_flags)
1734{
1735 set_page_private(page, 0);
1736 set_page_refcounted(page);
1737
1738 arch_alloc_page(page, order);
1739 kernel_map_pages(page, 1 << order, 1);
1740 kernel_poison_pages(page, 1 << order, 1);
1741 kasan_alloc_pages(page, order);
1742 set_page_owner(page, order, gfp_flags);
1743}
1744
479f854a 1745static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
c603844b 1746 unsigned int alloc_flags)
2a7684a2
WF
1747{
1748 int i;
1414c7f4 1749 bool poisoned = true;
2a7684a2
WF
1750
1751 for (i = 0; i < (1 << order); i++) {
1752 struct page *p = page + i;
1414c7f4
LA
1753 if (poisoned)
1754 poisoned &= page_is_poisoned(p);
2a7684a2 1755 }
689bcebf 1756
46f24fd8 1757 post_alloc_hook(page, order, gfp_flags);
17cf4406 1758
1414c7f4 1759 if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
f4d2897b
AA
1760 for (i = 0; i < (1 << order); i++)
1761 clear_highpage(page + i);
17cf4406
NP
1762
1763 if (order && (gfp_flags & __GFP_COMP))
1764 prep_compound_page(page, order);
1765
75379191 1766 /*
2f064f34 1767 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
75379191
VB
1768 * allocate the page. The expectation is that the caller is taking
1769 * steps that will free more memory. The caller should avoid the page
1770 * being used for !PFMEMALLOC purposes.
1771 */
2f064f34
MH
1772 if (alloc_flags & ALLOC_NO_WATERMARKS)
1773 set_page_pfmemalloc(page);
1774 else
1775 clear_page_pfmemalloc(page);
1da177e4
LT
1776}
1777
56fd56b8
MG
1778/*
1779 * Go through the free lists for the given migratetype and remove
1780 * the smallest available page from the freelists
1781 */
728ec980
MG
1782static inline
1783struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
1784 int migratetype)
1785{
1786 unsigned int current_order;
b8af2941 1787 struct free_area *area;
56fd56b8
MG
1788 struct page *page;
1789
1790 /* Find a page of the appropriate size in the preferred list */
1791 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1792 area = &(zone->free_area[current_order]);
a16601c5 1793 page = list_first_entry_or_null(&area->free_list[migratetype],
56fd56b8 1794 struct page, lru);
a16601c5
GT
1795 if (!page)
1796 continue;
56fd56b8
MG
1797 list_del(&page->lru);
1798 rmv_page_order(page);
1799 area->nr_free--;
56fd56b8 1800 expand(zone, page, order, current_order, area, migratetype);
bb14c2c7 1801 set_pcppage_migratetype(page, migratetype);
56fd56b8
MG
1802 return page;
1803 }
1804
1805 return NULL;
1806}
1807
1808
b2a0ac88
MG
1809/*
1810 * This array describes the order lists are fallen back to when
1811 * the free lists for the desirable migrate type are depleted
1812 */
47118af0 1813static int fallbacks[MIGRATE_TYPES][4] = {
974a786e
MG
1814 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1815 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
1816 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
47118af0 1817#ifdef CONFIG_CMA
974a786e 1818 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
47118af0 1819#endif
194159fb 1820#ifdef CONFIG_MEMORY_ISOLATION
974a786e 1821 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
194159fb 1822#endif
b2a0ac88
MG
1823};
1824
dc67647b
JK
1825#ifdef CONFIG_CMA
1826static struct page *__rmqueue_cma_fallback(struct zone *zone,
1827 unsigned int order)
1828{
1829 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1830}
1831#else
1832static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1833 unsigned int order) { return NULL; }
1834#endif
1835
c361be55
MG
1836/*
1837 * Move the free pages in a range to the free lists of the requested type.
d9c23400 1838 * Note that start_page and end_pages are not aligned on a pageblock
c361be55
MG
1839 * boundary. If alignment is required, use move_freepages_block()
1840 */
435b405c 1841int move_freepages(struct zone *zone,
b69a7288
AB
1842 struct page *start_page, struct page *end_page,
1843 int migratetype)
c361be55
MG
1844{
1845 struct page *page;
d00181b9 1846 unsigned int order;
d100313f 1847 int pages_moved = 0;
c361be55
MG
1848
1849#ifndef CONFIG_HOLES_IN_ZONE
1850 /*
1851 * page_zone is not safe to call in this context when
1852 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1853 * anyway as we check zone boundaries in move_freepages_block().
1854 * Remove at a later date when no bug reports exist related to
ac0e5b7a 1855 * grouping pages by mobility
c361be55 1856 */
97ee4ba7 1857 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
c361be55
MG
1858#endif
1859
1860 for (page = start_page; page <= end_page;) {
1861 if (!pfn_valid_within(page_to_pfn(page))) {
1862 page++;
1863 continue;
1864 }
1865
f073bdc5
AB
1866 /* Make sure we are not inadvertently changing nodes */
1867 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1868
c361be55
MG
1869 if (!PageBuddy(page)) {
1870 page++;
1871 continue;
1872 }
1873
1874 order = page_order(page);
84be48d8
KS
1875 list_move(&page->lru,
1876 &zone->free_area[order].free_list[migratetype]);
c361be55 1877 page += 1 << order;
d100313f 1878 pages_moved += 1 << order;
c361be55
MG
1879 }
1880
d100313f 1881 return pages_moved;
c361be55
MG
1882}
1883
ee6f509c 1884int move_freepages_block(struct zone *zone, struct page *page,
68e3e926 1885 int migratetype)
c361be55
MG
1886{
1887 unsigned long start_pfn, end_pfn;
1888 struct page *start_page, *end_page;
1889
1890 start_pfn = page_to_pfn(page);
d9c23400 1891 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 1892 start_page = pfn_to_page(start_pfn);
d9c23400
MG
1893 end_page = start_page + pageblock_nr_pages - 1;
1894 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
1895
1896 /* Do not cross zone boundaries */
108bcc96 1897 if (!zone_spans_pfn(zone, start_pfn))
c361be55 1898 start_page = page;
108bcc96 1899 if (!zone_spans_pfn(zone, end_pfn))
c361be55
MG
1900 return 0;
1901
1902 return move_freepages(zone, start_page, end_page, migratetype);
1903}
1904
2f66a68f
MG
1905static void change_pageblock_range(struct page *pageblock_page,
1906 int start_order, int migratetype)
1907{
1908 int nr_pageblocks = 1 << (start_order - pageblock_order);
1909
1910 while (nr_pageblocks--) {
1911 set_pageblock_migratetype(pageblock_page, migratetype);
1912 pageblock_page += pageblock_nr_pages;
1913 }
1914}
1915
fef903ef 1916/*
9c0415eb
VB
1917 * When we are falling back to another migratetype during allocation, try to
1918 * steal extra free pages from the same pageblocks to satisfy further
1919 * allocations, instead of polluting multiple pageblocks.
1920 *
1921 * If we are stealing a relatively large buddy page, it is likely there will
1922 * be more free pages in the pageblock, so try to steal them all. For
1923 * reclaimable and unmovable allocations, we steal regardless of page size,
1924 * as fragmentation caused by those allocations polluting movable pageblocks
1925 * is worse than movable allocations stealing from unmovable and reclaimable
1926 * pageblocks.
fef903ef 1927 */
4eb7dce6
JK
1928static bool can_steal_fallback(unsigned int order, int start_mt)
1929{
1930 /*
1931 * Leaving this order check is intended, although there is
1932 * relaxed order check in next check. The reason is that
1933 * we can actually steal whole pageblock if this condition met,
1934 * but, below check doesn't guarantee it and that is just heuristic
1935 * so could be changed anytime.
1936 */
1937 if (order >= pageblock_order)
1938 return true;
1939
1940 if (order >= pageblock_order / 2 ||
1941 start_mt == MIGRATE_RECLAIMABLE ||
1942 start_mt == MIGRATE_UNMOVABLE ||
1943 page_group_by_mobility_disabled)
1944 return true;
1945
1946 return false;
1947}
1948
1949/*
1950 * This function implements actual steal behaviour. If order is large enough,
1951 * we can steal whole pageblock. If not, we first move freepages in this
1952 * pageblock and check whether half of pages are moved or not. If half of
1953 * pages are moved, we can change migratetype of pageblock and permanently
1954 * use it's pages as requested migratetype in the future.
1955 */
1956static void steal_suitable_fallback(struct zone *zone, struct page *page,
1957 int start_type)
fef903ef 1958{
d00181b9 1959 unsigned int current_order = page_order(page);
4eb7dce6 1960 int pages;
fef903ef 1961
fef903ef
SB
1962 /* Take ownership for orders >= pageblock_order */
1963 if (current_order >= pageblock_order) {
1964 change_pageblock_range(page, current_order, start_type);
3a1086fb 1965 return;
fef903ef
SB
1966 }
1967
4eb7dce6 1968 pages = move_freepages_block(zone, page, start_type);
fef903ef 1969
4eb7dce6
JK
1970 /* Claim the whole block if over half of it is free */
1971 if (pages >= (1 << (pageblock_order-1)) ||
1972 page_group_by_mobility_disabled)
1973 set_pageblock_migratetype(page, start_type);
1974}
1975
2149cdae
JK
1976/*
1977 * Check whether there is a suitable fallback freepage with requested order.
1978 * If only_stealable is true, this function returns fallback_mt only if
1979 * we can steal other freepages all together. This would help to reduce
1980 * fragmentation due to mixed migratetype pages in one pageblock.
1981 */
1982int find_suitable_fallback(struct free_area *area, unsigned int order,
1983 int migratetype, bool only_stealable, bool *can_steal)
4eb7dce6
JK
1984{
1985 int i;
1986 int fallback_mt;
1987
1988 if (area->nr_free == 0)
1989 return -1;
1990
1991 *can_steal = false;
1992 for (i = 0;; i++) {
1993 fallback_mt = fallbacks[migratetype][i];
974a786e 1994 if (fallback_mt == MIGRATE_TYPES)
4eb7dce6
JK
1995 break;
1996
1997 if (list_empty(&area->free_list[fallback_mt]))
1998 continue;
fef903ef 1999
4eb7dce6
JK
2000 if (can_steal_fallback(order, migratetype))
2001 *can_steal = true;
2002
2149cdae
JK
2003 if (!only_stealable)
2004 return fallback_mt;
2005
2006 if (*can_steal)
2007 return fallback_mt;
fef903ef 2008 }
4eb7dce6
JK
2009
2010 return -1;
fef903ef
SB
2011}
2012
0aaa29a5
MG
2013/*
2014 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2015 * there are no empty page blocks that contain a page with a suitable order
2016 */
2017static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2018 unsigned int alloc_order)
2019{
2020 int mt;
2021 unsigned long max_managed, flags;
2022
2023 /*
2024 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2025 * Check is race-prone but harmless.
2026 */
2027 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2028 if (zone->nr_reserved_highatomic >= max_managed)
2029 return;
2030
2031 spin_lock_irqsave(&zone->lock, flags);
2032
2033 /* Recheck the nr_reserved_highatomic limit under the lock */
2034 if (zone->nr_reserved_highatomic >= max_managed)
2035 goto out_unlock;
2036
2037 /* Yoink! */
2038 mt = get_pageblock_migratetype(page);
a6ffdc07
XQ
2039 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2040 && !is_migrate_cma(mt)) {
0aaa29a5
MG
2041 zone->nr_reserved_highatomic += pageblock_nr_pages;
2042 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2043 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
2044 }
2045
2046out_unlock:
2047 spin_unlock_irqrestore(&zone->lock, flags);
2048}
2049
2050/*
2051 * Used when an allocation is about to fail under memory pressure. This
2052 * potentially hurts the reliability of high-order allocations when under
2053 * intense memory pressure but failed atomic allocations should be easier
2054 * to recover from than an OOM.
29fac03b
MK
2055 *
2056 * If @force is true, try to unreserve a pageblock even though highatomic
2057 * pageblock is exhausted.
0aaa29a5 2058 */
29fac03b
MK
2059static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2060 bool force)
0aaa29a5
MG
2061{
2062 struct zonelist *zonelist = ac->zonelist;
2063 unsigned long flags;
2064 struct zoneref *z;
2065 struct zone *zone;
2066 struct page *page;
2067 int order;
04c8716f 2068 bool ret;
0aaa29a5
MG
2069
2070 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2071 ac->nodemask) {
29fac03b
MK
2072 /*
2073 * Preserve at least one pageblock unless memory pressure
2074 * is really high.
2075 */
2076 if (!force && zone->nr_reserved_highatomic <=
2077 pageblock_nr_pages)
0aaa29a5
MG
2078 continue;
2079
2080 spin_lock_irqsave(&zone->lock, flags);
2081 for (order = 0; order < MAX_ORDER; order++) {
2082 struct free_area *area = &(zone->free_area[order]);
2083
a16601c5
GT
2084 page = list_first_entry_or_null(
2085 &area->free_list[MIGRATE_HIGHATOMIC],
2086 struct page, lru);
2087 if (!page)
0aaa29a5
MG
2088 continue;
2089
0aaa29a5 2090 /*
4855e4a7
MK
2091 * In page freeing path, migratetype change is racy so
2092 * we can counter several free pages in a pageblock
2093 * in this loop althoug we changed the pageblock type
2094 * from highatomic to ac->migratetype. So we should
2095 * adjust the count once.
0aaa29a5 2096 */
a6ffdc07 2097 if (is_migrate_highatomic_page(page)) {
4855e4a7
MK
2098 /*
2099 * It should never happen but changes to
2100 * locking could inadvertently allow a per-cpu
2101 * drain to add pages to MIGRATE_HIGHATOMIC
2102 * while unreserving so be safe and watch for
2103 * underflows.
2104 */
2105 zone->nr_reserved_highatomic -= min(
2106 pageblock_nr_pages,
2107 zone->nr_reserved_highatomic);
2108 }
0aaa29a5
MG
2109
2110 /*
2111 * Convert to ac->migratetype and avoid the normal
2112 * pageblock stealing heuristics. Minimally, the caller
2113 * is doing the work and needs the pages. More
2114 * importantly, if the block was always converted to
2115 * MIGRATE_UNMOVABLE or another type then the number
2116 * of pageblocks that cannot be completely freed
2117 * may increase.
2118 */
2119 set_pageblock_migratetype(page, ac->migratetype);
04c8716f 2120 ret = move_freepages_block(zone, page, ac->migratetype);
29fac03b
MK
2121 if (ret) {
2122 spin_unlock_irqrestore(&zone->lock, flags);
2123 return ret;
2124 }
0aaa29a5
MG
2125 }
2126 spin_unlock_irqrestore(&zone->lock, flags);
2127 }
04c8716f
MK
2128
2129 return false;
0aaa29a5
MG
2130}
2131
b2a0ac88 2132/* Remove an element from the buddy allocator from the fallback list */
0ac3a409 2133static inline struct page *
7aeb09f9 2134__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
b2a0ac88 2135{
b8af2941 2136 struct free_area *area;
7aeb09f9 2137 unsigned int current_order;
b2a0ac88 2138 struct page *page;
4eb7dce6
JK
2139 int fallback_mt;
2140 bool can_steal;
b2a0ac88
MG
2141
2142 /* Find the largest possible block of pages in the other list */
7aeb09f9
MG
2143 for (current_order = MAX_ORDER-1;
2144 current_order >= order && current_order <= MAX_ORDER-1;
2145 --current_order) {
4eb7dce6
JK
2146 area = &(zone->free_area[current_order]);
2147 fallback_mt = find_suitable_fallback(area, current_order,
2149cdae 2148 start_migratetype, false, &can_steal);
4eb7dce6
JK
2149 if (fallback_mt == -1)
2150 continue;
b2a0ac88 2151
a16601c5 2152 page = list_first_entry(&area->free_list[fallback_mt],
4eb7dce6 2153 struct page, lru);
a6ffdc07 2154 if (can_steal && !is_migrate_highatomic_page(page))
4eb7dce6 2155 steal_suitable_fallback(zone, page, start_migratetype);
b2a0ac88 2156
4eb7dce6
JK
2157 /* Remove the page from the freelists */
2158 area->nr_free--;
2159 list_del(&page->lru);
2160 rmv_page_order(page);
3a1086fb 2161
4eb7dce6
JK
2162 expand(zone, page, order, current_order, area,
2163 start_migratetype);
2164 /*
bb14c2c7 2165 * The pcppage_migratetype may differ from pageblock's
4eb7dce6 2166 * migratetype depending on the decisions in
bb14c2c7
VB
2167 * find_suitable_fallback(). This is OK as long as it does not
2168 * differ for MIGRATE_CMA pageblocks. Those can be used as
2169 * fallback only via special __rmqueue_cma_fallback() function
4eb7dce6 2170 */
bb14c2c7 2171 set_pcppage_migratetype(page, start_migratetype);
e0fff1bd 2172
4eb7dce6
JK
2173 trace_mm_page_alloc_extfrag(page, order, current_order,
2174 start_migratetype, fallback_mt);
e0fff1bd 2175
4eb7dce6 2176 return page;
b2a0ac88
MG
2177 }
2178
728ec980 2179 return NULL;
b2a0ac88
MG
2180}
2181
56fd56b8 2182/*
1da177e4
LT
2183 * Do the hard work of removing an element from the buddy allocator.
2184 * Call me with the zone->lock already held.
2185 */
b2a0ac88 2186static struct page *__rmqueue(struct zone *zone, unsigned int order,
6ac0206b 2187 int migratetype)
1da177e4 2188{
1da177e4
LT
2189 struct page *page;
2190
56fd56b8 2191 page = __rmqueue_smallest(zone, order, migratetype);
974a786e 2192 if (unlikely(!page)) {
dc67647b
JK
2193 if (migratetype == MIGRATE_MOVABLE)
2194 page = __rmqueue_cma_fallback(zone, order);
2195
2196 if (!page)
2197 page = __rmqueue_fallback(zone, order, migratetype);
728ec980
MG
2198 }
2199
0d3d062a 2200 trace_mm_page_alloc_zone_locked(page, order, migratetype);
b2a0ac88 2201 return page;
1da177e4
LT
2202}
2203
5f63b720 2204/*
1da177e4
LT
2205 * Obtain a specified number of elements from the buddy allocator, all under
2206 * a single hold of the lock, for efficiency. Add them to the supplied list.
2207 * Returns the number of new pages which were placed at *list.
2208 */
5f63b720 2209static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 2210 unsigned long count, struct list_head *list,
b745bc85 2211 int migratetype, bool cold)
1da177e4 2212{
a6de734b 2213 int i, alloced = 0;
5f63b720 2214
d34b0733 2215 spin_lock(&zone->lock);
1da177e4 2216 for (i = 0; i < count; ++i) {
6ac0206b 2217 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 2218 if (unlikely(page == NULL))
1da177e4 2219 break;
81eabcbe 2220
479f854a
MG
2221 if (unlikely(check_pcp_refill(page)))
2222 continue;
2223
81eabcbe
MG
2224 /*
2225 * Split buddy pages returned by expand() are received here
2226 * in physical page order. The page is added to the callers and
2227 * list and the list head then moves forward. From the callers
2228 * perspective, the linked list is ordered by page number in
2229 * some conditions. This is useful for IO devices that can
2230 * merge IO requests if the physical pages are ordered
2231 * properly.
2232 */
b745bc85 2233 if (likely(!cold))
e084b2d9
MG
2234 list_add(&page->lru, list);
2235 else
2236 list_add_tail(&page->lru, list);
81eabcbe 2237 list = &page->lru;
a6de734b 2238 alloced++;
bb14c2c7 2239 if (is_migrate_cma(get_pcppage_migratetype(page)))
d1ce749a
BZ
2240 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2241 -(1 << order));
1da177e4 2242 }
a6de734b
MG
2243
2244 /*
2245 * i pages were removed from the buddy list even if some leak due
2246 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2247 * on i. Do not confuse with 'alloced' which is the number of
2248 * pages added to the pcp list.
2249 */
f2260e6b 2250 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
d34b0733 2251 spin_unlock(&zone->lock);
a6de734b 2252 return alloced;
1da177e4
LT
2253}
2254
4ae7c039 2255#ifdef CONFIG_NUMA
8fce4d8e 2256/*
4037d452
CL
2257 * Called from the vmstat counter updater to drain pagesets of this
2258 * currently executing processor on remote nodes after they have
2259 * expired.
2260 *
879336c3
CL
2261 * Note that this function must be called with the thread pinned to
2262 * a single processor.
8fce4d8e 2263 */
4037d452 2264void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 2265{
4ae7c039 2266 unsigned long flags;
7be12fc9 2267 int to_drain, batch;
4ae7c039 2268
4037d452 2269 local_irq_save(flags);
4db0c3c2 2270 batch = READ_ONCE(pcp->batch);
7be12fc9 2271 to_drain = min(pcp->count, batch);
2a13515c
KM
2272 if (to_drain > 0) {
2273 free_pcppages_bulk(zone, to_drain, pcp);
2274 pcp->count -= to_drain;
2275 }
4037d452 2276 local_irq_restore(flags);
4ae7c039
CL
2277}
2278#endif
2279
9f8f2172 2280/*
93481ff0 2281 * Drain pcplists of the indicated processor and zone.
9f8f2172
CL
2282 *
2283 * The processor must either be the current processor and the
2284 * thread pinned to the current processor or a processor that
2285 * is not online.
2286 */
93481ff0 2287static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1da177e4 2288{
c54ad30c 2289 unsigned long flags;
93481ff0
VB
2290 struct per_cpu_pageset *pset;
2291 struct per_cpu_pages *pcp;
1da177e4 2292
93481ff0
VB
2293 local_irq_save(flags);
2294 pset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 2295
93481ff0
VB
2296 pcp = &pset->pcp;
2297 if (pcp->count) {
2298 free_pcppages_bulk(zone, pcp->count, pcp);
2299 pcp->count = 0;
2300 }
2301 local_irq_restore(flags);
2302}
3dfa5721 2303
93481ff0
VB
2304/*
2305 * Drain pcplists of all zones on the indicated processor.
2306 *
2307 * The processor must either be the current processor and the
2308 * thread pinned to the current processor or a processor that
2309 * is not online.
2310 */
2311static void drain_pages(unsigned int cpu)
2312{
2313 struct zone *zone;
2314
2315 for_each_populated_zone(zone) {
2316 drain_pages_zone(cpu, zone);
1da177e4
LT
2317 }
2318}
1da177e4 2319
9f8f2172
CL
2320/*
2321 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
93481ff0
VB
2322 *
2323 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2324 * the single zone's pages.
9f8f2172 2325 */
93481ff0 2326void drain_local_pages(struct zone *zone)
9f8f2172 2327{
93481ff0
VB
2328 int cpu = smp_processor_id();
2329
2330 if (zone)
2331 drain_pages_zone(cpu, zone);
2332 else
2333 drain_pages(cpu);
9f8f2172
CL
2334}
2335
0ccce3b9
MG
2336static void drain_local_pages_wq(struct work_struct *work)
2337{
a459eeb7
MH
2338 /*
2339 * drain_all_pages doesn't use proper cpu hotplug protection so
2340 * we can race with cpu offline when the WQ can move this from
2341 * a cpu pinned worker to an unbound one. We can operate on a different
2342 * cpu which is allright but we also have to make sure to not move to
2343 * a different one.
2344 */
2345 preempt_disable();
0ccce3b9 2346 drain_local_pages(NULL);
a459eeb7 2347 preempt_enable();
0ccce3b9
MG
2348}
2349
9f8f2172 2350/*
74046494
GBY
2351 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2352 *
93481ff0
VB
2353 * When zone parameter is non-NULL, spill just the single zone's pages.
2354 *
0ccce3b9 2355 * Note that this can be extremely slow as the draining happens in a workqueue.
9f8f2172 2356 */
93481ff0 2357void drain_all_pages(struct zone *zone)
9f8f2172 2358{
74046494 2359 int cpu;
74046494
GBY
2360
2361 /*
2362 * Allocate in the BSS so we wont require allocation in
2363 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2364 */
2365 static cpumask_t cpus_with_pcps;
2366
ce612879
MH
2367 /*
2368 * Make sure nobody triggers this path before mm_percpu_wq is fully
2369 * initialized.
2370 */
2371 if (WARN_ON_ONCE(!mm_percpu_wq))
2372 return;
2373
0ccce3b9
MG
2374 /* Workqueues cannot recurse */
2375 if (current->flags & PF_WQ_WORKER)
2376 return;
2377
bd233f53
MG
2378 /*
2379 * Do not drain if one is already in progress unless it's specific to
2380 * a zone. Such callers are primarily CMA and memory hotplug and need
2381 * the drain to be complete when the call returns.
2382 */
2383 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2384 if (!zone)
2385 return;
2386 mutex_lock(&pcpu_drain_mutex);
2387 }
0ccce3b9 2388
74046494
GBY
2389 /*
2390 * We don't care about racing with CPU hotplug event
2391 * as offline notification will cause the notified
2392 * cpu to drain that CPU pcps and on_each_cpu_mask
2393 * disables preemption as part of its processing
2394 */
2395 for_each_online_cpu(cpu) {
93481ff0
VB
2396 struct per_cpu_pageset *pcp;
2397 struct zone *z;
74046494 2398 bool has_pcps = false;
93481ff0
VB
2399
2400 if (zone) {
74046494 2401 pcp = per_cpu_ptr(zone->pageset, cpu);
93481ff0 2402 if (pcp->pcp.count)
74046494 2403 has_pcps = true;
93481ff0
VB
2404 } else {
2405 for_each_populated_zone(z) {
2406 pcp = per_cpu_ptr(z->pageset, cpu);
2407 if (pcp->pcp.count) {
2408 has_pcps = true;
2409 break;
2410 }
74046494
GBY
2411 }
2412 }
93481ff0 2413
74046494
GBY
2414 if (has_pcps)
2415 cpumask_set_cpu(cpu, &cpus_with_pcps);
2416 else
2417 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2418 }
0ccce3b9 2419
bd233f53
MG
2420 for_each_cpu(cpu, &cpus_with_pcps) {
2421 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2422 INIT_WORK(work, drain_local_pages_wq);
ce612879 2423 queue_work_on(cpu, mm_percpu_wq, work);
0ccce3b9 2424 }
bd233f53
MG
2425 for_each_cpu(cpu, &cpus_with_pcps)
2426 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
2427
2428 mutex_unlock(&pcpu_drain_mutex);
9f8f2172
CL
2429}
2430
296699de 2431#ifdef CONFIG_HIBERNATION
1da177e4
LT
2432
2433void mark_free_pages(struct zone *zone)
2434{
f623f0db
RW
2435 unsigned long pfn, max_zone_pfn;
2436 unsigned long flags;
7aeb09f9 2437 unsigned int order, t;
86760a2c 2438 struct page *page;
1da177e4 2439
8080fc03 2440 if (zone_is_empty(zone))
1da177e4
LT
2441 return;
2442
2443 spin_lock_irqsave(&zone->lock, flags);
f623f0db 2444
108bcc96 2445 max_zone_pfn = zone_end_pfn(zone);
f623f0db
RW
2446 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2447 if (pfn_valid(pfn)) {
86760a2c 2448 page = pfn_to_page(pfn);
ba6b0979
JK
2449
2450 if (page_zone(page) != zone)
2451 continue;
2452
7be98234
RW
2453 if (!swsusp_page_is_forbidden(page))
2454 swsusp_unset_page_free(page);
f623f0db 2455 }
1da177e4 2456
b2a0ac88 2457 for_each_migratetype_order(order, t) {
86760a2c
GT
2458 list_for_each_entry(page,
2459 &zone->free_area[order].free_list[t], lru) {
f623f0db 2460 unsigned long i;
1da177e4 2461
86760a2c 2462 pfn = page_to_pfn(page);
f623f0db 2463 for (i = 0; i < (1UL << order); i++)
7be98234 2464 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 2465 }
b2a0ac88 2466 }
1da177e4
LT
2467 spin_unlock_irqrestore(&zone->lock, flags);
2468}
e2c55dc8 2469#endif /* CONFIG_PM */
1da177e4 2470
1da177e4
LT
2471/*
2472 * Free a 0-order page
b745bc85 2473 * cold == true ? free a cold page : free a hot page
1da177e4 2474 */
b745bc85 2475void free_hot_cold_page(struct page *page, bool cold)
1da177e4
LT
2476{
2477 struct zone *zone = page_zone(page);
2478 struct per_cpu_pages *pcp;
d34b0733 2479 unsigned long flags;
dc4b0caf 2480 unsigned long pfn = page_to_pfn(page);
5f8dcc21 2481 int migratetype;
1da177e4 2482
4db7548c 2483 if (!free_pcp_prepare(page))
689bcebf
HD
2484 return;
2485
dc4b0caf 2486 migratetype = get_pfnblock_migratetype(page, pfn);
bb14c2c7 2487 set_pcppage_migratetype(page, migratetype);
d34b0733
MG
2488 local_irq_save(flags);
2489 __count_vm_event(PGFREE);
da456f14 2490
5f8dcc21
MG
2491 /*
2492 * We only track unmovable, reclaimable and movable on pcp lists.
2493 * Free ISOLATE pages back to the allocator because they are being
a6ffdc07 2494 * offlined but treat HIGHATOMIC as movable pages so we can get those
5f8dcc21
MG
2495 * areas back if necessary. Otherwise, we may have to free
2496 * excessively into the page allocator
2497 */
2498 if (migratetype >= MIGRATE_PCPTYPES) {
194159fb 2499 if (unlikely(is_migrate_isolate(migratetype))) {
dc4b0caf 2500 free_one_page(zone, page, pfn, 0, migratetype);
5f8dcc21
MG
2501 goto out;
2502 }
2503 migratetype = MIGRATE_MOVABLE;
2504 }
2505
99dcc3e5 2506 pcp = &this_cpu_ptr(zone->pageset)->pcp;
b745bc85 2507 if (!cold)
5f8dcc21 2508 list_add(&page->lru, &pcp->lists[migratetype]);
b745bc85
MG
2509 else
2510 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1da177e4 2511 pcp->count++;
48db57f8 2512 if (pcp->count >= pcp->high) {
4db0c3c2 2513 unsigned long batch = READ_ONCE(pcp->batch);
998d39cb
CS
2514 free_pcppages_bulk(zone, batch, pcp);
2515 pcp->count -= batch;
48db57f8 2516 }
5f8dcc21
MG
2517
2518out:
d34b0733 2519 local_irq_restore(flags);
1da177e4
LT
2520}
2521
cc59850e
KK
2522/*
2523 * Free a list of 0-order pages
2524 */
b745bc85 2525void free_hot_cold_page_list(struct list_head *list, bool cold)
cc59850e
KK
2526{
2527 struct page *page, *next;
2528
2529 list_for_each_entry_safe(page, next, list, lru) {
b413d48a 2530 trace_mm_page_free_batched(page, cold);
cc59850e
KK
2531 free_hot_cold_page(page, cold);
2532 }
2533}
2534
8dfcc9ba
NP
2535/*
2536 * split_page takes a non-compound higher-order page, and splits it into
2537 * n (1<<order) sub-pages: page[0..n]
2538 * Each sub-page must be freed individually.
2539 *
2540 * Note: this is probably too low level an operation for use in drivers.
2541 * Please consult with lkml before using this in your driver.
2542 */
2543void split_page(struct page *page, unsigned int order)
2544{
2545 int i;
2546
309381fe
SL
2547 VM_BUG_ON_PAGE(PageCompound(page), page);
2548 VM_BUG_ON_PAGE(!page_count(page), page);
b1eeab67
VN
2549
2550#ifdef CONFIG_KMEMCHECK
2551 /*
2552 * Split shadow pages too, because free(page[0]) would
2553 * otherwise free the whole shadow.
2554 */
2555 if (kmemcheck_page_is_tracked(page))
2556 split_page(virt_to_page(page[0].shadow), order);
2557#endif
2558
a9627bc5 2559 for (i = 1; i < (1 << order); i++)
7835e98b 2560 set_page_refcounted(page + i);
a9627bc5 2561 split_page_owner(page, order);
8dfcc9ba 2562}
5853ff23 2563EXPORT_SYMBOL_GPL(split_page);
8dfcc9ba 2564
3c605096 2565int __isolate_free_page(struct page *page, unsigned int order)
748446bb 2566{
748446bb
MG
2567 unsigned long watermark;
2568 struct zone *zone;
2139cbe6 2569 int mt;
748446bb
MG
2570
2571 BUG_ON(!PageBuddy(page));
2572
2573 zone = page_zone(page);
2e30abd1 2574 mt = get_pageblock_migratetype(page);
748446bb 2575
194159fb 2576 if (!is_migrate_isolate(mt)) {
8348faf9
VB
2577 /*
2578 * Obey watermarks as if the page was being allocated. We can
2579 * emulate a high-order watermark check with a raised order-0
2580 * watermark, because we already know our high-order page
2581 * exists.
2582 */
2583 watermark = min_wmark_pages(zone) + (1UL << order);
984fdba6 2584 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2e30abd1
MS
2585 return 0;
2586
8fb74b9f 2587 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2e30abd1 2588 }
748446bb
MG
2589
2590 /* Remove page from free list */
2591 list_del(&page->lru);
2592 zone->free_area[order].nr_free--;
2593 rmv_page_order(page);
2139cbe6 2594
400bc7fd 2595 /*
2596 * Set the pageblock if the isolated page is at least half of a
2597 * pageblock
2598 */
748446bb
MG
2599 if (order >= pageblock_order - 1) {
2600 struct page *endpage = page + (1 << order) - 1;
47118af0
MN
2601 for (; page < endpage; page += pageblock_nr_pages) {
2602 int mt = get_pageblock_migratetype(page);
88ed365e 2603 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
a6ffdc07 2604 && !is_migrate_highatomic(mt))
47118af0
MN
2605 set_pageblock_migratetype(page,
2606 MIGRATE_MOVABLE);
2607 }
748446bb
MG
2608 }
2609
f3a14ced 2610
8fb74b9f 2611 return 1UL << order;
1fb3f8ca
MG
2612}
2613
060e7417
MG
2614/*
2615 * Update NUMA hit/miss statistics
2616 *
2617 * Must be called with interrupts disabled.
060e7417 2618 */
41b6167e 2619static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
060e7417
MG
2620{
2621#ifdef CONFIG_NUMA
060e7417
MG
2622 enum zone_stat_item local_stat = NUMA_LOCAL;
2623
2df26639 2624 if (z->node != numa_node_id())
060e7417 2625 local_stat = NUMA_OTHER;
060e7417 2626
2df26639 2627 if (z->node == preferred_zone->node)
060e7417 2628 __inc_zone_state(z, NUMA_HIT);
2df26639 2629 else {
060e7417
MG
2630 __inc_zone_state(z, NUMA_MISS);
2631 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2632 }
2df26639 2633 __inc_zone_state(z, local_stat);
060e7417
MG
2634#endif
2635}
2636
066b2393
MG
2637/* Remove page from the per-cpu list, caller must protect the list */
2638static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2639 bool cold, struct per_cpu_pages *pcp,
2640 struct list_head *list)
2641{
2642 struct page *page;
2643
2644 do {
2645 if (list_empty(list)) {
2646 pcp->count += rmqueue_bulk(zone, 0,
2647 pcp->batch, list,
2648 migratetype, cold);
2649 if (unlikely(list_empty(list)))
2650 return NULL;
2651 }
2652
2653 if (cold)
2654 page = list_last_entry(list, struct page, lru);
2655 else
2656 page = list_first_entry(list, struct page, lru);
2657
2658 list_del(&page->lru);
2659 pcp->count--;
2660 } while (check_new_pcp(page));
2661
2662 return page;
2663}
2664
2665/* Lock and remove page from the per-cpu list */
2666static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2667 struct zone *zone, unsigned int order,
2668 gfp_t gfp_flags, int migratetype)
2669{
2670 struct per_cpu_pages *pcp;
2671 struct list_head *list;
2672 bool cold = ((gfp_flags & __GFP_COLD) != 0);
2673 struct page *page;
d34b0733 2674 unsigned long flags;
066b2393 2675
d34b0733 2676 local_irq_save(flags);
066b2393
MG
2677 pcp = &this_cpu_ptr(zone->pageset)->pcp;
2678 list = &pcp->lists[migratetype];
2679 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
2680 if (page) {
2681 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2682 zone_statistics(preferred_zone, zone);
2683 }
d34b0733 2684 local_irq_restore(flags);
066b2393
MG
2685 return page;
2686}
2687
1da177e4 2688/*
75379191 2689 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
1da177e4 2690 */
0a15c3e9 2691static inline
066b2393 2692struct page *rmqueue(struct zone *preferred_zone,
7aeb09f9 2693 struct zone *zone, unsigned int order,
c603844b
MG
2694 gfp_t gfp_flags, unsigned int alloc_flags,
2695 int migratetype)
1da177e4
LT
2696{
2697 unsigned long flags;
689bcebf 2698 struct page *page;
1da177e4 2699
d34b0733 2700 if (likely(order == 0)) {
066b2393
MG
2701 page = rmqueue_pcplist(preferred_zone, zone, order,
2702 gfp_flags, migratetype);
2703 goto out;
2704 }
83b9355b 2705
066b2393
MG
2706 /*
2707 * We most definitely don't want callers attempting to
2708 * allocate greater than order-1 page units with __GFP_NOFAIL.
2709 */
2710 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2711 spin_lock_irqsave(&zone->lock, flags);
0aaa29a5 2712
066b2393
MG
2713 do {
2714 page = NULL;
2715 if (alloc_flags & ALLOC_HARDER) {
2716 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2717 if (page)
2718 trace_mm_page_alloc_zone_locked(page, order, migratetype);
2719 }
a74609fa 2720 if (!page)
066b2393
MG
2721 page = __rmqueue(zone, order, migratetype);
2722 } while (page && check_new_pages(page, order));
2723 spin_unlock(&zone->lock);
2724 if (!page)
2725 goto failed;
2726 __mod_zone_freepage_state(zone, -(1 << order),
2727 get_pcppage_migratetype(page));
1da177e4 2728
16709d1d 2729 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
41b6167e 2730 zone_statistics(preferred_zone, zone);
a74609fa 2731 local_irq_restore(flags);
1da177e4 2732
066b2393
MG
2733out:
2734 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
1da177e4 2735 return page;
a74609fa
NP
2736
2737failed:
2738 local_irq_restore(flags);
a74609fa 2739 return NULL;
1da177e4
LT
2740}
2741
933e312e
AM
2742#ifdef CONFIG_FAIL_PAGE_ALLOC
2743
b2588c4b 2744static struct {
933e312e
AM
2745 struct fault_attr attr;
2746
621a5f7a 2747 bool ignore_gfp_highmem;
71baba4b 2748 bool ignore_gfp_reclaim;
54114994 2749 u32 min_order;
933e312e
AM
2750} fail_page_alloc = {
2751 .attr = FAULT_ATTR_INITIALIZER,
71baba4b 2752 .ignore_gfp_reclaim = true,
621a5f7a 2753 .ignore_gfp_highmem = true,
54114994 2754 .min_order = 1,
933e312e
AM
2755};
2756
2757static int __init setup_fail_page_alloc(char *str)
2758{
2759 return setup_fault_attr(&fail_page_alloc.attr, str);
2760}
2761__setup("fail_page_alloc=", setup_fail_page_alloc);
2762
deaf386e 2763static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 2764{
54114994 2765 if (order < fail_page_alloc.min_order)
deaf386e 2766 return false;
933e312e 2767 if (gfp_mask & __GFP_NOFAIL)
deaf386e 2768 return false;
933e312e 2769 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
deaf386e 2770 return false;
71baba4b
MG
2771 if (fail_page_alloc.ignore_gfp_reclaim &&
2772 (gfp_mask & __GFP_DIRECT_RECLAIM))
deaf386e 2773 return false;
933e312e
AM
2774
2775 return should_fail(&fail_page_alloc.attr, 1 << order);
2776}
2777
2778#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2779
2780static int __init fail_page_alloc_debugfs(void)
2781{
f4ae40a6 2782 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
933e312e 2783 struct dentry *dir;
933e312e 2784
dd48c085
AM
2785 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2786 &fail_page_alloc.attr);
2787 if (IS_ERR(dir))
2788 return PTR_ERR(dir);
933e312e 2789
b2588c4b 2790 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
71baba4b 2791 &fail_page_alloc.ignore_gfp_reclaim))
b2588c4b
AM
2792 goto fail;
2793 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2794 &fail_page_alloc.ignore_gfp_highmem))
2795 goto fail;
2796 if (!debugfs_create_u32("min-order", mode, dir,
2797 &fail_page_alloc.min_order))
2798 goto fail;
2799
2800 return 0;
2801fail:
dd48c085 2802 debugfs_remove_recursive(dir);
933e312e 2803
b2588c4b 2804 return -ENOMEM;
933e312e
AM
2805}
2806
2807late_initcall(fail_page_alloc_debugfs);
2808
2809#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2810
2811#else /* CONFIG_FAIL_PAGE_ALLOC */
2812
deaf386e 2813static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
933e312e 2814{
deaf386e 2815 return false;
933e312e
AM
2816}
2817
2818#endif /* CONFIG_FAIL_PAGE_ALLOC */
2819
1da177e4 2820/*
97a16fc8
MG
2821 * Return true if free base pages are above 'mark'. For high-order checks it
2822 * will return true of the order-0 watermark is reached and there is at least
2823 * one free page of a suitable size. Checking now avoids taking the zone lock
2824 * to check in the allocation paths if no pages are free.
1da177e4 2825 */
86a294a8
MH
2826bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2827 int classzone_idx, unsigned int alloc_flags,
2828 long free_pages)
1da177e4 2829{
d23ad423 2830 long min = mark;
1da177e4 2831 int o;
c603844b 2832 const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
1da177e4 2833
0aaa29a5 2834 /* free_pages may go negative - that's OK */
df0a6daa 2835 free_pages -= (1 << order) - 1;
0aaa29a5 2836
7fb1d9fc 2837 if (alloc_flags & ALLOC_HIGH)
1da177e4 2838 min -= min / 2;
0aaa29a5
MG
2839
2840 /*
2841 * If the caller does not have rights to ALLOC_HARDER then subtract
2842 * the high-atomic reserves. This will over-estimate the size of the
2843 * atomic reserve but it avoids a search.
2844 */
97a16fc8 2845 if (likely(!alloc_harder))
0aaa29a5
MG
2846 free_pages -= z->nr_reserved_highatomic;
2847 else
1da177e4 2848 min -= min / 4;
e2b19197 2849
d95ea5d1
BZ
2850#ifdef CONFIG_CMA
2851 /* If allocation can't use CMA areas don't use free CMA pages */
2852 if (!(alloc_flags & ALLOC_CMA))
97a16fc8 2853 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
d95ea5d1 2854#endif
026b0814 2855
97a16fc8
MG
2856 /*
2857 * Check watermarks for an order-0 allocation request. If these
2858 * are not met, then a high-order request also cannot go ahead
2859 * even if a suitable page happened to be free.
2860 */
2861 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
88f5acf8 2862 return false;
1da177e4 2863
97a16fc8
MG
2864 /* If this is an order-0 request then the watermark is fine */
2865 if (!order)
2866 return true;
2867
2868 /* For a high-order request, check at least one suitable page is free */
2869 for (o = order; o < MAX_ORDER; o++) {
2870 struct free_area *area = &z->free_area[o];
2871 int mt;
2872
2873 if (!area->nr_free)
2874 continue;
2875
2876 if (alloc_harder)
2877 return true;
1da177e4 2878
97a16fc8
MG
2879 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2880 if (!list_empty(&area->free_list[mt]))
2881 return true;
2882 }
2883
2884#ifdef CONFIG_CMA
2885 if ((alloc_flags & ALLOC_CMA) &&
2886 !list_empty(&area->free_list[MIGRATE_CMA])) {
2887 return true;
2888 }
2889#endif
1da177e4 2890 }
97a16fc8 2891 return false;
88f5acf8
MG
2892}
2893
7aeb09f9 2894bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
c603844b 2895 int classzone_idx, unsigned int alloc_flags)
88f5acf8
MG
2896{
2897 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2898 zone_page_state(z, NR_FREE_PAGES));
2899}
2900
48ee5f36
MG
2901static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2902 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
2903{
2904 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2905 long cma_pages = 0;
2906
2907#ifdef CONFIG_CMA
2908 /* If allocation can't use CMA areas don't use free CMA pages */
2909 if (!(alloc_flags & ALLOC_CMA))
2910 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
2911#endif
2912
2913 /*
2914 * Fast check for order-0 only. If this fails then the reserves
2915 * need to be calculated. There is a corner case where the check
2916 * passes but only the high-order atomic reserve are free. If
2917 * the caller is !atomic then it'll uselessly search the free
2918 * list. That corner case is then slower but it is harmless.
2919 */
2920 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
2921 return true;
2922
2923 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
2924 free_pages);
2925}
2926
7aeb09f9 2927bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
e2b19197 2928 unsigned long mark, int classzone_idx)
88f5acf8
MG
2929{
2930 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2931
2932 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2933 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2934
e2b19197 2935 return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
88f5acf8 2936 free_pages);
1da177e4
LT
2937}
2938
9276b1bc 2939#ifdef CONFIG_NUMA
957f822a
DR
2940static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2941{
e02dc017 2942 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
5f7a75ac 2943 RECLAIM_DISTANCE;
957f822a 2944}
9276b1bc 2945#else /* CONFIG_NUMA */
957f822a
DR
2946static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2947{
2948 return true;
2949}
9276b1bc
PJ
2950#endif /* CONFIG_NUMA */
2951
7fb1d9fc 2952/*
0798e519 2953 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
2954 * a page.
2955 */
2956static struct page *
a9263751
VB
2957get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
2958 const struct alloc_context *ac)
753ee728 2959{
c33d6c06 2960 struct zoneref *z = ac->preferred_zoneref;
5117f45d 2961 struct zone *zone;
3b8c0be4
MG
2962 struct pglist_data *last_pgdat_dirty_limit = NULL;
2963
7fb1d9fc 2964 /*
9276b1bc 2965 * Scan zonelist, looking for a zone with enough free.
344736f2 2966 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
7fb1d9fc 2967 */
c33d6c06 2968 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
a9263751 2969 ac->nodemask) {
be06af00 2970 struct page *page;
e085dbc5
JW
2971 unsigned long mark;
2972
664eedde
MG
2973 if (cpusets_enabled() &&
2974 (alloc_flags & ALLOC_CPUSET) &&
002f2906 2975 !__cpuset_zone_allowed(zone, gfp_mask))
cd38b115 2976 continue;
a756cf59
JW
2977 /*
2978 * When allocating a page cache page for writing, we
281e3726
MG
2979 * want to get it from a node that is within its dirty
2980 * limit, such that no single node holds more than its
a756cf59 2981 * proportional share of globally allowed dirty pages.
281e3726 2982 * The dirty limits take into account the node's
a756cf59
JW
2983 * lowmem reserves and high watermark so that kswapd
2984 * should be able to balance it without having to
2985 * write pages from its LRU list.
2986 *
a756cf59 2987 * XXX: For now, allow allocations to potentially
281e3726 2988 * exceed the per-node dirty limit in the slowpath
c9ab0c4f 2989 * (spread_dirty_pages unset) before going into reclaim,
a756cf59 2990 * which is important when on a NUMA setup the allowed
281e3726 2991 * nodes are together not big enough to reach the
a756cf59 2992 * global limit. The proper fix for these situations
281e3726 2993 * will require awareness of nodes in the
a756cf59
JW
2994 * dirty-throttling and the flusher threads.
2995 */
3b8c0be4
MG
2996 if (ac->spread_dirty_pages) {
2997 if (last_pgdat_dirty_limit == zone->zone_pgdat)
2998 continue;
2999
3000 if (!node_dirty_ok(zone->zone_pgdat)) {
3001 last_pgdat_dirty_limit = zone->zone_pgdat;
3002 continue;
3003 }
3004 }
7fb1d9fc 3005
e085dbc5 3006 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
48ee5f36 3007 if (!zone_watermark_fast(zone, order, mark,
93ea9964 3008 ac_classzone_idx(ac), alloc_flags)) {
fa5e084e
MG
3009 int ret;
3010
5dab2911
MG
3011 /* Checked here to keep the fast path fast */
3012 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3013 if (alloc_flags & ALLOC_NO_WATERMARKS)
3014 goto try_this_zone;
3015
a5f5f91d 3016 if (node_reclaim_mode == 0 ||
c33d6c06 3017 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
cd38b115
MG
3018 continue;
3019
a5f5f91d 3020 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
fa5e084e 3021 switch (ret) {
a5f5f91d 3022 case NODE_RECLAIM_NOSCAN:
fa5e084e 3023 /* did not scan */
cd38b115 3024 continue;
a5f5f91d 3025 case NODE_RECLAIM_FULL:
fa5e084e 3026 /* scanned but unreclaimable */
cd38b115 3027 continue;
fa5e084e
MG
3028 default:
3029 /* did we reclaim enough */
fed2719e 3030 if (zone_watermark_ok(zone, order, mark,
93ea9964 3031 ac_classzone_idx(ac), alloc_flags))
fed2719e
MG
3032 goto try_this_zone;
3033
fed2719e 3034 continue;
0798e519 3035 }
7fb1d9fc
RS
3036 }
3037
fa5e084e 3038try_this_zone:
066b2393 3039 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
0aaa29a5 3040 gfp_mask, alloc_flags, ac->migratetype);
75379191 3041 if (page) {
479f854a 3042 prep_new_page(page, order, gfp_mask, alloc_flags);
0aaa29a5
MG
3043
3044 /*
3045 * If this is a high-order atomic allocation then check
3046 * if the pageblock should be reserved for the future
3047 */
3048 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3049 reserve_highatomic_pageblock(page, zone, order);
3050
75379191
VB
3051 return page;
3052 }
54a6eb5c 3053 }
9276b1bc 3054
4ffeaf35 3055 return NULL;
753ee728
MH
3056}
3057
29423e77
DR
3058/*
3059 * Large machines with many possible nodes should not always dump per-node
3060 * meminfo in irq context.
3061 */
3062static inline bool should_suppress_show_mem(void)
3063{
3064 bool ret = false;
3065
3066#if NODES_SHIFT > 8
3067 ret = in_interrupt();
3068#endif
3069 return ret;
3070}
3071
9af744d7 3072static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
a238ab5b 3073{
a238ab5b 3074 unsigned int filter = SHOW_MEM_FILTER_NODES;
aa187507 3075 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
a238ab5b 3076
aa187507 3077 if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
a238ab5b
DH
3078 return;
3079
3080 /*
3081 * This documents exceptions given to allocations in certain
3082 * contexts that are allowed to allocate outside current's set
3083 * of allowed nodes.
3084 */
3085 if (!(gfp_mask & __GFP_NOMEMALLOC))
3086 if (test_thread_flag(TIF_MEMDIE) ||
3087 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3088 filter &= ~SHOW_MEM_FILTER_NODES;
d0164adc 3089 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
a238ab5b
DH
3090 filter &= ~SHOW_MEM_FILTER_NODES;
3091
9af744d7 3092 show_mem(filter, nodemask);
aa187507
MH
3093}
3094
a8e99259 3095void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
aa187507
MH
3096{
3097 struct va_format vaf;
3098 va_list args;
3099 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3100 DEFAULT_RATELIMIT_BURST);
3101
3102 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
3103 debug_guardpage_minorder() > 0)
3104 return;
3105
7877cdcc 3106 pr_warn("%s: ", current->comm);
3ee9a4f0 3107
7877cdcc
MH
3108 va_start(args, fmt);
3109 vaf.fmt = fmt;
3110 vaf.va = &args;
3111 pr_cont("%pV", &vaf);
3112 va_end(args);
3ee9a4f0 3113
685dbf6f
DR
3114 pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
3115 if (nodemask)
3116 pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
3117 else
3118 pr_cont("(null)\n");
3119
a8e99259 3120 cpuset_print_current_mems_allowed();
3ee9a4f0 3121
a238ab5b 3122 dump_stack();
685dbf6f 3123 warn_alloc_show_mem(gfp_mask, nodemask);
a238ab5b
DH
3124}
3125
6c18ba7a
MH
3126static inline struct page *
3127__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3128 unsigned int alloc_flags,
3129 const struct alloc_context *ac)
3130{
3131 struct page *page;
3132
3133 page = get_page_from_freelist(gfp_mask, order,
3134 alloc_flags|ALLOC_CPUSET, ac);
3135 /*
3136 * fallback to ignore cpuset restriction if our nodes
3137 * are depleted
3138 */
3139 if (!page)
3140 page = get_page_from_freelist(gfp_mask, order,
3141 alloc_flags, ac);
3142
3143 return page;
3144}
3145
11e33f6a
MG
3146static inline struct page *
3147__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
a9263751 3148 const struct alloc_context *ac, unsigned long *did_some_progress)
11e33f6a 3149{
6e0fc46d
DR
3150 struct oom_control oc = {
3151 .zonelist = ac->zonelist,
3152 .nodemask = ac->nodemask,
2a966b77 3153 .memcg = NULL,
6e0fc46d
DR
3154 .gfp_mask = gfp_mask,
3155 .order = order,
6e0fc46d 3156 };
11e33f6a
MG
3157 struct page *page;
3158
9879de73
JW
3159 *did_some_progress = 0;
3160
9879de73 3161 /*
dc56401f
JW
3162 * Acquire the oom lock. If that fails, somebody else is
3163 * making progress for us.
9879de73 3164 */
dc56401f 3165 if (!mutex_trylock(&oom_lock)) {
9879de73 3166 *did_some_progress = 1;
11e33f6a 3167 schedule_timeout_uninterruptible(1);
1da177e4
LT
3168 return NULL;
3169 }
6b1de916 3170
11e33f6a
MG
3171 /*
3172 * Go through the zonelist yet one more time, keep very high watermark
3173 * here, this is only to catch a parallel oom killing, we must fail if
3174 * we're still under heavy pressure.
3175 */
a9263751
VB
3176 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
3177 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
7fb1d9fc 3178 if (page)
11e33f6a
MG
3179 goto out;
3180
06ad276a
MH
3181 /* Coredumps can quickly deplete all memory reserves */
3182 if (current->flags & PF_DUMPCORE)
3183 goto out;
3184 /* The OOM killer will not help higher order allocs */
3185 if (order > PAGE_ALLOC_COSTLY_ORDER)
3186 goto out;
3187 /* The OOM killer does not needlessly kill tasks for lowmem */
3188 if (ac->high_zoneidx < ZONE_NORMAL)
3189 goto out;
3190 if (pm_suspended_storage())
3191 goto out;
3192 /*
3193 * XXX: GFP_NOFS allocations should rather fail than rely on
3194 * other request to make a forward progress.
3195 * We are in an unfortunate situation where out_of_memory cannot
3196 * do much for this context but let's try it to at least get
3197 * access to memory reserved if the current task is killed (see
3198 * out_of_memory). Once filesystems are ready to handle allocation
3199 * failures more gracefully we should just bail out here.
3200 */
3201
3202 /* The OOM killer may not free memory on a specific node */
3203 if (gfp_mask & __GFP_THISNODE)
3204 goto out;
3da88fb3 3205
11e33f6a 3206 /* Exhausted what can be done so it's blamo time */
5020e285 3207 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
c32b3cbe 3208 *did_some_progress = 1;
5020e285 3209
6c18ba7a
MH
3210 /*
3211 * Help non-failing allocations by giving them access to memory
3212 * reserves
3213 */
3214 if (gfp_mask & __GFP_NOFAIL)
3215 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
5020e285 3216 ALLOC_NO_WATERMARKS, ac);
5020e285 3217 }
11e33f6a 3218out:
dc56401f 3219 mutex_unlock(&oom_lock);
11e33f6a
MG
3220 return page;
3221}
3222
33c2d214
MH
3223/*
3224 * Maximum number of compaction retries wit a progress before OOM
3225 * killer is consider as the only way to move forward.
3226 */
3227#define MAX_COMPACT_RETRIES 16
3228
56de7263
MG
3229#ifdef CONFIG_COMPACTION
3230/* Try memory compaction for high-order allocations before reclaim */
3231static struct page *
3232__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 3233 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 3234 enum compact_priority prio, enum compact_result *compact_result)
56de7263 3235{
98dd3b48 3236 struct page *page;
53853e2d
VB
3237
3238 if (!order)
66199712 3239 return NULL;
66199712 3240
c06b1fca 3241 current->flags |= PF_MEMALLOC;
c5d01d0d 3242 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
c3486f53 3243 prio);
c06b1fca 3244 current->flags &= ~PF_MEMALLOC;
56de7263 3245
c5d01d0d 3246 if (*compact_result <= COMPACT_INACTIVE)
98dd3b48 3247 return NULL;
53853e2d 3248
98dd3b48
VB
3249 /*
3250 * At least in one zone compaction wasn't deferred or skipped, so let's
3251 * count a compaction stall
3252 */
3253 count_vm_event(COMPACTSTALL);
8fb74b9f 3254
31a6c190 3255 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
53853e2d 3256
98dd3b48
VB
3257 if (page) {
3258 struct zone *zone = page_zone(page);
53853e2d 3259
98dd3b48
VB
3260 zone->compact_blockskip_flush = false;
3261 compaction_defer_reset(zone, order, true);
3262 count_vm_event(COMPACTSUCCESS);
3263 return page;
3264 }
56de7263 3265
98dd3b48
VB
3266 /*
3267 * It's bad if compaction run occurs and fails. The most likely reason
3268 * is that pages exist, but not enough to satisfy watermarks.
3269 */
3270 count_vm_event(COMPACTFAIL);
66199712 3271
98dd3b48 3272 cond_resched();
56de7263
MG
3273
3274 return NULL;
3275}
33c2d214 3276
3250845d
VB
3277static inline bool
3278should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3279 enum compact_result compact_result,
3280 enum compact_priority *compact_priority,
d9436498 3281 int *compaction_retries)
3250845d
VB
3282{
3283 int max_retries = MAX_COMPACT_RETRIES;
c2033b00 3284 int min_priority;
65190cff
MH
3285 bool ret = false;
3286 int retries = *compaction_retries;
3287 enum compact_priority priority = *compact_priority;
3250845d
VB
3288
3289 if (!order)
3290 return false;
3291
d9436498
VB
3292 if (compaction_made_progress(compact_result))
3293 (*compaction_retries)++;
3294
3250845d
VB
3295 /*
3296 * compaction considers all the zone as desperately out of memory
3297 * so it doesn't really make much sense to retry except when the
3298 * failure could be caused by insufficient priority
3299 */
d9436498
VB
3300 if (compaction_failed(compact_result))
3301 goto check_priority;
3250845d
VB
3302
3303 /*
3304 * make sure the compaction wasn't deferred or didn't bail out early
3305 * due to locks contention before we declare that we should give up.
3306 * But do not retry if the given zonelist is not suitable for
3307 * compaction.
3308 */
65190cff
MH
3309 if (compaction_withdrawn(compact_result)) {
3310 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3311 goto out;
3312 }
3250845d
VB
3313
3314 /*
3315 * !costly requests are much more important than __GFP_REPEAT
3316 * costly ones because they are de facto nofail and invoke OOM
3317 * killer to move on while costly can fail and users are ready
3318 * to cope with that. 1/4 retries is rather arbitrary but we
3319 * would need much more detailed feedback from compaction to
3320 * make a better decision.
3321 */
3322 if (order > PAGE_ALLOC_COSTLY_ORDER)
3323 max_retries /= 4;
65190cff
MH
3324 if (*compaction_retries <= max_retries) {
3325 ret = true;
3326 goto out;
3327 }
3250845d 3328
d9436498
VB
3329 /*
3330 * Make sure there are attempts at the highest priority if we exhausted
3331 * all retries or failed at the lower priorities.
3332 */
3333check_priority:
c2033b00
VB
3334 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3335 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
65190cff 3336
c2033b00 3337 if (*compact_priority > min_priority) {
d9436498
VB
3338 (*compact_priority)--;
3339 *compaction_retries = 0;
65190cff 3340 ret = true;
d9436498 3341 }
65190cff
MH
3342out:
3343 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3344 return ret;
3250845d 3345}
56de7263
MG
3346#else
3347static inline struct page *
3348__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
c603844b 3349 unsigned int alloc_flags, const struct alloc_context *ac,
a5508cd8 3350 enum compact_priority prio, enum compact_result *compact_result)
56de7263 3351{
33c2d214 3352 *compact_result = COMPACT_SKIPPED;
56de7263
MG
3353 return NULL;
3354}
33c2d214
MH
3355
3356static inline bool
86a294a8
MH
3357should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3358 enum compact_result compact_result,
a5508cd8 3359 enum compact_priority *compact_priority,
d9436498 3360 int *compaction_retries)
33c2d214 3361{
31e49bfd
MH
3362 struct zone *zone;
3363 struct zoneref *z;
3364
3365 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3366 return false;
3367
3368 /*
3369 * There are setups with compaction disabled which would prefer to loop
3370 * inside the allocator rather than hit the oom killer prematurely.
3371 * Let's give them a good hope and keep retrying while the order-0
3372 * watermarks are OK.
3373 */
3374 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3375 ac->nodemask) {
3376 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3377 ac_classzone_idx(ac), alloc_flags))
3378 return true;
3379 }
33c2d214
MH
3380 return false;
3381}
3250845d 3382#endif /* CONFIG_COMPACTION */
56de7263 3383
bba90710
MS
3384/* Perform direct synchronous page reclaim */
3385static int
a9263751
VB
3386__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3387 const struct alloc_context *ac)
11e33f6a 3388{
11e33f6a 3389 struct reclaim_state reclaim_state;
bba90710 3390 int progress;
11e33f6a
MG
3391
3392 cond_resched();
3393
3394 /* We now go into synchronous reclaim */
3395 cpuset_memory_pressure_bump();
c06b1fca 3396 current->flags |= PF_MEMALLOC;
11e33f6a
MG
3397 lockdep_set_current_reclaim_state(gfp_mask);
3398 reclaim_state.reclaimed_slab = 0;
c06b1fca 3399 current->reclaim_state = &reclaim_state;
11e33f6a 3400
a9263751
VB
3401 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3402 ac->nodemask);
11e33f6a 3403
c06b1fca 3404 current->reclaim_state = NULL;
11e33f6a 3405 lockdep_clear_current_reclaim_state();
c06b1fca 3406 current->flags &= ~PF_MEMALLOC;
11e33f6a
MG
3407
3408 cond_resched();
3409
bba90710
MS
3410 return progress;
3411}
3412
3413/* The really slow allocator path where we enter direct reclaim */
3414static inline struct page *
3415__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
c603844b 3416 unsigned int alloc_flags, const struct alloc_context *ac,
a9263751 3417 unsigned long *did_some_progress)
bba90710
MS
3418{
3419 struct page *page = NULL;
3420 bool drained = false;
3421
a9263751 3422 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
9ee493ce
MG
3423 if (unlikely(!(*did_some_progress)))
3424 return NULL;
11e33f6a 3425
9ee493ce 3426retry:
31a6c190 3427 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
9ee493ce
MG
3428
3429 /*
3430 * If an allocation failed after direct reclaim, it could be because
0aaa29a5
MG
3431 * pages are pinned on the per-cpu lists or in high alloc reserves.
3432 * Shrink them them and try again
9ee493ce
MG
3433 */
3434 if (!page && !drained) {
29fac03b 3435 unreserve_highatomic_pageblock(ac, false);
93481ff0 3436 drain_all_pages(NULL);
9ee493ce
MG
3437 drained = true;
3438 goto retry;
3439 }
3440
11e33f6a
MG
3441 return page;
3442}
3443
a9263751 3444static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3a025760
JW
3445{
3446 struct zoneref *z;
3447 struct zone *zone;
e1a55637 3448 pg_data_t *last_pgdat = NULL;
3a025760 3449
a9263751 3450 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
e1a55637
MG
3451 ac->high_zoneidx, ac->nodemask) {
3452 if (last_pgdat != zone->zone_pgdat)
52e9f87a 3453 wakeup_kswapd(zone, order, ac->high_zoneidx);
e1a55637
MG
3454 last_pgdat = zone->zone_pgdat;
3455 }
3a025760
JW
3456}
3457
c603844b 3458static inline unsigned int
341ce06f
PZ
3459gfp_to_alloc_flags(gfp_t gfp_mask)
3460{
c603844b 3461 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1da177e4 3462
a56f57ff 3463 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
e6223a3b 3464 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
933e312e 3465
341ce06f
PZ
3466 /*
3467 * The caller may dip into page reserves a bit more if the caller
3468 * cannot run direct reclaim, or if the caller has realtime scheduling
3469 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
d0164adc 3470 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
341ce06f 3471 */
e6223a3b 3472 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1da177e4 3473
d0164adc 3474 if (gfp_mask & __GFP_ATOMIC) {
5c3240d9 3475 /*
b104a35d
DR
3476 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3477 * if it can't schedule.
5c3240d9 3478 */
b104a35d 3479 if (!(gfp_mask & __GFP_NOMEMALLOC))
5c3240d9 3480 alloc_flags |= ALLOC_HARDER;
523b9458 3481 /*
b104a35d 3482 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
344736f2 3483 * comment for __cpuset_node_allowed().
523b9458 3484 */
341ce06f 3485 alloc_flags &= ~ALLOC_CPUSET;
c06b1fca 3486 } else if (unlikely(rt_task(current)) && !in_interrupt())
341ce06f
PZ
3487 alloc_flags |= ALLOC_HARDER;
3488
d95ea5d1 3489#ifdef CONFIG_CMA
43e7a34d 3490 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
d95ea5d1
BZ
3491 alloc_flags |= ALLOC_CMA;
3492#endif
341ce06f
PZ
3493 return alloc_flags;
3494}
3495
072bb0aa
MG
3496bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3497{
31a6c190
VB
3498 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3499 return false;
3500
3501 if (gfp_mask & __GFP_MEMALLOC)
3502 return true;
3503 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3504 return true;
3505 if (!in_interrupt() &&
3506 ((current->flags & PF_MEMALLOC) ||
3507 unlikely(test_thread_flag(TIF_MEMDIE))))
3508 return true;
3509
3510 return false;
072bb0aa
MG
3511}
3512
0a0337e0
MH
3513/*
3514 * Checks whether it makes sense to retry the reclaim to make a forward progress
3515 * for the given allocation request.
491d79ae
JW
3516 *
3517 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3518 * without success, or when we couldn't even meet the watermark if we
3519 * reclaimed all remaining pages on the LRU lists.
0a0337e0
MH
3520 *
3521 * Returns true if a retry is viable or false to enter the oom path.
3522 */
3523static inline bool
3524should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3525 struct alloc_context *ac, int alloc_flags,
423b452e 3526 bool did_some_progress, int *no_progress_loops)
0a0337e0
MH
3527{
3528 struct zone *zone;
3529 struct zoneref *z;
3530
423b452e
VB
3531 /*
3532 * Costly allocations might have made a progress but this doesn't mean
3533 * their order will become available due to high fragmentation so
3534 * always increment the no progress counter for them
3535 */
3536 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3537 *no_progress_loops = 0;
3538 else
3539 (*no_progress_loops)++;
3540
0a0337e0
MH
3541 /*
3542 * Make sure we converge to OOM if we cannot make any progress
3543 * several times in the row.
3544 */
04c8716f
MK
3545 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3546 /* Before OOM, exhaust highatomic_reserve */
29fac03b 3547 return unreserve_highatomic_pageblock(ac, true);
04c8716f 3548 }
0a0337e0 3549
bca67592
MG
3550 /*
3551 * Keep reclaiming pages while there is a chance this will lead
3552 * somewhere. If none of the target zones can satisfy our allocation
3553 * request even if all reclaimable pages are considered then we are
3554 * screwed and have to go OOM.
0a0337e0
MH
3555 */
3556 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3557 ac->nodemask) {
3558 unsigned long available;
ede37713 3559 unsigned long reclaimable;
d379f01d
MH
3560 unsigned long min_wmark = min_wmark_pages(zone);
3561 bool wmark;
0a0337e0 3562
5a1c84b4 3563 available = reclaimable = zone_reclaimable_pages(zone);
5a1c84b4 3564 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
0a0337e0
MH
3565
3566 /*
491d79ae
JW
3567 * Would the allocation succeed if we reclaimed all
3568 * reclaimable pages?
0a0337e0 3569 */
d379f01d
MH
3570 wmark = __zone_watermark_ok(zone, order, min_wmark,
3571 ac_classzone_idx(ac), alloc_flags, available);
3572 trace_reclaim_retry_zone(z, order, reclaimable,
3573 available, min_wmark, *no_progress_loops, wmark);
3574 if (wmark) {
ede37713
MH
3575 /*
3576 * If we didn't make any progress and have a lot of
3577 * dirty + writeback pages then we should wait for
3578 * an IO to complete to slow down the reclaim and
3579 * prevent from pre mature OOM
3580 */
3581 if (!did_some_progress) {
11fb9989 3582 unsigned long write_pending;
ede37713 3583
5a1c84b4
MG
3584 write_pending = zone_page_state_snapshot(zone,
3585 NR_ZONE_WRITE_PENDING);
ede37713 3586
11fb9989 3587 if (2 * write_pending > reclaimable) {
ede37713
MH
3588 congestion_wait(BLK_RW_ASYNC, HZ/10);
3589 return true;
3590 }
3591 }
5a1c84b4 3592
ede37713
MH
3593 /*
3594 * Memory allocation/reclaim might be called from a WQ
3595 * context and the current implementation of the WQ
3596 * concurrency control doesn't recognize that
3597 * a particular WQ is congested if the worker thread is
3598 * looping without ever sleeping. Therefore we have to
3599 * do a short sleep here rather than calling
3600 * cond_resched().
3601 */
3602 if (current->flags & PF_WQ_WORKER)
3603 schedule_timeout_uninterruptible(1);
3604 else
3605 cond_resched();
3606
0a0337e0
MH
3607 return true;
3608 }
3609 }
3610
3611 return false;
3612}
3613
11e33f6a
MG
3614static inline struct page *
3615__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
a9263751 3616 struct alloc_context *ac)
11e33f6a 3617{
d0164adc 3618 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
11e33f6a 3619 struct page *page = NULL;
c603844b 3620 unsigned int alloc_flags;
11e33f6a 3621 unsigned long did_some_progress;
5ce9bfef 3622 enum compact_priority compact_priority;
c5d01d0d 3623 enum compact_result compact_result;
5ce9bfef
VB
3624 int compaction_retries;
3625 int no_progress_loops;
63f53dea
MH
3626 unsigned long alloc_start = jiffies;
3627 unsigned int stall_timeout = 10 * HZ;
5ce9bfef 3628 unsigned int cpuset_mems_cookie;
1da177e4 3629
72807a74
MG
3630 /*
3631 * In the slowpath, we sanity check order to avoid ever trying to
3632 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3633 * be using allocators in order of preference for an area that is
3634 * too large.
3635 */
1fc28b70
MG
3636 if (order >= MAX_ORDER) {
3637 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 3638 return NULL;
1fc28b70 3639 }
1da177e4 3640
d0164adc
MG
3641 /*
3642 * We also sanity check to catch abuse of atomic reserves being used by
3643 * callers that are not in atomic context.
3644 */
3645 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3646 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3647 gfp_mask &= ~__GFP_ATOMIC;
3648
5ce9bfef
VB
3649retry_cpuset:
3650 compaction_retries = 0;
3651 no_progress_loops = 0;
3652 compact_priority = DEF_COMPACT_PRIORITY;
3653 cpuset_mems_cookie = read_mems_allowed_begin();
9a67f648
MH
3654
3655 /*
3656 * The fast path uses conservative alloc_flags to succeed only until
3657 * kswapd needs to be woken up, and to avoid the cost of setting up
3658 * alloc_flags precisely. So we do that now.
3659 */
3660 alloc_flags = gfp_to_alloc_flags(gfp_mask);
3661
e47483bc
VB
3662 /*
3663 * We need to recalculate the starting point for the zonelist iterator
3664 * because we might have used different nodemask in the fast path, or
3665 * there was a cpuset modification and we are retrying - otherwise we
3666 * could end up iterating over non-eligible zones endlessly.
3667 */
3668 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3669 ac->high_zoneidx, ac->nodemask);
3670 if (!ac->preferred_zoneref->zone)
3671 goto nopage;
3672
23771235
VB
3673 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3674 wake_all_kswapds(order, ac);
3675
3676 /*
3677 * The adjusted alloc_flags might result in immediate success, so try
3678 * that first
3679 */
3680 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3681 if (page)
3682 goto got_pg;
3683
a8161d1e
VB
3684 /*
3685 * For costly allocations, try direct compaction first, as it's likely
3686 * that we have enough base pages and don't need to reclaim. Don't try
3687 * that for allocations that are allowed to ignore watermarks, as the
3688 * ALLOC_NO_WATERMARKS attempt didn't yet happen.
3689 */
3690 if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER &&
3691 !gfp_pfmemalloc_allowed(gfp_mask)) {
3692 page = __alloc_pages_direct_compact(gfp_mask, order,
3693 alloc_flags, ac,
a5508cd8 3694 INIT_COMPACT_PRIORITY,
a8161d1e
VB
3695 &compact_result);
3696 if (page)
3697 goto got_pg;
3698
3eb2771b
VB
3699 /*
3700 * Checks for costly allocations with __GFP_NORETRY, which
3701 * includes THP page fault allocations
3702 */
3703 if (gfp_mask & __GFP_NORETRY) {
a8161d1e
VB
3704 /*
3705 * If compaction is deferred for high-order allocations,
3706 * it is because sync compaction recently failed. If
3707 * this is the case and the caller requested a THP
3708 * allocation, we do not want to heavily disrupt the
3709 * system, so we fail the allocation instead of entering
3710 * direct reclaim.
3711 */
3712 if (compact_result == COMPACT_DEFERRED)
3713 goto nopage;
3714
a8161d1e 3715 /*
3eb2771b
VB
3716 * Looks like reclaim/compaction is worth trying, but
3717 * sync compaction could be very expensive, so keep
25160354 3718 * using async compaction.
a8161d1e 3719 */
a5508cd8 3720 compact_priority = INIT_COMPACT_PRIORITY;
a8161d1e
VB
3721 }
3722 }
23771235 3723
31a6c190 3724retry:
23771235 3725 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
31a6c190
VB
3726 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3727 wake_all_kswapds(order, ac);
3728
23771235
VB
3729 if (gfp_pfmemalloc_allowed(gfp_mask))
3730 alloc_flags = ALLOC_NO_WATERMARKS;
3731
e46e7b77
MG
3732 /*
3733 * Reset the zonelist iterators if memory policies can be ignored.
3734 * These allocations are high priority and system rather than user
3735 * orientated.
3736 */
23771235 3737 if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
e46e7b77
MG
3738 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3739 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3740 ac->high_zoneidx, ac->nodemask);
3741 }
3742
23771235 3743 /* Attempt with potentially adjusted zonelist and alloc_flags */
31a6c190 3744 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
7fb1d9fc
RS
3745 if (page)
3746 goto got_pg;
1da177e4 3747
d0164adc 3748 /* Caller is not willing to reclaim, we can't balance anything */
9a67f648 3749 if (!can_direct_reclaim)
1da177e4
LT
3750 goto nopage;
3751
9a67f648
MH
3752 /* Make sure we know about allocations which stall for too long */
3753 if (time_after(jiffies, alloc_start + stall_timeout)) {
82251963 3754 warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
9a67f648
MH
3755 "page allocation stalls for %ums, order:%u",
3756 jiffies_to_msecs(jiffies-alloc_start), order);
3757 stall_timeout += 10 * HZ;
33d53103 3758 }
341ce06f 3759
9a67f648
MH
3760 /* Avoid recursion of direct reclaim */
3761 if (current->flags & PF_MEMALLOC)
6583bb64
DR
3762 goto nopage;
3763
a8161d1e
VB
3764 /* Try direct reclaim and then allocating */
3765 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3766 &did_some_progress);
3767 if (page)
3768 goto got_pg;
3769
3770 /* Try direct compaction and then allocating */
a9263751 3771 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
a5508cd8 3772 compact_priority, &compact_result);
56de7263
MG
3773 if (page)
3774 goto got_pg;
75f30861 3775
9083905a
JW
3776 /* Do not loop if specifically requested */
3777 if (gfp_mask & __GFP_NORETRY)
a8161d1e 3778 goto nopage;
9083905a 3779
0a0337e0
MH
3780 /*
3781 * Do not retry costly high order allocations unless they are
3782 * __GFP_REPEAT
3783 */
3784 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
a8161d1e 3785 goto nopage;
0a0337e0 3786
0a0337e0 3787 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
423b452e 3788 did_some_progress > 0, &no_progress_loops))
0a0337e0
MH
3789 goto retry;
3790
33c2d214
MH
3791 /*
3792 * It doesn't make any sense to retry for the compaction if the order-0
3793 * reclaim is not able to make any progress because the current
3794 * implementation of the compaction depends on the sufficient amount
3795 * of free memory (see __compaction_suitable)
3796 */
3797 if (did_some_progress > 0 &&
86a294a8 3798 should_compact_retry(ac, order, alloc_flags,
a5508cd8 3799 compact_result, &compact_priority,
d9436498 3800 &compaction_retries))
33c2d214
MH
3801 goto retry;
3802
e47483bc
VB
3803 /*
3804 * It's possible we raced with cpuset update so the OOM would be
3805 * premature (see below the nopage: label for full explanation).
3806 */
3807 if (read_mems_allowed_retry(cpuset_mems_cookie))
3808 goto retry_cpuset;
3809
9083905a
JW
3810 /* Reclaim has failed us, start killing things */
3811 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3812 if (page)
3813 goto got_pg;
3814
9a67f648
MH
3815 /* Avoid allocations with no watermarks from looping endlessly */
3816 if (test_thread_flag(TIF_MEMDIE))
3817 goto nopage;
3818
9083905a 3819 /* Retry as long as the OOM killer is making progress */
0a0337e0
MH
3820 if (did_some_progress) {
3821 no_progress_loops = 0;
9083905a 3822 goto retry;
0a0337e0 3823 }
9083905a 3824
1da177e4 3825nopage:
5ce9bfef 3826 /*
e47483bc
VB
3827 * When updating a task's mems_allowed or mempolicy nodemask, it is
3828 * possible to race with parallel threads in such a way that our
3829 * allocation can fail while the mask is being updated. If we are about
3830 * to fail, check if the cpuset changed during allocation and if so,
3831 * retry.
5ce9bfef
VB
3832 */
3833 if (read_mems_allowed_retry(cpuset_mems_cookie))
3834 goto retry_cpuset;
3835
9a67f648
MH
3836 /*
3837 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
3838 * we always retry
3839 */
3840 if (gfp_mask & __GFP_NOFAIL) {
3841 /*
3842 * All existing users of the __GFP_NOFAIL are blockable, so warn
3843 * of any new users that actually require GFP_NOWAIT
3844 */
3845 if (WARN_ON_ONCE(!can_direct_reclaim))
3846 goto fail;
3847
3848 /*
3849 * PF_MEMALLOC request from this context is rather bizarre
3850 * because we cannot reclaim anything and only can loop waiting
3851 * for somebody to do a work for us
3852 */
3853 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
3854
3855 /*
3856 * non failing costly orders are a hard requirement which we
3857 * are not prepared for much so let's warn about these users
3858 * so that we can identify them and convert them to something
3859 * else.
3860 */
3861 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
3862
6c18ba7a
MH
3863 /*
3864 * Help non-failing allocations by giving them access to memory
3865 * reserves but do not use ALLOC_NO_WATERMARKS because this
3866 * could deplete whole memory reserves which would just make
3867 * the situation worse
3868 */
3869 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
3870 if (page)
3871 goto got_pg;
3872
9a67f648
MH
3873 cond_resched();
3874 goto retry;
3875 }
3876fail:
a8e99259 3877 warn_alloc(gfp_mask, ac->nodemask,
7877cdcc 3878 "page allocation failure: order:%u", order);
1da177e4 3879got_pg:
072bb0aa 3880 return page;
1da177e4 3881}
11e33f6a 3882
9cd75558
MG
3883static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
3884 struct zonelist *zonelist, nodemask_t *nodemask,
3885 struct alloc_context *ac, gfp_t *alloc_mask,
3886 unsigned int *alloc_flags)
11e33f6a 3887{
9cd75558
MG
3888 ac->high_zoneidx = gfp_zone(gfp_mask);
3889 ac->zonelist = zonelist;
3890 ac->nodemask = nodemask;
3891 ac->migratetype = gfpflags_to_migratetype(gfp_mask);
11e33f6a 3892
682a3385 3893 if (cpusets_enabled()) {
9cd75558 3894 *alloc_mask |= __GFP_HARDWALL;
9cd75558
MG
3895 if (!ac->nodemask)
3896 ac->nodemask = &cpuset_current_mems_allowed;
51047820
VB
3897 else
3898 *alloc_flags |= ALLOC_CPUSET;
682a3385
MG
3899 }
3900
11e33f6a
MG
3901 lockdep_trace_alloc(gfp_mask);
3902
d0164adc 3903 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
11e33f6a
MG
3904
3905 if (should_fail_alloc_page(gfp_mask, order))
9cd75558 3906 return false;
11e33f6a 3907
9cd75558
MG
3908 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
3909 *alloc_flags |= ALLOC_CMA;
3910
3911 return true;
3912}
21bb9bd1 3913
9cd75558
MG
3914/* Determine whether to spread dirty pages and what the first usable zone */
3915static inline void finalise_ac(gfp_t gfp_mask,
3916 unsigned int order, struct alloc_context *ac)
3917{
c9ab0c4f 3918 /* Dirty zone balancing only done in the fast path */
9cd75558 3919 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
c9ab0c4f 3920
e46e7b77
MG
3921 /*
3922 * The preferred zone is used for statistics but crucially it is
3923 * also used as the starting point for the zonelist iterator. It
3924 * may get reset for allocations that ignore memory policies.
3925 */
9cd75558
MG
3926 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3927 ac->high_zoneidx, ac->nodemask);
3928}
3929
3930/*
3931 * This is the 'heart' of the zoned buddy allocator.
3932 */
3933struct page *
3934__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3935 struct zonelist *zonelist, nodemask_t *nodemask)
3936{
3937 struct page *page;
3938 unsigned int alloc_flags = ALLOC_WMARK_LOW;
3939 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
3940 struct alloc_context ac = { };
3941
3942 gfp_mask &= gfp_allowed_mask;
3943 if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
3944 return NULL;
3945
3946 finalise_ac(gfp_mask, order, &ac);
5bb1b169 3947
5117f45d 3948 /* First allocation attempt */
a9263751 3949 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4fcb0971
MG
3950 if (likely(page))
3951 goto out;
11e33f6a 3952
4fcb0971 3953 /*
7dea19f9
MH
3954 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
3955 * resp. GFP_NOIO which has to be inherited for all allocation requests
3956 * from a particular context which has been marked by
3957 * memalloc_no{fs,io}_{save,restore}.
4fcb0971 3958 */
7dea19f9 3959 alloc_mask = current_gfp_context(gfp_mask);
4fcb0971 3960 ac.spread_dirty_pages = false;
23f086f9 3961
4741526b
MG
3962 /*
3963 * Restore the original nodemask if it was potentially replaced with
3964 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
3965 */
e47483bc 3966 if (unlikely(ac.nodemask != nodemask))
4741526b 3967 ac.nodemask = nodemask;
16096c25 3968
4fcb0971 3969 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
cc9a6c87 3970
4fcb0971 3971out:
c4159a75
VD
3972 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
3973 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
3974 __free_pages(page, order);
3975 page = NULL;
4949148a
VD
3976 }
3977
4fcb0971
MG
3978 if (kmemcheck_enabled && page)
3979 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
3980
3981 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
3982
11e33f6a 3983 return page;
1da177e4 3984}
d239171e 3985EXPORT_SYMBOL(__alloc_pages_nodemask);
1da177e4
LT
3986
3987/*
3988 * Common helper functions.
3989 */
920c7a5d 3990unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 3991{
945a1113
AM
3992 struct page *page;
3993
3994 /*
3995 * __get_free_pages() returns a 32-bit address, which cannot represent
3996 * a highmem page
3997 */
3998 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
3999
1da177e4
LT
4000 page = alloc_pages(gfp_mask, order);
4001 if (!page)
4002 return 0;
4003 return (unsigned long) page_address(page);
4004}
1da177e4
LT
4005EXPORT_SYMBOL(__get_free_pages);
4006
920c7a5d 4007unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 4008{
945a1113 4009 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 4010}
1da177e4
LT
4011EXPORT_SYMBOL(get_zeroed_page);
4012
920c7a5d 4013void __free_pages(struct page *page, unsigned int order)
1da177e4 4014{
b5810039 4015 if (put_page_testzero(page)) {
1da177e4 4016 if (order == 0)
b745bc85 4017 free_hot_cold_page(page, false);
1da177e4
LT
4018 else
4019 __free_pages_ok(page, order);
4020 }
4021}
4022
4023EXPORT_SYMBOL(__free_pages);
4024
920c7a5d 4025void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
4026{
4027 if (addr != 0) {
725d704e 4028 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
4029 __free_pages(virt_to_page((void *)addr), order);
4030 }
4031}
4032
4033EXPORT_SYMBOL(free_pages);
4034
b63ae8ca
AD
4035/*
4036 * Page Fragment:
4037 * An arbitrary-length arbitrary-offset area of memory which resides
4038 * within a 0 or higher order page. Multiple fragments within that page
4039 * are individually refcounted, in the page's reference counter.
4040 *
4041 * The page_frag functions below provide a simple allocation framework for
4042 * page fragments. This is used by the network stack and network device
4043 * drivers to provide a backing region of memory for use as either an
4044 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4045 */
2976db80
AD
4046static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4047 gfp_t gfp_mask)
b63ae8ca
AD
4048{
4049 struct page *page = NULL;
4050 gfp_t gfp = gfp_mask;
4051
4052#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4053 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4054 __GFP_NOMEMALLOC;
4055 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4056 PAGE_FRAG_CACHE_MAX_ORDER);
4057 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4058#endif
4059 if (unlikely(!page))
4060 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4061
4062 nc->va = page ? page_address(page) : NULL;
4063
4064 return page;
4065}
4066
2976db80 4067void __page_frag_cache_drain(struct page *page, unsigned int count)
44fdffd7
AD
4068{
4069 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4070
4071 if (page_ref_sub_and_test(page, count)) {
2976db80
AD
4072 unsigned int order = compound_order(page);
4073
44fdffd7
AD
4074 if (order == 0)
4075 free_hot_cold_page(page, false);
4076 else
4077 __free_pages_ok(page, order);
4078 }
4079}
2976db80 4080EXPORT_SYMBOL(__page_frag_cache_drain);
44fdffd7 4081
8c2dd3e4
AD
4082void *page_frag_alloc(struct page_frag_cache *nc,
4083 unsigned int fragsz, gfp_t gfp_mask)
b63ae8ca
AD
4084{
4085 unsigned int size = PAGE_SIZE;
4086 struct page *page;
4087 int offset;
4088
4089 if (unlikely(!nc->va)) {
4090refill:
2976db80 4091 page = __page_frag_cache_refill(nc, gfp_mask);
b63ae8ca
AD
4092 if (!page)
4093 return NULL;
4094
4095#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4096 /* if size can vary use size else just use PAGE_SIZE */
4097 size = nc->size;
4098#endif
4099 /* Even if we own the page, we do not use atomic_set().
4100 * This would break get_page_unless_zero() users.
4101 */
fe896d18 4102 page_ref_add(page, size - 1);
b63ae8ca
AD
4103
4104 /* reset page count bias and offset to start of new frag */
2f064f34 4105 nc->pfmemalloc = page_is_pfmemalloc(page);
b63ae8ca
AD
4106 nc->pagecnt_bias = size;
4107 nc->offset = size;
4108 }
4109
4110 offset = nc->offset - fragsz;
4111 if (unlikely(offset < 0)) {
4112 page = virt_to_page(nc->va);
4113
fe896d18 4114 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
b63ae8ca
AD
4115 goto refill;
4116
4117#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4118 /* if size can vary use size else just use PAGE_SIZE */
4119 size = nc->size;
4120#endif
4121 /* OK, page count is 0, we can safely set it */
fe896d18 4122 set_page_count(page, size);
b63ae8ca
AD
4123
4124 /* reset page count bias and offset to start of new frag */
4125 nc->pagecnt_bias = size;
4126 offset = size - fragsz;
4127 }
4128
4129 nc->pagecnt_bias--;
4130 nc->offset = offset;
4131
4132 return nc->va + offset;
4133}
8c2dd3e4 4134EXPORT_SYMBOL(page_frag_alloc);
b63ae8ca
AD
4135
4136/*
4137 * Frees a page fragment allocated out of either a compound or order 0 page.
4138 */
8c2dd3e4 4139void page_frag_free(void *addr)
b63ae8ca
AD
4140{
4141 struct page *page = virt_to_head_page(addr);
4142
4143 if (unlikely(put_page_testzero(page)))
4144 __free_pages_ok(page, compound_order(page));
4145}
8c2dd3e4 4146EXPORT_SYMBOL(page_frag_free);
b63ae8ca 4147
d00181b9
KS
4148static void *make_alloc_exact(unsigned long addr, unsigned int order,
4149 size_t size)
ee85c2e1
AK
4150{
4151 if (addr) {
4152 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4153 unsigned long used = addr + PAGE_ALIGN(size);
4154
4155 split_page(virt_to_page((void *)addr), order);
4156 while (used < alloc_end) {
4157 free_page(used);
4158 used += PAGE_SIZE;
4159 }
4160 }
4161 return (void *)addr;
4162}
4163
2be0ffe2
TT
4164/**
4165 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
4166 * @size: the number of bytes to allocate
4167 * @gfp_mask: GFP flags for the allocation
4168 *
4169 * This function is similar to alloc_pages(), except that it allocates the
4170 * minimum number of pages to satisfy the request. alloc_pages() can only
4171 * allocate memory in power-of-two pages.
4172 *
4173 * This function is also limited by MAX_ORDER.
4174 *
4175 * Memory allocated by this function must be released by free_pages_exact().
4176 */
4177void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4178{
4179 unsigned int order = get_order(size);
4180 unsigned long addr;
4181
4182 addr = __get_free_pages(gfp_mask, order);
ee85c2e1 4183 return make_alloc_exact(addr, order, size);
2be0ffe2
TT
4184}
4185EXPORT_SYMBOL(alloc_pages_exact);
4186
ee85c2e1
AK
4187/**
4188 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4189 * pages on a node.
b5e6ab58 4190 * @nid: the preferred node ID where memory should be allocated
ee85c2e1
AK
4191 * @size: the number of bytes to allocate
4192 * @gfp_mask: GFP flags for the allocation
4193 *
4194 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4195 * back.
ee85c2e1 4196 */
e1931811 4197void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
ee85c2e1 4198{
d00181b9 4199 unsigned int order = get_order(size);
ee85c2e1
AK
4200 struct page *p = alloc_pages_node(nid, gfp_mask, order);
4201 if (!p)
4202 return NULL;
4203 return make_alloc_exact((unsigned long)page_address(p), order, size);
4204}
ee85c2e1 4205
2be0ffe2
TT
4206/**
4207 * free_pages_exact - release memory allocated via alloc_pages_exact()
4208 * @virt: the value returned by alloc_pages_exact.
4209 * @size: size of allocation, same value as passed to alloc_pages_exact().
4210 *
4211 * Release the memory allocated by a previous call to alloc_pages_exact.
4212 */
4213void free_pages_exact(void *virt, size_t size)
4214{
4215 unsigned long addr = (unsigned long)virt;
4216 unsigned long end = addr + PAGE_ALIGN(size);
4217
4218 while (addr < end) {
4219 free_page(addr);
4220 addr += PAGE_SIZE;
4221 }
4222}
4223EXPORT_SYMBOL(free_pages_exact);
4224
e0fb5815
ZY
4225/**
4226 * nr_free_zone_pages - count number of pages beyond high watermark
4227 * @offset: The zone index of the highest zone
4228 *
4229 * nr_free_zone_pages() counts the number of counts pages which are beyond the
4230 * high watermark within all zones at or below a given zone index. For each
4231 * zone, the number of pages is calculated as:
0e056eb5
MCC
4232 *
4233 * nr_free_zone_pages = managed_pages - high_pages
e0fb5815 4234 */
ebec3862 4235static unsigned long nr_free_zone_pages(int offset)
1da177e4 4236{
dd1a239f 4237 struct zoneref *z;
54a6eb5c
MG
4238 struct zone *zone;
4239
e310fd43 4240 /* Just pick one node, since fallback list is circular */
ebec3862 4241 unsigned long sum = 0;
1da177e4 4242
0e88460d 4243 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 4244
54a6eb5c 4245 for_each_zone_zonelist(zone, z, zonelist, offset) {
b40da049 4246 unsigned long size = zone->managed_pages;
41858966 4247 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
4248 if (size > high)
4249 sum += size - high;
1da177e4
LT
4250 }
4251
4252 return sum;
4253}
4254
e0fb5815
ZY
4255/**
4256 * nr_free_buffer_pages - count number of pages beyond high watermark
4257 *
4258 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4259 * watermark within ZONE_DMA and ZONE_NORMAL.
1da177e4 4260 */
ebec3862 4261unsigned long nr_free_buffer_pages(void)
1da177e4 4262{
af4ca457 4263 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 4264}
c2f1a551 4265EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4 4266
e0fb5815
ZY
4267/**
4268 * nr_free_pagecache_pages - count number of pages beyond high watermark
4269 *
4270 * nr_free_pagecache_pages() counts the number of pages which are beyond the
4271 * high watermark within all zones.
1da177e4 4272 */
ebec3862 4273unsigned long nr_free_pagecache_pages(void)
1da177e4 4274{
2a1e274a 4275 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 4276}
08e0f6a9
CL
4277
4278static inline void show_node(struct zone *zone)
1da177e4 4279{
e5adfffc 4280 if (IS_ENABLED(CONFIG_NUMA))
25ba77c1 4281 printk("Node %d ", zone_to_nid(zone));
1da177e4 4282}
1da177e4 4283
d02bd27b
IR
4284long si_mem_available(void)
4285{
4286 long available;
4287 unsigned long pagecache;
4288 unsigned long wmark_low = 0;
4289 unsigned long pages[NR_LRU_LISTS];
4290 struct zone *zone;
4291 int lru;
4292
4293 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
2f95ff90 4294 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
d02bd27b
IR
4295
4296 for_each_zone(zone)
4297 wmark_low += zone->watermark[WMARK_LOW];
4298
4299 /*
4300 * Estimate the amount of memory available for userspace allocations,
4301 * without causing swapping.
4302 */
4303 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4304
4305 /*
4306 * Not all the page cache can be freed, otherwise the system will
4307 * start swapping. Assume at least half of the page cache, or the
4308 * low watermark worth of cache, needs to stay.
4309 */
4310 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4311 pagecache -= min(pagecache / 2, wmark_low);
4312 available += pagecache;
4313
4314 /*
4315 * Part of the reclaimable slab consists of items that are in use,
4316 * and cannot be freed. Cap this estimate at the low watermark.
4317 */
4318 available += global_page_state(NR_SLAB_RECLAIMABLE) -
4319 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
4320
4321 if (available < 0)
4322 available = 0;
4323 return available;
4324}
4325EXPORT_SYMBOL_GPL(si_mem_available);
4326
1da177e4
LT
4327void si_meminfo(struct sysinfo *val)
4328{
4329 val->totalram = totalram_pages;
11fb9989 4330 val->sharedram = global_node_page_state(NR_SHMEM);
d23ad423 4331 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 4332 val->bufferram = nr_blockdev_pages();
1da177e4
LT
4333 val->totalhigh = totalhigh_pages;
4334 val->freehigh = nr_free_highpages();
1da177e4
LT
4335 val->mem_unit = PAGE_SIZE;
4336}
4337
4338EXPORT_SYMBOL(si_meminfo);
4339
4340#ifdef CONFIG_NUMA
4341void si_meminfo_node(struct sysinfo *val, int nid)
4342{
cdd91a77
JL
4343 int zone_type; /* needs to be signed */
4344 unsigned long managed_pages = 0;
fc2bd799
JK
4345 unsigned long managed_highpages = 0;
4346 unsigned long free_highpages = 0;
1da177e4
LT
4347 pg_data_t *pgdat = NODE_DATA(nid);
4348
cdd91a77
JL
4349 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4350 managed_pages += pgdat->node_zones[zone_type].managed_pages;
4351 val->totalram = managed_pages;
11fb9989 4352 val->sharedram = node_page_state(pgdat, NR_SHMEM);
75ef7184 4353 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 4354#ifdef CONFIG_HIGHMEM
fc2bd799
JK
4355 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4356 struct zone *zone = &pgdat->node_zones[zone_type];
4357
4358 if (is_highmem(zone)) {
4359 managed_highpages += zone->managed_pages;
4360 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4361 }
4362 }
4363 val->totalhigh = managed_highpages;
4364 val->freehigh = free_highpages;
98d2b0eb 4365#else
fc2bd799
JK
4366 val->totalhigh = managed_highpages;
4367 val->freehigh = free_highpages;
98d2b0eb 4368#endif
1da177e4
LT
4369 val->mem_unit = PAGE_SIZE;
4370}
4371#endif
4372
ddd588b5 4373/*
7bf02ea2
DR
4374 * Determine whether the node should be displayed or not, depending on whether
4375 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
ddd588b5 4376 */
9af744d7 4377static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
ddd588b5 4378{
ddd588b5 4379 if (!(flags & SHOW_MEM_FILTER_NODES))
9af744d7 4380 return false;
ddd588b5 4381
9af744d7
MH
4382 /*
4383 * no node mask - aka implicit memory numa policy. Do not bother with
4384 * the synchronization - read_mems_allowed_begin - because we do not
4385 * have to be precise here.
4386 */
4387 if (!nodemask)
4388 nodemask = &cpuset_current_mems_allowed;
4389
4390 return !node_isset(nid, *nodemask);
ddd588b5
DR
4391}
4392
1da177e4
LT
4393#define K(x) ((x) << (PAGE_SHIFT-10))
4394
377e4f16
RV
4395static void show_migration_types(unsigned char type)
4396{
4397 static const char types[MIGRATE_TYPES] = {
4398 [MIGRATE_UNMOVABLE] = 'U',
377e4f16 4399 [MIGRATE_MOVABLE] = 'M',
475a2f90
VB
4400 [MIGRATE_RECLAIMABLE] = 'E',
4401 [MIGRATE_HIGHATOMIC] = 'H',
377e4f16
RV
4402#ifdef CONFIG_CMA
4403 [MIGRATE_CMA] = 'C',
4404#endif
194159fb 4405#ifdef CONFIG_MEMORY_ISOLATION
377e4f16 4406 [MIGRATE_ISOLATE] = 'I',
194159fb 4407#endif
377e4f16
RV
4408 };
4409 char tmp[MIGRATE_TYPES + 1];
4410 char *p = tmp;
4411 int i;
4412
4413 for (i = 0; i < MIGRATE_TYPES; i++) {
4414 if (type & (1 << i))
4415 *p++ = types[i];
4416 }
4417
4418 *p = '\0';
1f84a18f 4419 printk(KERN_CONT "(%s) ", tmp);
377e4f16
RV
4420}
4421
1da177e4
LT
4422/*
4423 * Show free area list (used inside shift_scroll-lock stuff)
4424 * We also calculate the percentage fragmentation. We do this by counting the
4425 * memory on each free list with the exception of the first item on the list.
d1bfcdb8
KK
4426 *
4427 * Bits in @filter:
4428 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4429 * cpuset.
1da177e4 4430 */
9af744d7 4431void show_free_areas(unsigned int filter, nodemask_t *nodemask)
1da177e4 4432{
d1bfcdb8 4433 unsigned long free_pcp = 0;
c7241913 4434 int cpu;
1da177e4 4435 struct zone *zone;
599d0c95 4436 pg_data_t *pgdat;
1da177e4 4437
ee99c71c 4438 for_each_populated_zone(zone) {
9af744d7 4439 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 4440 continue;
d1bfcdb8 4441
761b0677
KK
4442 for_each_online_cpu(cpu)
4443 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
1da177e4
LT
4444 }
4445
a731286d
KM
4446 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4447 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
d1bfcdb8
KK
4448 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4449 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
d1ce749a 4450 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
d1bfcdb8 4451 " free:%lu free_pcp:%lu free_cma:%lu\n",
599d0c95
MG
4452 global_node_page_state(NR_ACTIVE_ANON),
4453 global_node_page_state(NR_INACTIVE_ANON),
4454 global_node_page_state(NR_ISOLATED_ANON),
4455 global_node_page_state(NR_ACTIVE_FILE),
4456 global_node_page_state(NR_INACTIVE_FILE),
4457 global_node_page_state(NR_ISOLATED_FILE),
4458 global_node_page_state(NR_UNEVICTABLE),
11fb9989
MG
4459 global_node_page_state(NR_FILE_DIRTY),
4460 global_node_page_state(NR_WRITEBACK),
4461 global_node_page_state(NR_UNSTABLE_NFS),
3701b033
KM
4462 global_page_state(NR_SLAB_RECLAIMABLE),
4463 global_page_state(NR_SLAB_UNRECLAIMABLE),
50658e2e 4464 global_node_page_state(NR_FILE_MAPPED),
11fb9989 4465 global_node_page_state(NR_SHMEM),
a25700a5 4466 global_page_state(NR_PAGETABLE),
d1ce749a 4467 global_page_state(NR_BOUNCE),
d1bfcdb8
KK
4468 global_page_state(NR_FREE_PAGES),
4469 free_pcp,
d1ce749a 4470 global_page_state(NR_FREE_CMA_PAGES));
1da177e4 4471
599d0c95 4472 for_each_online_pgdat(pgdat) {
9af744d7 4473 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
c02e50bb
MH
4474 continue;
4475
599d0c95
MG
4476 printk("Node %d"
4477 " active_anon:%lukB"
4478 " inactive_anon:%lukB"
4479 " active_file:%lukB"
4480 " inactive_file:%lukB"
4481 " unevictable:%lukB"
4482 " isolated(anon):%lukB"
4483 " isolated(file):%lukB"
50658e2e 4484 " mapped:%lukB"
11fb9989
MG
4485 " dirty:%lukB"
4486 " writeback:%lukB"
4487 " shmem:%lukB"
4488#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4489 " shmem_thp: %lukB"
4490 " shmem_pmdmapped: %lukB"
4491 " anon_thp: %lukB"
4492#endif
4493 " writeback_tmp:%lukB"
4494 " unstable:%lukB"
599d0c95
MG
4495 " all_unreclaimable? %s"
4496 "\n",
4497 pgdat->node_id,
4498 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4499 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4500 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4501 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4502 K(node_page_state(pgdat, NR_UNEVICTABLE)),
4503 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4504 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
50658e2e 4505 K(node_page_state(pgdat, NR_FILE_MAPPED)),
11fb9989
MG
4506 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4507 K(node_page_state(pgdat, NR_WRITEBACK)),
1f06b81a 4508 K(node_page_state(pgdat, NR_SHMEM)),
11fb9989
MG
4509#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4510 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4511 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4512 * HPAGE_PMD_NR),
4513 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4514#endif
11fb9989
MG
4515 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4516 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
c73322d0
JW
4517 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
4518 "yes" : "no");
599d0c95
MG
4519 }
4520
ee99c71c 4521 for_each_populated_zone(zone) {
1da177e4
LT
4522 int i;
4523
9af744d7 4524 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 4525 continue;
d1bfcdb8
KK
4526
4527 free_pcp = 0;
4528 for_each_online_cpu(cpu)
4529 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4530
1da177e4 4531 show_node(zone);
1f84a18f
JP
4532 printk(KERN_CONT
4533 "%s"
1da177e4
LT
4534 " free:%lukB"
4535 " min:%lukB"
4536 " low:%lukB"
4537 " high:%lukB"
71c799f4
MK
4538 " active_anon:%lukB"
4539 " inactive_anon:%lukB"
4540 " active_file:%lukB"
4541 " inactive_file:%lukB"
4542 " unevictable:%lukB"
5a1c84b4 4543 " writepending:%lukB"
1da177e4 4544 " present:%lukB"
9feedc9d 4545 " managed:%lukB"
4a0aa73f 4546 " mlocked:%lukB"
4a0aa73f
KM
4547 " slab_reclaimable:%lukB"
4548 " slab_unreclaimable:%lukB"
c6a7f572 4549 " kernel_stack:%lukB"
4a0aa73f 4550 " pagetables:%lukB"
4a0aa73f 4551 " bounce:%lukB"
d1bfcdb8
KK
4552 " free_pcp:%lukB"
4553 " local_pcp:%ukB"
d1ce749a 4554 " free_cma:%lukB"
1da177e4
LT
4555 "\n",
4556 zone->name,
88f5acf8 4557 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
4558 K(min_wmark_pages(zone)),
4559 K(low_wmark_pages(zone)),
4560 K(high_wmark_pages(zone)),
71c799f4
MK
4561 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4562 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4563 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4564 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4565 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5a1c84b4 4566 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
1da177e4 4567 K(zone->present_pages),
9feedc9d 4568 K(zone->managed_pages),
4a0aa73f 4569 K(zone_page_state(zone, NR_MLOCK)),
4a0aa73f
KM
4570 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
4571 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
d30dd8be 4572 zone_page_state(zone, NR_KERNEL_STACK_KB),
4a0aa73f 4573 K(zone_page_state(zone, NR_PAGETABLE)),
4a0aa73f 4574 K(zone_page_state(zone, NR_BOUNCE)),
d1bfcdb8
KK
4575 K(free_pcp),
4576 K(this_cpu_read(zone->pageset->pcp.count)),
33e077bd 4577 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
1da177e4
LT
4578 printk("lowmem_reserve[]:");
4579 for (i = 0; i < MAX_NR_ZONES; i++)
1f84a18f
JP
4580 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4581 printk(KERN_CONT "\n");
1da177e4
LT
4582 }
4583
ee99c71c 4584 for_each_populated_zone(zone) {
d00181b9
KS
4585 unsigned int order;
4586 unsigned long nr[MAX_ORDER], flags, total = 0;
377e4f16 4587 unsigned char types[MAX_ORDER];
1da177e4 4588
9af744d7 4589 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
ddd588b5 4590 continue;
1da177e4 4591 show_node(zone);
1f84a18f 4592 printk(KERN_CONT "%s: ", zone->name);
1da177e4
LT
4593
4594 spin_lock_irqsave(&zone->lock, flags);
4595 for (order = 0; order < MAX_ORDER; order++) {
377e4f16
RV
4596 struct free_area *area = &zone->free_area[order];
4597 int type;
4598
4599 nr[order] = area->nr_free;
8f9de51a 4600 total += nr[order] << order;
377e4f16
RV
4601
4602 types[order] = 0;
4603 for (type = 0; type < MIGRATE_TYPES; type++) {
4604 if (!list_empty(&area->free_list[type]))
4605 types[order] |= 1 << type;
4606 }
1da177e4
LT
4607 }
4608 spin_unlock_irqrestore(&zone->lock, flags);
377e4f16 4609 for (order = 0; order < MAX_ORDER; order++) {
1f84a18f
JP
4610 printk(KERN_CONT "%lu*%lukB ",
4611 nr[order], K(1UL) << order);
377e4f16
RV
4612 if (nr[order])
4613 show_migration_types(types[order]);
4614 }
1f84a18f 4615 printk(KERN_CONT "= %lukB\n", K(total));
1da177e4
LT
4616 }
4617
949f7ec5
DR
4618 hugetlb_show_meminfo();
4619
11fb9989 4620 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
e6f3602d 4621
1da177e4
LT
4622 show_swap_cache_info();
4623}
4624
19770b32
MG
4625static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4626{
4627 zoneref->zone = zone;
4628 zoneref->zone_idx = zone_idx(zone);
4629}
4630
1da177e4
LT
4631/*
4632 * Builds allocation fallback zone lists.
1a93205b
CL
4633 *
4634 * Add all populated zones of a node to the zonelist.
1da177e4 4635 */
f0c0b2b8 4636static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
bc732f1d 4637 int nr_zones)
1da177e4 4638{
1a93205b 4639 struct zone *zone;
bc732f1d 4640 enum zone_type zone_type = MAX_NR_ZONES;
02a68a5e
CL
4641
4642 do {
2f6726e5 4643 zone_type--;
070f8032 4644 zone = pgdat->node_zones + zone_type;
6aa303de 4645 if (managed_zone(zone)) {
dd1a239f
MG
4646 zoneref_set_zone(zone,
4647 &zonelist->_zonerefs[nr_zones++]);
070f8032 4648 check_highest_zone(zone_type);
1da177e4 4649 }
2f6726e5 4650 } while (zone_type);
bc732f1d 4651
070f8032 4652 return nr_zones;
1da177e4
LT
4653}
4654
f0c0b2b8
KH
4655
4656/*
4657 * zonelist_order:
4658 * 0 = automatic detection of better ordering.
4659 * 1 = order by ([node] distance, -zonetype)
4660 * 2 = order by (-zonetype, [node] distance)
4661 *
4662 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
4663 * the same zonelist. So only NUMA can configure this param.
4664 */
4665#define ZONELIST_ORDER_DEFAULT 0
4666#define ZONELIST_ORDER_NODE 1
4667#define ZONELIST_ORDER_ZONE 2
4668
4669/* zonelist order in the kernel.
4670 * set_zonelist_order() will set this to NODE or ZONE.
4671 */
4672static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4673static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4674
4675
1da177e4 4676#ifdef CONFIG_NUMA
f0c0b2b8
KH
4677/* The value user specified ....changed by config */
4678static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4679/* string for sysctl */
4680#define NUMA_ZONELIST_ORDER_LEN 16
4681char numa_zonelist_order[16] = "default";
4682
4683/*
4684 * interface for configure zonelist ordering.
4685 * command line option "numa_zonelist_order"
4686 * = "[dD]efault - default, automatic configuration.
4687 * = "[nN]ode - order by node locality, then by zone within node
4688 * = "[zZ]one - order by zone, then by locality within zone
4689 */
4690
4691static int __parse_numa_zonelist_order(char *s)
4692{
4693 if (*s == 'd' || *s == 'D') {
4694 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4695 } else if (*s == 'n' || *s == 'N') {
4696 user_zonelist_order = ZONELIST_ORDER_NODE;
4697 } else if (*s == 'z' || *s == 'Z') {
4698 user_zonelist_order = ZONELIST_ORDER_ZONE;
4699 } else {
1170532b 4700 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
f0c0b2b8
KH
4701 return -EINVAL;
4702 }
4703 return 0;
4704}
4705
4706static __init int setup_numa_zonelist_order(char *s)
4707{
ecb256f8
VL
4708 int ret;
4709
4710 if (!s)
4711 return 0;
4712
4713 ret = __parse_numa_zonelist_order(s);
4714 if (ret == 0)
4715 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4716
4717 return ret;
f0c0b2b8
KH
4718}
4719early_param("numa_zonelist_order", setup_numa_zonelist_order);
4720
4721/*
4722 * sysctl handler for numa_zonelist_order
4723 */
cccad5b9 4724int numa_zonelist_order_handler(struct ctl_table *table, int write,
8d65af78 4725 void __user *buffer, size_t *length,
f0c0b2b8
KH
4726 loff_t *ppos)
4727{
4728 char saved_string[NUMA_ZONELIST_ORDER_LEN];
4729 int ret;
443c6f14 4730 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 4731
443c6f14 4732 mutex_lock(&zl_order_mutex);
dacbde09
CG
4733 if (write) {
4734 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4735 ret = -EINVAL;
4736 goto out;
4737 }
4738 strcpy(saved_string, (char *)table->data);
4739 }
8d65af78 4740 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 4741 if (ret)
443c6f14 4742 goto out;
f0c0b2b8
KH
4743 if (write) {
4744 int oldval = user_zonelist_order;
dacbde09
CG
4745
4746 ret = __parse_numa_zonelist_order((char *)table->data);
4747 if (ret) {
f0c0b2b8
KH
4748 /*
4749 * bogus value. restore saved string
4750 */
dacbde09 4751 strncpy((char *)table->data, saved_string,
f0c0b2b8
KH
4752 NUMA_ZONELIST_ORDER_LEN);
4753 user_zonelist_order = oldval;
4eaf3f64
HL
4754 } else if (oldval != user_zonelist_order) {
4755 mutex_lock(&zonelists_mutex);
9adb62a5 4756 build_all_zonelists(NULL, NULL);
4eaf3f64
HL
4757 mutex_unlock(&zonelists_mutex);
4758 }
f0c0b2b8 4759 }
443c6f14
AK
4760out:
4761 mutex_unlock(&zl_order_mutex);
4762 return ret;
f0c0b2b8
KH
4763}
4764
4765
62bc62a8 4766#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
4767static int node_load[MAX_NUMNODES];
4768
1da177e4 4769/**
4dc3b16b 4770 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
4771 * @node: node whose fallback list we're appending
4772 * @used_node_mask: nodemask_t of already used nodes
4773 *
4774 * We use a number of factors to determine which is the next node that should
4775 * appear on a given node's fallback list. The node should not have appeared
4776 * already in @node's fallback list, and it should be the next closest node
4777 * according to the distance array (which contains arbitrary distance values
4778 * from each node to each node in the system), and should also prefer nodes
4779 * with no CPUs, since presumably they'll have very little allocation pressure
4780 * on them otherwise.
4781 * It returns -1 if no node is found.
4782 */
f0c0b2b8 4783static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 4784{
4cf808eb 4785 int n, val;
1da177e4 4786 int min_val = INT_MAX;
00ef2d2f 4787 int best_node = NUMA_NO_NODE;
a70f7302 4788 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 4789
4cf808eb
LT
4790 /* Use the local node if we haven't already */
4791 if (!node_isset(node, *used_node_mask)) {
4792 node_set(node, *used_node_mask);
4793 return node;
4794 }
1da177e4 4795
4b0ef1fe 4796 for_each_node_state(n, N_MEMORY) {
1da177e4
LT
4797
4798 /* Don't want a node to appear more than once */
4799 if (node_isset(n, *used_node_mask))
4800 continue;
4801
1da177e4
LT
4802 /* Use the distance array to find the distance */
4803 val = node_distance(node, n);
4804
4cf808eb
LT
4805 /* Penalize nodes under us ("prefer the next node") */
4806 val += (n < node);
4807
1da177e4 4808 /* Give preference to headless and unused nodes */
a70f7302
RR
4809 tmp = cpumask_of_node(n);
4810 if (!cpumask_empty(tmp))
1da177e4
LT
4811 val += PENALTY_FOR_NODE_WITH_CPUS;
4812
4813 /* Slight preference for less loaded node */
4814 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4815 val += node_load[n];
4816
4817 if (val < min_val) {
4818 min_val = val;
4819 best_node = n;
4820 }
4821 }
4822
4823 if (best_node >= 0)
4824 node_set(best_node, *used_node_mask);
4825
4826 return best_node;
4827}
4828
f0c0b2b8
KH
4829
4830/*
4831 * Build zonelists ordered by node and zones within node.
4832 * This results in maximum locality--normal zone overflows into local
4833 * DMA zone, if any--but risks exhausting DMA zone.
4834 */
4835static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 4836{
f0c0b2b8 4837 int j;
1da177e4 4838 struct zonelist *zonelist;
f0c0b2b8 4839
c9634cf0 4840 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
dd1a239f 4841 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c 4842 ;
bc732f1d 4843 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
dd1a239f
MG
4844 zonelist->_zonerefs[j].zone = NULL;
4845 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
4846}
4847
523b9458
CL
4848/*
4849 * Build gfp_thisnode zonelists
4850 */
4851static void build_thisnode_zonelists(pg_data_t *pgdat)
4852{
523b9458
CL
4853 int j;
4854 struct zonelist *zonelist;
4855
c9634cf0 4856 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
bc732f1d 4857 j = build_zonelists_node(pgdat, zonelist, 0);
dd1a239f
MG
4858 zonelist->_zonerefs[j].zone = NULL;
4859 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
4860}
4861
f0c0b2b8
KH
4862/*
4863 * Build zonelists ordered by zone and nodes within zones.
4864 * This results in conserving DMA zone[s] until all Normal memory is
4865 * exhausted, but results in overflowing to remote node while memory
4866 * may still exist in local DMA zone.
4867 */
4868static int node_order[MAX_NUMNODES];
4869
4870static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4871{
f0c0b2b8
KH
4872 int pos, j, node;
4873 int zone_type; /* needs to be signed */
4874 struct zone *z;
4875 struct zonelist *zonelist;
4876
c9634cf0 4877 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
54a6eb5c
MG
4878 pos = 0;
4879 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
4880 for (j = 0; j < nr_nodes; j++) {
4881 node = node_order[j];
4882 z = &NODE_DATA(node)->node_zones[zone_type];
6aa303de 4883 if (managed_zone(z)) {
dd1a239f
MG
4884 zoneref_set_zone(z,
4885 &zonelist->_zonerefs[pos++]);
54a6eb5c 4886 check_highest_zone(zone_type);
f0c0b2b8
KH
4887 }
4888 }
f0c0b2b8 4889 }
dd1a239f
MG
4890 zonelist->_zonerefs[pos].zone = NULL;
4891 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
4892}
4893
3193913c
MG
4894#if defined(CONFIG_64BIT)
4895/*
4896 * Devices that require DMA32/DMA are relatively rare and do not justify a
4897 * penalty to every machine in case the specialised case applies. Default
4898 * to Node-ordering on 64-bit NUMA machines
4899 */
4900static int default_zonelist_order(void)
4901{
4902 return ZONELIST_ORDER_NODE;
4903}
4904#else
4905/*
4906 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
4907 * by the kernel. If processes running on node 0 deplete the low memory zone
4908 * then reclaim will occur more frequency increasing stalls and potentially
4909 * be easier to OOM if a large percentage of the zone is under writeback or
4910 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
4911 * Hence, default to zone ordering on 32-bit.
4912 */
f0c0b2b8
KH
4913static int default_zonelist_order(void)
4914{
f0c0b2b8
KH
4915 return ZONELIST_ORDER_ZONE;
4916}
3193913c 4917#endif /* CONFIG_64BIT */
f0c0b2b8
KH
4918
4919static void set_zonelist_order(void)
4920{
4921 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
4922 current_zonelist_order = default_zonelist_order();
4923 else
4924 current_zonelist_order = user_zonelist_order;
4925}
4926
4927static void build_zonelists(pg_data_t *pgdat)
4928{
c00eb15a 4929 int i, node, load;
1da177e4 4930 nodemask_t used_mask;
f0c0b2b8
KH
4931 int local_node, prev_node;
4932 struct zonelist *zonelist;
d00181b9 4933 unsigned int order = current_zonelist_order;
1da177e4
LT
4934
4935 /* initialize zonelists */
523b9458 4936 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 4937 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
4938 zonelist->_zonerefs[0].zone = NULL;
4939 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
4940 }
4941
4942 /* NUMA-aware ordering of nodes */
4943 local_node = pgdat->node_id;
62bc62a8 4944 load = nr_online_nodes;
1da177e4
LT
4945 prev_node = local_node;
4946 nodes_clear(used_mask);
f0c0b2b8 4947
f0c0b2b8 4948 memset(node_order, 0, sizeof(node_order));
c00eb15a 4949 i = 0;
f0c0b2b8 4950
1da177e4
LT
4951 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4952 /*
4953 * We don't want to pressure a particular node.
4954 * So adding penalty to the first node in same
4955 * distance group to make it round-robin.
4956 */
957f822a
DR
4957 if (node_distance(local_node, node) !=
4958 node_distance(local_node, prev_node))
f0c0b2b8
KH
4959 node_load[node] = load;
4960
1da177e4
LT
4961 prev_node = node;
4962 load--;
f0c0b2b8
KH
4963 if (order == ZONELIST_ORDER_NODE)
4964 build_zonelists_in_node_order(pgdat, node);
4965 else
c00eb15a 4966 node_order[i++] = node; /* remember order */
f0c0b2b8 4967 }
1da177e4 4968
f0c0b2b8
KH
4969 if (order == ZONELIST_ORDER_ZONE) {
4970 /* calculate node order -- i.e., DMA last! */
c00eb15a 4971 build_zonelists_in_zone_order(pgdat, i);
1da177e4 4972 }
523b9458
CL
4973
4974 build_thisnode_zonelists(pgdat);
1da177e4
LT
4975}
4976
7aac7898
LS
4977#ifdef CONFIG_HAVE_MEMORYLESS_NODES
4978/*
4979 * Return node id of node used for "local" allocations.
4980 * I.e., first node id of first zone in arg node's generic zonelist.
4981 * Used for initializing percpu 'numa_mem', which is used primarily
4982 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
4983 */
4984int local_memory_node(int node)
4985{
c33d6c06 4986 struct zoneref *z;
7aac7898 4987
c33d6c06 4988 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
7aac7898 4989 gfp_zone(GFP_KERNEL),
c33d6c06
MG
4990 NULL);
4991 return z->zone->node;
7aac7898
LS
4992}
4993#endif
f0c0b2b8 4994
6423aa81
JK
4995static void setup_min_unmapped_ratio(void);
4996static void setup_min_slab_ratio(void);
1da177e4
LT
4997#else /* CONFIG_NUMA */
4998
f0c0b2b8
KH
4999static void set_zonelist_order(void)
5000{
5001 current_zonelist_order = ZONELIST_ORDER_ZONE;
5002}
5003
5004static void build_zonelists(pg_data_t *pgdat)
1da177e4 5005{
19655d34 5006 int node, local_node;
54a6eb5c
MG
5007 enum zone_type j;
5008 struct zonelist *zonelist;
1da177e4
LT
5009
5010 local_node = pgdat->node_id;
1da177e4 5011
c9634cf0 5012 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
bc732f1d 5013 j = build_zonelists_node(pgdat, zonelist, 0);
1da177e4 5014
54a6eb5c
MG
5015 /*
5016 * Now we build the zonelist so that it contains the zones
5017 * of all the other nodes.
5018 * We don't want to pressure a particular node, so when
5019 * building the zones for node N, we make sure that the
5020 * zones coming right after the local ones are those from
5021 * node N+1 (modulo N)
5022 */
5023 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5024 if (!node_online(node))
5025 continue;
bc732f1d 5026 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
1da177e4 5027 }
54a6eb5c
MG
5028 for (node = 0; node < local_node; node++) {
5029 if (!node_online(node))
5030 continue;
bc732f1d 5031 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
54a6eb5c
MG
5032 }
5033
dd1a239f
MG
5034 zonelist->_zonerefs[j].zone = NULL;
5035 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
5036}
5037
5038#endif /* CONFIG_NUMA */
5039
99dcc3e5
CL
5040/*
5041 * Boot pageset table. One per cpu which is going to be used for all
5042 * zones and all nodes. The parameters will be set in such a way
5043 * that an item put on a list will immediately be handed over to
5044 * the buddy list. This is safe since pageset manipulation is done
5045 * with interrupts disabled.
5046 *
5047 * The boot_pagesets must be kept even after bootup is complete for
5048 * unused processors and/or zones. They do play a role for bootstrapping
5049 * hotplugged processors.
5050 *
5051 * zoneinfo_show() and maybe other functions do
5052 * not check if the processor is online before following the pageset pointer.
5053 * Other parts of the kernel may not check if the zone is available.
5054 */
5055static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5056static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 5057static void setup_zone_pageset(struct zone *zone);
99dcc3e5 5058
4eaf3f64
HL
5059/*
5060 * Global mutex to protect against size modification of zonelists
5061 * as well as to serialize pageset setup for the new populated zone.
5062 */
5063DEFINE_MUTEX(zonelists_mutex);
5064
9b1a4d38 5065/* return values int ....just for stop_machine() */
4ed7e022 5066static int __build_all_zonelists(void *data)
1da177e4 5067{
6811378e 5068 int nid;
99dcc3e5 5069 int cpu;
9adb62a5 5070 pg_data_t *self = data;
9276b1bc 5071
7f9cfb31
BL
5072#ifdef CONFIG_NUMA
5073 memset(node_load, 0, sizeof(node_load));
5074#endif
9adb62a5
JL
5075
5076 if (self && !node_online(self->node_id)) {
5077 build_zonelists(self);
9adb62a5
JL
5078 }
5079
9276b1bc 5080 for_each_online_node(nid) {
7ea1530a
CL
5081 pg_data_t *pgdat = NODE_DATA(nid);
5082
5083 build_zonelists(pgdat);
9276b1bc 5084 }
99dcc3e5
CL
5085
5086 /*
5087 * Initialize the boot_pagesets that are going to be used
5088 * for bootstrapping processors. The real pagesets for
5089 * each zone will be allocated later when the per cpu
5090 * allocator is available.
5091 *
5092 * boot_pagesets are used also for bootstrapping offline
5093 * cpus if the system is already booted because the pagesets
5094 * are needed to initialize allocators on a specific cpu too.
5095 * F.e. the percpu allocator needs the page allocator which
5096 * needs the percpu allocator in order to allocate its pagesets
5097 * (a chicken-egg dilemma).
5098 */
7aac7898 5099 for_each_possible_cpu(cpu) {
99dcc3e5
CL
5100 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5101
7aac7898
LS
5102#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5103 /*
5104 * We now know the "local memory node" for each node--
5105 * i.e., the node of the first zone in the generic zonelist.
5106 * Set up numa_mem percpu variable for on-line cpus. During
5107 * boot, only the boot cpu should be on-line; we'll init the
5108 * secondary cpus' numa_mem as they come on-line. During
5109 * node/memory hotplug, we'll fixup all on-line cpus.
5110 */
5111 if (cpu_online(cpu))
5112 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5113#endif
5114 }
5115
6811378e
YG
5116 return 0;
5117}
5118
061f67bc
RV
5119static noinline void __init
5120build_all_zonelists_init(void)
5121{
5122 __build_all_zonelists(NULL);
5123 mminit_verify_zonelist();
5124 cpuset_init_current_mems_allowed();
5125}
5126
4eaf3f64
HL
5127/*
5128 * Called with zonelists_mutex held always
5129 * unless system_state == SYSTEM_BOOTING.
061f67bc
RV
5130 *
5131 * __ref due to (1) call of __meminit annotated setup_zone_pageset
5132 * [we're only called with non-NULL zone through __meminit paths] and
5133 * (2) call of __init annotated helper build_all_zonelists_init
5134 * [protected by SYSTEM_BOOTING].
4eaf3f64 5135 */
9adb62a5 5136void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
6811378e 5137{
f0c0b2b8
KH
5138 set_zonelist_order();
5139
6811378e 5140 if (system_state == SYSTEM_BOOTING) {
061f67bc 5141 build_all_zonelists_init();
6811378e 5142 } else {
e9959f0f 5143#ifdef CONFIG_MEMORY_HOTPLUG
9adb62a5
JL
5144 if (zone)
5145 setup_zone_pageset(zone);
e9959f0f 5146#endif
dd1895e2
CS
5147 /* we have to stop all cpus to guarantee there is no user
5148 of zonelist */
9adb62a5 5149 stop_machine(__build_all_zonelists, pgdat, NULL);
6811378e
YG
5150 /* cpuset refresh routine should be here */
5151 }
bd1e22b8 5152 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
5153 /*
5154 * Disable grouping by mobility if the number of pages in the
5155 * system is too low to allow the mechanism to work. It would be
5156 * more accurate, but expensive to check per-zone. This check is
5157 * made on memory-hotadd so a system can start with mobility
5158 * disabled and enable it later
5159 */
d9c23400 5160 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
5161 page_group_by_mobility_disabled = 1;
5162 else
5163 page_group_by_mobility_disabled = 0;
5164
756a025f
JP
5165 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
5166 nr_online_nodes,
5167 zonelist_order_name[current_zonelist_order],
5168 page_group_by_mobility_disabled ? "off" : "on",
5169 vm_total_pages);
f0c0b2b8 5170#ifdef CONFIG_NUMA
f88dfff5 5171 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
f0c0b2b8 5172#endif
1da177e4
LT
5173}
5174
1da177e4
LT
5175/*
5176 * Initially all pages are reserved - free ones are freed
5177 * up by free_all_bootmem() once the early boot process is
5178 * done. Non-atomic initialization, single-pass.
5179 */
c09b4240 5180void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 5181 unsigned long start_pfn, enum memmap_context context)
1da177e4 5182{
4b94ffdc 5183 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
29751f69 5184 unsigned long end_pfn = start_pfn + size;
4b94ffdc 5185 pg_data_t *pgdat = NODE_DATA(nid);
29751f69 5186 unsigned long pfn;
3a80a7fa 5187 unsigned long nr_initialised = 0;
342332e6
TI
5188#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5189 struct memblock_region *r = NULL, *tmp;
5190#endif
1da177e4 5191
22b31eec
HD
5192 if (highest_memmap_pfn < end_pfn - 1)
5193 highest_memmap_pfn = end_pfn - 1;
5194
4b94ffdc
DW
5195 /*
5196 * Honor reservation requested by the driver for this ZONE_DEVICE
5197 * memory
5198 */
5199 if (altmap && start_pfn == altmap->base_pfn)
5200 start_pfn += altmap->reserve;
5201
cbe8dd4a 5202 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02 5203 /*
b72d0ffb
AM
5204 * There can be holes in boot-time mem_map[]s handed to this
5205 * function. They do not exist on hotplugged memory.
a2f3aa02 5206 */
b72d0ffb
AM
5207 if (context != MEMMAP_EARLY)
5208 goto not_early;
5209
b92df1de
PB
5210 if (!early_pfn_valid(pfn)) {
5211#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5212 /*
5213 * Skip to the pfn preceding the next valid one (or
5214 * end_pfn), such that we hit a valid pfn (or end_pfn)
5215 * on our next iteration of the loop.
5216 */
5217 pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
5218#endif
b72d0ffb 5219 continue;
b92df1de 5220 }
b72d0ffb
AM
5221 if (!early_pfn_in_nid(pfn, nid))
5222 continue;
5223 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5224 break;
342332e6
TI
5225
5226#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
b72d0ffb
AM
5227 /*
5228 * Check given memblock attribute by firmware which can affect
5229 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
5230 * mirrored, it's an overlapped memmap init. skip it.
5231 */
5232 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5233 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5234 for_each_memblock(memory, tmp)
5235 if (pfn < memblock_region_memory_end_pfn(tmp))
5236 break;
5237 r = tmp;
5238 }
5239 if (pfn >= memblock_region_memory_base_pfn(r) &&
5240 memblock_is_mirror(r)) {
5241 /* already initialized as NORMAL */
5242 pfn = memblock_region_memory_end_pfn(r);
5243 continue;
342332e6 5244 }
a2f3aa02 5245 }
b72d0ffb 5246#endif
ac5d2539 5247
b72d0ffb 5248not_early:
ac5d2539
MG
5249 /*
5250 * Mark the block movable so that blocks are reserved for
5251 * movable at startup. This will force kernel allocations
5252 * to reserve their blocks rather than leaking throughout
5253 * the address space during boot when many long-lived
974a786e 5254 * kernel allocations are made.
ac5d2539
MG
5255 *
5256 * bitmap is created for zone's valid pfn range. but memmap
5257 * can be created for invalid pages (for alignment)
5258 * check here not to call set_pageblock_migratetype() against
5259 * pfn out of zone.
5260 */
5261 if (!(pfn & (pageblock_nr_pages - 1))) {
5262 struct page *page = pfn_to_page(pfn);
5263
5264 __init_single_page(page, pfn, zone, nid);
5265 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5266 } else {
5267 __init_single_pfn(pfn, zone, nid);
5268 }
1da177e4
LT
5269 }
5270}
5271
1e548deb 5272static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 5273{
7aeb09f9 5274 unsigned int order, t;
b2a0ac88
MG
5275 for_each_migratetype_order(order, t) {
5276 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
5277 zone->free_area[order].nr_free = 0;
5278 }
5279}
5280
5281#ifndef __HAVE_ARCH_MEMMAP_INIT
5282#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 5283 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
5284#endif
5285
7cd2b0a3 5286static int zone_batchsize(struct zone *zone)
e7c8d5c9 5287{
3a6be87f 5288#ifdef CONFIG_MMU
e7c8d5c9
CL
5289 int batch;
5290
5291 /*
5292 * The per-cpu-pages pools are set to around 1000th of the
ba56e91c 5293 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
5294 *
5295 * OK, so we don't know how big the cache is. So guess.
5296 */
b40da049 5297 batch = zone->managed_pages / 1024;
ba56e91c
SR
5298 if (batch * PAGE_SIZE > 512 * 1024)
5299 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
5300 batch /= 4; /* We effectively *= 4 below */
5301 if (batch < 1)
5302 batch = 1;
5303
5304 /*
0ceaacc9
NP
5305 * Clamp the batch to a 2^n - 1 value. Having a power
5306 * of 2 value was found to be more likely to have
5307 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 5308 *
0ceaacc9
NP
5309 * For example if 2 tasks are alternately allocating
5310 * batches of pages, one task can end up with a lot
5311 * of pages of one half of the possible page colors
5312 * and the other with pages of the other colors.
e7c8d5c9 5313 */
9155203a 5314 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 5315
e7c8d5c9 5316 return batch;
3a6be87f
DH
5317
5318#else
5319 /* The deferral and batching of frees should be suppressed under NOMMU
5320 * conditions.
5321 *
5322 * The problem is that NOMMU needs to be able to allocate large chunks
5323 * of contiguous memory as there's no hardware page translation to
5324 * assemble apparent contiguous memory from discontiguous pages.
5325 *
5326 * Queueing large contiguous runs of pages for batching, however,
5327 * causes the pages to actually be freed in smaller chunks. As there
5328 * can be a significant delay between the individual batches being
5329 * recycled, this leads to the once large chunks of space being
5330 * fragmented and becoming unavailable for high-order allocations.
5331 */
5332 return 0;
5333#endif
e7c8d5c9
CL
5334}
5335
8d7a8fa9
CS
5336/*
5337 * pcp->high and pcp->batch values are related and dependent on one another:
5338 * ->batch must never be higher then ->high.
5339 * The following function updates them in a safe manner without read side
5340 * locking.
5341 *
5342 * Any new users of pcp->batch and pcp->high should ensure they can cope with
5343 * those fields changing asynchronously (acording the the above rule).
5344 *
5345 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5346 * outside of boot time (or some other assurance that no concurrent updaters
5347 * exist).
5348 */
5349static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5350 unsigned long batch)
5351{
5352 /* start with a fail safe value for batch */
5353 pcp->batch = 1;
5354 smp_wmb();
5355
5356 /* Update high, then batch, in order */
5357 pcp->high = high;
5358 smp_wmb();
5359
5360 pcp->batch = batch;
5361}
5362
3664033c 5363/* a companion to pageset_set_high() */
4008bab7
CS
5364static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5365{
8d7a8fa9 5366 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4008bab7
CS
5367}
5368
88c90dbc 5369static void pageset_init(struct per_cpu_pageset *p)
2caaad41
CL
5370{
5371 struct per_cpu_pages *pcp;
5f8dcc21 5372 int migratetype;
2caaad41 5373
1c6fe946
MD
5374 memset(p, 0, sizeof(*p));
5375
3dfa5721 5376 pcp = &p->pcp;
2caaad41 5377 pcp->count = 0;
5f8dcc21
MG
5378 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5379 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
5380}
5381
88c90dbc
CS
5382static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5383{
5384 pageset_init(p);
5385 pageset_set_batch(p, batch);
5386}
5387
8ad4b1fb 5388/*
3664033c 5389 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
8ad4b1fb
RS
5390 * to the value high for the pageset p.
5391 */
3664033c 5392static void pageset_set_high(struct per_cpu_pageset *p,
8ad4b1fb
RS
5393 unsigned long high)
5394{
8d7a8fa9
CS
5395 unsigned long batch = max(1UL, high / 4);
5396 if ((high / 4) > (PAGE_SHIFT * 8))
5397 batch = PAGE_SHIFT * 8;
8ad4b1fb 5398
8d7a8fa9 5399 pageset_update(&p->pcp, high, batch);
8ad4b1fb
RS
5400}
5401
7cd2b0a3
DR
5402static void pageset_set_high_and_batch(struct zone *zone,
5403 struct per_cpu_pageset *pcp)
56cef2b8 5404{
56cef2b8 5405 if (percpu_pagelist_fraction)
3664033c 5406 pageset_set_high(pcp,
56cef2b8
CS
5407 (zone->managed_pages /
5408 percpu_pagelist_fraction));
5409 else
5410 pageset_set_batch(pcp, zone_batchsize(zone));
5411}
5412
169f6c19
CS
5413static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5414{
5415 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5416
5417 pageset_init(pcp);
5418 pageset_set_high_and_batch(zone, pcp);
5419}
5420
4ed7e022 5421static void __meminit setup_zone_pageset(struct zone *zone)
319774e2
WF
5422{
5423 int cpu;
319774e2 5424 zone->pageset = alloc_percpu(struct per_cpu_pageset);
56cef2b8
CS
5425 for_each_possible_cpu(cpu)
5426 zone_pageset_init(zone, cpu);
319774e2
WF
5427}
5428
2caaad41 5429/*
99dcc3e5
CL
5430 * Allocate per cpu pagesets and initialize them.
5431 * Before this call only boot pagesets were available.
e7c8d5c9 5432 */
99dcc3e5 5433void __init setup_per_cpu_pageset(void)
e7c8d5c9 5434{
b4911ea2 5435 struct pglist_data *pgdat;
99dcc3e5 5436 struct zone *zone;
e7c8d5c9 5437
319774e2
WF
5438 for_each_populated_zone(zone)
5439 setup_zone_pageset(zone);
b4911ea2
MG
5440
5441 for_each_online_pgdat(pgdat)
5442 pgdat->per_cpu_nodestats =
5443 alloc_percpu(struct per_cpu_nodestat);
e7c8d5c9
CL
5444}
5445
c09b4240 5446static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 5447{
99dcc3e5
CL
5448 /*
5449 * per cpu subsystem is not up at this point. The following code
5450 * relies on the ability of the linker to provide the
5451 * offset of a (static) per cpu variable into the per cpu area.
5452 */
5453 zone->pageset = &boot_pageset;
ed8ece2e 5454
b38a8725 5455 if (populated_zone(zone))
99dcc3e5
CL
5456 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
5457 zone->name, zone->present_pages,
5458 zone_batchsize(zone));
ed8ece2e
DH
5459}
5460
4ed7e022 5461int __meminit init_currently_empty_zone(struct zone *zone,
718127cc 5462 unsigned long zone_start_pfn,
b171e409 5463 unsigned long size)
ed8ece2e
DH
5464{
5465 struct pglist_data *pgdat = zone->zone_pgdat;
9dcb8b68 5466
ed8ece2e
DH
5467 pgdat->nr_zones = zone_idx(zone) + 1;
5468
ed8ece2e
DH
5469 zone->zone_start_pfn = zone_start_pfn;
5470
708614e6
MG
5471 mminit_dprintk(MMINIT_TRACE, "memmap_init",
5472 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5473 pgdat->node_id,
5474 (unsigned long)zone_idx(zone),
5475 zone_start_pfn, (zone_start_pfn + size));
5476
1e548deb 5477 zone_init_free_lists(zone);
9dcb8b68 5478 zone->initialized = 1;
718127cc
YG
5479
5480 return 0;
ed8ece2e
DH
5481}
5482
0ee332c1 5483#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d 5484#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
8a942fde 5485
c713216d
MG
5486/*
5487 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
c713216d 5488 */
8a942fde
MG
5489int __meminit __early_pfn_to_nid(unsigned long pfn,
5490 struct mminit_pfnnid_cache *state)
c713216d 5491{
c13291a5 5492 unsigned long start_pfn, end_pfn;
e76b63f8 5493 int nid;
7c243c71 5494
8a942fde
MG
5495 if (state->last_start <= pfn && pfn < state->last_end)
5496 return state->last_nid;
c713216d 5497
e76b63f8
YL
5498 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5499 if (nid != -1) {
8a942fde
MG
5500 state->last_start = start_pfn;
5501 state->last_end = end_pfn;
5502 state->last_nid = nid;
e76b63f8
YL
5503 }
5504
5505 return nid;
c713216d
MG
5506}
5507#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5508
c713216d 5509/**
6782832e 5510 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
88ca3b94 5511 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
6782832e 5512 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
c713216d 5513 *
7d018176
ZZ
5514 * If an architecture guarantees that all ranges registered contain no holes
5515 * and may be freed, this this function may be used instead of calling
5516 * memblock_free_early_nid() manually.
c713216d 5517 */
c13291a5 5518void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
cc289894 5519{
c13291a5
TH
5520 unsigned long start_pfn, end_pfn;
5521 int i, this_nid;
edbe7d23 5522
c13291a5
TH
5523 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5524 start_pfn = min(start_pfn, max_low_pfn);
5525 end_pfn = min(end_pfn, max_low_pfn);
edbe7d23 5526
c13291a5 5527 if (start_pfn < end_pfn)
6782832e
SS
5528 memblock_free_early_nid(PFN_PHYS(start_pfn),
5529 (end_pfn - start_pfn) << PAGE_SHIFT,
5530 this_nid);
edbe7d23 5531 }
edbe7d23 5532}
edbe7d23 5533
c713216d
MG
5534/**
5535 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 5536 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d 5537 *
7d018176
ZZ
5538 * If an architecture guarantees that all ranges registered contain no holes and may
5539 * be freed, this function may be used instead of calling memory_present() manually.
c713216d
MG
5540 */
5541void __init sparse_memory_present_with_active_regions(int nid)
5542{
c13291a5
TH
5543 unsigned long start_pfn, end_pfn;
5544 int i, this_nid;
c713216d 5545
c13291a5
TH
5546 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5547 memory_present(this_nid, start_pfn, end_pfn);
c713216d
MG
5548}
5549
5550/**
5551 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
5552 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5553 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5554 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
5555 *
5556 * It returns the start and end page frame of a node based on information
7d018176 5557 * provided by memblock_set_node(). If called for a node
c713216d 5558 * with no available memory, a warning is printed and the start and end
88ca3b94 5559 * PFNs will be 0.
c713216d 5560 */
a3142c8e 5561void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
5562 unsigned long *start_pfn, unsigned long *end_pfn)
5563{
c13291a5 5564 unsigned long this_start_pfn, this_end_pfn;
c713216d 5565 int i;
c13291a5 5566
c713216d
MG
5567 *start_pfn = -1UL;
5568 *end_pfn = 0;
5569
c13291a5
TH
5570 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5571 *start_pfn = min(*start_pfn, this_start_pfn);
5572 *end_pfn = max(*end_pfn, this_end_pfn);
c713216d
MG
5573 }
5574
633c0666 5575 if (*start_pfn == -1UL)
c713216d 5576 *start_pfn = 0;
c713216d
MG
5577}
5578
2a1e274a
MG
5579/*
5580 * This finds a zone that can be used for ZONE_MOVABLE pages. The
5581 * assumption is made that zones within a node are ordered in monotonic
5582 * increasing memory addresses so that the "highest" populated zone is used
5583 */
b69a7288 5584static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
5585{
5586 int zone_index;
5587 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5588 if (zone_index == ZONE_MOVABLE)
5589 continue;
5590
5591 if (arch_zone_highest_possible_pfn[zone_index] >
5592 arch_zone_lowest_possible_pfn[zone_index])
5593 break;
5594 }
5595
5596 VM_BUG_ON(zone_index == -1);
5597 movable_zone = zone_index;
5598}
5599
5600/*
5601 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
25985edc 5602 * because it is sized independent of architecture. Unlike the other zones,
2a1e274a
MG
5603 * the starting point for ZONE_MOVABLE is not fixed. It may be different
5604 * in each node depending on the size of each node and how evenly kernelcore
5605 * is distributed. This helper function adjusts the zone ranges
5606 * provided by the architecture for a given node by using the end of the
5607 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
5608 * zones within a node are in order of monotonic increases memory addresses
5609 */
b69a7288 5610static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
5611 unsigned long zone_type,
5612 unsigned long node_start_pfn,
5613 unsigned long node_end_pfn,
5614 unsigned long *zone_start_pfn,
5615 unsigned long *zone_end_pfn)
5616{
5617 /* Only adjust if ZONE_MOVABLE is on this node */
5618 if (zone_movable_pfn[nid]) {
5619 /* Size ZONE_MOVABLE */
5620 if (zone_type == ZONE_MOVABLE) {
5621 *zone_start_pfn = zone_movable_pfn[nid];
5622 *zone_end_pfn = min(node_end_pfn,
5623 arch_zone_highest_possible_pfn[movable_zone]);
5624
e506b996
XQ
5625 /* Adjust for ZONE_MOVABLE starting within this range */
5626 } else if (!mirrored_kernelcore &&
5627 *zone_start_pfn < zone_movable_pfn[nid] &&
5628 *zone_end_pfn > zone_movable_pfn[nid]) {
5629 *zone_end_pfn = zone_movable_pfn[nid];
5630
2a1e274a
MG
5631 /* Check if this whole range is within ZONE_MOVABLE */
5632 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5633 *zone_start_pfn = *zone_end_pfn;
5634 }
5635}
5636
c713216d
MG
5637/*
5638 * Return the number of pages a zone spans in a node, including holes
5639 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5640 */
6ea6e688 5641static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d 5642 unsigned long zone_type,
7960aedd
ZY
5643 unsigned long node_start_pfn,
5644 unsigned long node_end_pfn,
d91749c1
TI
5645 unsigned long *zone_start_pfn,
5646 unsigned long *zone_end_pfn,
c713216d
MG
5647 unsigned long *ignored)
5648{
b5685e92 5649 /* When hotadd a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
5650 if (!node_start_pfn && !node_end_pfn)
5651 return 0;
5652
7960aedd 5653 /* Get the start and end of the zone */
d91749c1
TI
5654 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5655 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
5656 adjust_zone_range_for_zone_movable(nid, zone_type,
5657 node_start_pfn, node_end_pfn,
d91749c1 5658 zone_start_pfn, zone_end_pfn);
c713216d
MG
5659
5660 /* Check that this node has pages within the zone's required range */
d91749c1 5661 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
c713216d
MG
5662 return 0;
5663
5664 /* Move the zone boundaries inside the node if necessary */
d91749c1
TI
5665 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5666 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
c713216d
MG
5667
5668 /* Return the spanned pages */
d91749c1 5669 return *zone_end_pfn - *zone_start_pfn;
c713216d
MG
5670}
5671
5672/*
5673 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 5674 * then all holes in the requested range will be accounted for.
c713216d 5675 */
32996250 5676unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
5677 unsigned long range_start_pfn,
5678 unsigned long range_end_pfn)
5679{
96e907d1
TH
5680 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5681 unsigned long start_pfn, end_pfn;
5682 int i;
c713216d 5683
96e907d1
TH
5684 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5685 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5686 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5687 nr_absent -= end_pfn - start_pfn;
c713216d 5688 }
96e907d1 5689 return nr_absent;
c713216d
MG
5690}
5691
5692/**
5693 * absent_pages_in_range - Return number of page frames in holes within a range
5694 * @start_pfn: The start PFN to start searching for holes
5695 * @end_pfn: The end PFN to stop searching for holes
5696 *
88ca3b94 5697 * It returns the number of pages frames in memory holes within a range.
c713216d
MG
5698 */
5699unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5700 unsigned long end_pfn)
5701{
5702 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5703}
5704
5705/* Return the number of page frames in holes in a zone on a node */
6ea6e688 5706static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d 5707 unsigned long zone_type,
7960aedd
ZY
5708 unsigned long node_start_pfn,
5709 unsigned long node_end_pfn,
c713216d
MG
5710 unsigned long *ignored)
5711{
96e907d1
TH
5712 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5713 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
9c7cd687 5714 unsigned long zone_start_pfn, zone_end_pfn;
342332e6 5715 unsigned long nr_absent;
9c7cd687 5716
b5685e92 5717 /* When hotadd a new node from cpu_up(), the node should be empty */
f9126ab9
XQ
5718 if (!node_start_pfn && !node_end_pfn)
5719 return 0;
5720
96e907d1
TH
5721 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5722 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
9c7cd687 5723
2a1e274a
MG
5724 adjust_zone_range_for_zone_movable(nid, zone_type,
5725 node_start_pfn, node_end_pfn,
5726 &zone_start_pfn, &zone_end_pfn);
342332e6
TI
5727 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5728
5729 /*
5730 * ZONE_MOVABLE handling.
5731 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
5732 * and vice versa.
5733 */
e506b996
XQ
5734 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5735 unsigned long start_pfn, end_pfn;
5736 struct memblock_region *r;
5737
5738 for_each_memblock(memory, r) {
5739 start_pfn = clamp(memblock_region_memory_base_pfn(r),
5740 zone_start_pfn, zone_end_pfn);
5741 end_pfn = clamp(memblock_region_memory_end_pfn(r),
5742 zone_start_pfn, zone_end_pfn);
5743
5744 if (zone_type == ZONE_MOVABLE &&
5745 memblock_is_mirror(r))
5746 nr_absent += end_pfn - start_pfn;
5747
5748 if (zone_type == ZONE_NORMAL &&
5749 !memblock_is_mirror(r))
5750 nr_absent += end_pfn - start_pfn;
342332e6
TI
5751 }
5752 }
5753
5754 return nr_absent;
c713216d 5755}
0e0b864e 5756
0ee332c1 5757#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6ea6e688 5758static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d 5759 unsigned long zone_type,
7960aedd
ZY
5760 unsigned long node_start_pfn,
5761 unsigned long node_end_pfn,
d91749c1
TI
5762 unsigned long *zone_start_pfn,
5763 unsigned long *zone_end_pfn,
c713216d
MG
5764 unsigned long *zones_size)
5765{
d91749c1
TI
5766 unsigned int zone;
5767
5768 *zone_start_pfn = node_start_pfn;
5769 for (zone = 0; zone < zone_type; zone++)
5770 *zone_start_pfn += zones_size[zone];
5771
5772 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5773
c713216d
MG
5774 return zones_size[zone_type];
5775}
5776
6ea6e688 5777static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d 5778 unsigned long zone_type,
7960aedd
ZY
5779 unsigned long node_start_pfn,
5780 unsigned long node_end_pfn,
c713216d
MG
5781 unsigned long *zholes_size)
5782{
5783 if (!zholes_size)
5784 return 0;
5785
5786 return zholes_size[zone_type];
5787}
20e6926d 5788
0ee332c1 5789#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 5790
a3142c8e 5791static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
7960aedd
ZY
5792 unsigned long node_start_pfn,
5793 unsigned long node_end_pfn,
5794 unsigned long *zones_size,
5795 unsigned long *zholes_size)
c713216d 5796{
febd5949 5797 unsigned long realtotalpages = 0, totalpages = 0;
c713216d
MG
5798 enum zone_type i;
5799
febd5949
GZ
5800 for (i = 0; i < MAX_NR_ZONES; i++) {
5801 struct zone *zone = pgdat->node_zones + i;
d91749c1 5802 unsigned long zone_start_pfn, zone_end_pfn;
febd5949 5803 unsigned long size, real_size;
c713216d 5804
febd5949
GZ
5805 size = zone_spanned_pages_in_node(pgdat->node_id, i,
5806 node_start_pfn,
5807 node_end_pfn,
d91749c1
TI
5808 &zone_start_pfn,
5809 &zone_end_pfn,
febd5949
GZ
5810 zones_size);
5811 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
7960aedd
ZY
5812 node_start_pfn, node_end_pfn,
5813 zholes_size);
d91749c1
TI
5814 if (size)
5815 zone->zone_start_pfn = zone_start_pfn;
5816 else
5817 zone->zone_start_pfn = 0;
febd5949
GZ
5818 zone->spanned_pages = size;
5819 zone->present_pages = real_size;
5820
5821 totalpages += size;
5822 realtotalpages += real_size;
5823 }
5824
5825 pgdat->node_spanned_pages = totalpages;
c713216d
MG
5826 pgdat->node_present_pages = realtotalpages;
5827 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5828 realtotalpages);
5829}
5830
835c134e
MG
5831#ifndef CONFIG_SPARSEMEM
5832/*
5833 * Calculate the size of the zone->blockflags rounded to an unsigned long
d9c23400
MG
5834 * Start by making sure zonesize is a multiple of pageblock_order by rounding
5835 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
5836 * round what is now in bits to nearest long in bits, then return it in
5837 * bytes.
5838 */
7c45512d 5839static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
835c134e
MG
5840{
5841 unsigned long usemapsize;
5842
7c45512d 5843 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
d9c23400
MG
5844 usemapsize = roundup(zonesize, pageblock_nr_pages);
5845 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
5846 usemapsize *= NR_PAGEBLOCK_BITS;
5847 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
5848
5849 return usemapsize / 8;
5850}
5851
5852static void __init setup_usemap(struct pglist_data *pgdat,
7c45512d
LT
5853 struct zone *zone,
5854 unsigned long zone_start_pfn,
5855 unsigned long zonesize)
835c134e 5856{
7c45512d 5857 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
835c134e 5858 zone->pageblock_flags = NULL;
58a01a45 5859 if (usemapsize)
6782832e
SS
5860 zone->pageblock_flags =
5861 memblock_virt_alloc_node_nopanic(usemapsize,
5862 pgdat->node_id);
835c134e
MG
5863}
5864#else
7c45512d
LT
5865static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5866 unsigned long zone_start_pfn, unsigned long zonesize) {}
835c134e
MG
5867#endif /* CONFIG_SPARSEMEM */
5868
d9c23400 5869#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c 5870
d9c23400 5871/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
15ca220e 5872void __paginginit set_pageblock_order(void)
d9c23400 5873{
955c1cd7
AM
5874 unsigned int order;
5875
d9c23400
MG
5876 /* Check that pageblock_nr_pages has not already been setup */
5877 if (pageblock_order)
5878 return;
5879
955c1cd7
AM
5880 if (HPAGE_SHIFT > PAGE_SHIFT)
5881 order = HUGETLB_PAGE_ORDER;
5882 else
5883 order = MAX_ORDER - 1;
5884
d9c23400
MG
5885 /*
5886 * Assume the largest contiguous order of interest is a huge page.
955c1cd7
AM
5887 * This value may be variable depending on boot parameters on IA64 and
5888 * powerpc.
d9c23400
MG
5889 */
5890 pageblock_order = order;
5891}
5892#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5893
ba72cb8c
MG
5894/*
5895 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
955c1cd7
AM
5896 * is unused as pageblock_order is set at compile-time. See
5897 * include/linux/pageblock-flags.h for the values of pageblock_order based on
5898 * the kernel config
ba72cb8c 5899 */
15ca220e 5900void __paginginit set_pageblock_order(void)
ba72cb8c 5901{
ba72cb8c 5902}
d9c23400
MG
5903
5904#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
5905
01cefaef
JL
5906static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
5907 unsigned long present_pages)
5908{
5909 unsigned long pages = spanned_pages;
5910
5911 /*
5912 * Provide a more accurate estimation if there are holes within
5913 * the zone and SPARSEMEM is in use. If there are holes within the
5914 * zone, each populated memory region may cost us one or two extra
5915 * memmap pages due to alignment because memmap pages for each
89d790ab 5916 * populated regions may not be naturally aligned on page boundary.
01cefaef
JL
5917 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5918 */
5919 if (spanned_pages > present_pages + (present_pages >> 4) &&
5920 IS_ENABLED(CONFIG_SPARSEMEM))
5921 pages = present_pages;
5922
5923 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5924}
5925
1da177e4
LT
5926/*
5927 * Set up the zone data structures:
5928 * - mark all pages reserved
5929 * - mark all memory queues empty
5930 * - clear the memory bitmaps
6527af5d
MK
5931 *
5932 * NOTE: pgdat should get zeroed by caller.
1da177e4 5933 */
7f3eb55b 5934static void __paginginit free_area_init_core(struct pglist_data *pgdat)
1da177e4 5935{
2f1b6248 5936 enum zone_type j;
ed8ece2e 5937 int nid = pgdat->node_id;
718127cc 5938 int ret;
1da177e4 5939
208d54e5 5940 pgdat_resize_init(pgdat);
8177a420
AA
5941#ifdef CONFIG_NUMA_BALANCING
5942 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5943 pgdat->numabalancing_migrate_nr_pages = 0;
5944 pgdat->numabalancing_migrate_next_window = jiffies;
a3d0a918
KS
5945#endif
5946#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5947 spin_lock_init(&pgdat->split_queue_lock);
5948 INIT_LIST_HEAD(&pgdat->split_queue);
5949 pgdat->split_queue_len = 0;
8177a420 5950#endif
1da177e4 5951 init_waitqueue_head(&pgdat->kswapd_wait);
5515061d 5952 init_waitqueue_head(&pgdat->pfmemalloc_wait);
698b1b30
VB
5953#ifdef CONFIG_COMPACTION
5954 init_waitqueue_head(&pgdat->kcompactd_wait);
5955#endif
eefa864b 5956 pgdat_page_ext_init(pgdat);
a52633d8 5957 spin_lock_init(&pgdat->lru_lock);
a9dd0a83 5958 lruvec_init(node_lruvec(pgdat));
5f63b720 5959
1da177e4
LT
5960 for (j = 0; j < MAX_NR_ZONES; j++) {
5961 struct zone *zone = pgdat->node_zones + j;
9feedc9d 5962 unsigned long size, realsize, freesize, memmap_pages;
d91749c1 5963 unsigned long zone_start_pfn = zone->zone_start_pfn;
1da177e4 5964
febd5949
GZ
5965 size = zone->spanned_pages;
5966 realsize = freesize = zone->present_pages;
1da177e4 5967
0e0b864e 5968 /*
9feedc9d 5969 * Adjust freesize so that it accounts for how much memory
0e0b864e
MG
5970 * is used by this zone for memmap. This affects the watermark
5971 * and per-cpu initialisations
5972 */
01cefaef 5973 memmap_pages = calc_memmap_size(size, realsize);
ba914f48
ZH
5974 if (!is_highmem_idx(j)) {
5975 if (freesize >= memmap_pages) {
5976 freesize -= memmap_pages;
5977 if (memmap_pages)
5978 printk(KERN_DEBUG
5979 " %s zone: %lu pages used for memmap\n",
5980 zone_names[j], memmap_pages);
5981 } else
1170532b 5982 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
ba914f48
ZH
5983 zone_names[j], memmap_pages, freesize);
5984 }
0e0b864e 5985
6267276f 5986 /* Account for reserved pages */
9feedc9d
JL
5987 if (j == 0 && freesize > dma_reserve) {
5988 freesize -= dma_reserve;
d903ef9f 5989 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 5990 zone_names[0], dma_reserve);
0e0b864e
MG
5991 }
5992
98d2b0eb 5993 if (!is_highmem_idx(j))
9feedc9d 5994 nr_kernel_pages += freesize;
01cefaef
JL
5995 /* Charge for highmem memmap if there are enough kernel pages */
5996 else if (nr_kernel_pages > memmap_pages * 2)
5997 nr_kernel_pages -= memmap_pages;
9feedc9d 5998 nr_all_pages += freesize;
1da177e4 5999
9feedc9d
JL
6000 /*
6001 * Set an approximate value for lowmem here, it will be adjusted
6002 * when the bootmem allocator frees pages into the buddy system.
6003 * And all highmem pages will be managed by the buddy system.
6004 */
6005 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
9614634f 6006#ifdef CONFIG_NUMA
d5f541ed 6007 zone->node = nid;
9614634f 6008#endif
1da177e4 6009 zone->name = zone_names[j];
a52633d8 6010 zone->zone_pgdat = pgdat;
1da177e4 6011 spin_lock_init(&zone->lock);
bdc8cb98 6012 zone_seqlock_init(zone);
ed8ece2e 6013 zone_pcp_init(zone);
81c0a2bb 6014
1da177e4
LT
6015 if (!size)
6016 continue;
6017
955c1cd7 6018 set_pageblock_order();
7c45512d 6019 setup_usemap(pgdat, zone, zone_start_pfn, size);
b171e409 6020 ret = init_currently_empty_zone(zone, zone_start_pfn, size);
718127cc 6021 BUG_ON(ret);
76cdd58e 6022 memmap_init(size, nid, j, zone_start_pfn);
1da177e4
LT
6023 }
6024}
6025
bd721ea7 6026static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 6027{
b0aeba74 6028 unsigned long __maybe_unused start = 0;
a1c34a3b
LA
6029 unsigned long __maybe_unused offset = 0;
6030
1da177e4
LT
6031 /* Skip empty nodes */
6032 if (!pgdat->node_spanned_pages)
6033 return;
6034
d41dee36 6035#ifdef CONFIG_FLAT_NODE_MEM_MAP
b0aeba74
TL
6036 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6037 offset = pgdat->node_start_pfn - start;
1da177e4
LT
6038 /* ia64 gets its own node_mem_map, before this, without bootmem */
6039 if (!pgdat->node_mem_map) {
b0aeba74 6040 unsigned long size, end;
d41dee36
AW
6041 struct page *map;
6042
e984bb43
BP
6043 /*
6044 * The zone's endpoints aren't required to be MAX_ORDER
6045 * aligned but the node_mem_map endpoints must be in order
6046 * for the buddy allocator to function correctly.
6047 */
108bcc96 6048 end = pgdat_end_pfn(pgdat);
e984bb43
BP
6049 end = ALIGN(end, MAX_ORDER_NR_PAGES);
6050 size = (end - start) * sizeof(struct page);
6f167ec7
DH
6051 map = alloc_remap(pgdat->node_id, size);
6052 if (!map)
6782832e
SS
6053 map = memblock_virt_alloc_node_nopanic(size,
6054 pgdat->node_id);
a1c34a3b 6055 pgdat->node_mem_map = map + offset;
1da177e4 6056 }
12d810c1 6057#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
6058 /*
6059 * With no DISCONTIG, the global mem_map is just set as node 0's
6060 */
c713216d 6061 if (pgdat == NODE_DATA(0)) {
1da177e4 6062 mem_map = NODE_DATA(0)->node_mem_map;
a1c34a3b 6063#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
c713216d 6064 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
a1c34a3b 6065 mem_map -= offset;
0ee332c1 6066#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 6067 }
1da177e4 6068#endif
d41dee36 6069#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
6070}
6071
9109fb7b
JW
6072void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
6073 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 6074{
9109fb7b 6075 pg_data_t *pgdat = NODE_DATA(nid);
7960aedd
ZY
6076 unsigned long start_pfn = 0;
6077 unsigned long end_pfn = 0;
9109fb7b 6078
88fdf75d 6079 /* pg_data_t should be reset to zero when it's allocated */
38087d9b 6080 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
88fdf75d 6081
3a80a7fa 6082 reset_deferred_meminit(pgdat);
1da177e4
LT
6083 pgdat->node_id = nid;
6084 pgdat->node_start_pfn = node_start_pfn;
75ef7184 6085 pgdat->per_cpu_nodestats = NULL;
7960aedd
ZY
6086#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6087 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8d29e18a 6088 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
4ada0c5a
ZL
6089 (u64)start_pfn << PAGE_SHIFT,
6090 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
d91749c1
TI
6091#else
6092 start_pfn = node_start_pfn;
7960aedd
ZY
6093#endif
6094 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6095 zones_size, zholes_size);
1da177e4
LT
6096
6097 alloc_node_mem_map(pgdat);
e8c27ac9
YL
6098#ifdef CONFIG_FLAT_NODE_MEM_MAP
6099 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
6100 nid, (unsigned long)pgdat,
6101 (unsigned long)pgdat->node_mem_map);
6102#endif
1da177e4 6103
7f3eb55b 6104 free_area_init_core(pgdat);
1da177e4
LT
6105}
6106
0ee332c1 6107#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
418508c1
MS
6108
6109#if MAX_NUMNODES > 1
6110/*
6111 * Figure out the number of possible node ids.
6112 */
f9872caf 6113void __init setup_nr_node_ids(void)
418508c1 6114{
904a9553 6115 unsigned int highest;
418508c1 6116
904a9553 6117 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
418508c1
MS
6118 nr_node_ids = highest + 1;
6119}
418508c1
MS
6120#endif
6121
1e01979c
TH
6122/**
6123 * node_map_pfn_alignment - determine the maximum internode alignment
6124 *
6125 * This function should be called after node map is populated and sorted.
6126 * It calculates the maximum power of two alignment which can distinguish
6127 * all the nodes.
6128 *
6129 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6130 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
6131 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
6132 * shifted, 1GiB is enough and this function will indicate so.
6133 *
6134 * This is used to test whether pfn -> nid mapping of the chosen memory
6135 * model has fine enough granularity to avoid incorrect mapping for the
6136 * populated node map.
6137 *
6138 * Returns the determined alignment in pfn's. 0 if there is no alignment
6139 * requirement (single node).
6140 */
6141unsigned long __init node_map_pfn_alignment(void)
6142{
6143 unsigned long accl_mask = 0, last_end = 0;
c13291a5 6144 unsigned long start, end, mask;
1e01979c 6145 int last_nid = -1;
c13291a5 6146 int i, nid;
1e01979c 6147
c13291a5 6148 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1e01979c
TH
6149 if (!start || last_nid < 0 || last_nid == nid) {
6150 last_nid = nid;
6151 last_end = end;
6152 continue;
6153 }
6154
6155 /*
6156 * Start with a mask granular enough to pin-point to the
6157 * start pfn and tick off bits one-by-one until it becomes
6158 * too coarse to separate the current node from the last.
6159 */
6160 mask = ~((1 << __ffs(start)) - 1);
6161 while (mask && last_end <= (start & (mask << 1)))
6162 mask <<= 1;
6163
6164 /* accumulate all internode masks */
6165 accl_mask |= mask;
6166 }
6167
6168 /* convert mask to number of pages */
6169 return ~accl_mask + 1;
6170}
6171
a6af2bc3 6172/* Find the lowest pfn for a node */
b69a7288 6173static unsigned long __init find_min_pfn_for_node(int nid)
c713216d 6174{
a6af2bc3 6175 unsigned long min_pfn = ULONG_MAX;
c13291a5
TH
6176 unsigned long start_pfn;
6177 int i;
1abbfb41 6178
c13291a5
TH
6179 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6180 min_pfn = min(min_pfn, start_pfn);
c713216d 6181
a6af2bc3 6182 if (min_pfn == ULONG_MAX) {
1170532b 6183 pr_warn("Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
6184 return 0;
6185 }
6186
6187 return min_pfn;
c713216d
MG
6188}
6189
6190/**
6191 * find_min_pfn_with_active_regions - Find the minimum PFN registered
6192 *
6193 * It returns the minimum PFN based on information provided via
7d018176 6194 * memblock_set_node().
c713216d
MG
6195 */
6196unsigned long __init find_min_pfn_with_active_regions(void)
6197{
6198 return find_min_pfn_for_node(MAX_NUMNODES);
6199}
6200
37b07e41
LS
6201/*
6202 * early_calculate_totalpages()
6203 * Sum pages in active regions for movable zone.
4b0ef1fe 6204 * Populate N_MEMORY for calculating usable_nodes.
37b07e41 6205 */
484f51f8 6206static unsigned long __init early_calculate_totalpages(void)
7e63efef 6207{
7e63efef 6208 unsigned long totalpages = 0;
c13291a5
TH
6209 unsigned long start_pfn, end_pfn;
6210 int i, nid;
6211
6212 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6213 unsigned long pages = end_pfn - start_pfn;
7e63efef 6214
37b07e41
LS
6215 totalpages += pages;
6216 if (pages)
4b0ef1fe 6217 node_set_state(nid, N_MEMORY);
37b07e41 6218 }
b8af2941 6219 return totalpages;
7e63efef
MG
6220}
6221
2a1e274a
MG
6222/*
6223 * Find the PFN the Movable zone begins in each node. Kernel memory
6224 * is spread evenly between nodes as long as the nodes have enough
6225 * memory. When they don't, some nodes will have more kernelcore than
6226 * others
6227 */
b224ef85 6228static void __init find_zone_movable_pfns_for_nodes(void)
2a1e274a
MG
6229{
6230 int i, nid;
6231 unsigned long usable_startpfn;
6232 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd 6233 /* save the state before borrow the nodemask */
4b0ef1fe 6234 nodemask_t saved_node_state = node_states[N_MEMORY];
37b07e41 6235 unsigned long totalpages = early_calculate_totalpages();
4b0ef1fe 6236 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
136199f0 6237 struct memblock_region *r;
b2f3eebe
TC
6238
6239 /* Need to find movable_zone earlier when movable_node is specified. */
6240 find_usable_zone_for_movable();
6241
6242 /*
6243 * If movable_node is specified, ignore kernelcore and movablecore
6244 * options.
6245 */
6246 if (movable_node_is_enabled()) {
136199f0
EM
6247 for_each_memblock(memory, r) {
6248 if (!memblock_is_hotpluggable(r))
b2f3eebe
TC
6249 continue;
6250
136199f0 6251 nid = r->nid;
b2f3eebe 6252
136199f0 6253 usable_startpfn = PFN_DOWN(r->base);
b2f3eebe
TC
6254 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6255 min(usable_startpfn, zone_movable_pfn[nid]) :
6256 usable_startpfn;
6257 }
6258
6259 goto out2;
6260 }
2a1e274a 6261
342332e6
TI
6262 /*
6263 * If kernelcore=mirror is specified, ignore movablecore option
6264 */
6265 if (mirrored_kernelcore) {
6266 bool mem_below_4gb_not_mirrored = false;
6267
6268 for_each_memblock(memory, r) {
6269 if (memblock_is_mirror(r))
6270 continue;
6271
6272 nid = r->nid;
6273
6274 usable_startpfn = memblock_region_memory_base_pfn(r);
6275
6276 if (usable_startpfn < 0x100000) {
6277 mem_below_4gb_not_mirrored = true;
6278 continue;
6279 }
6280
6281 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6282 min(usable_startpfn, zone_movable_pfn[nid]) :
6283 usable_startpfn;
6284 }
6285
6286 if (mem_below_4gb_not_mirrored)
6287 pr_warn("This configuration results in unmirrored kernel memory.");
6288
6289 goto out2;
6290 }
6291
7e63efef 6292 /*
b2f3eebe 6293 * If movablecore=nn[KMG] was specified, calculate what size of
7e63efef
MG
6294 * kernelcore that corresponds so that memory usable for
6295 * any allocation type is evenly spread. If both kernelcore
6296 * and movablecore are specified, then the value of kernelcore
6297 * will be used for required_kernelcore if it's greater than
6298 * what movablecore would have allowed.
6299 */
6300 if (required_movablecore) {
7e63efef
MG
6301 unsigned long corepages;
6302
6303 /*
6304 * Round-up so that ZONE_MOVABLE is at least as large as what
6305 * was requested by the user
6306 */
6307 required_movablecore =
6308 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
9fd745d4 6309 required_movablecore = min(totalpages, required_movablecore);
7e63efef
MG
6310 corepages = totalpages - required_movablecore;
6311
6312 required_kernelcore = max(required_kernelcore, corepages);
6313 }
6314
bde304bd
XQ
6315 /*
6316 * If kernelcore was not specified or kernelcore size is larger
6317 * than totalpages, there is no ZONE_MOVABLE.
6318 */
6319 if (!required_kernelcore || required_kernelcore >= totalpages)
66918dcd 6320 goto out;
2a1e274a
MG
6321
6322 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
2a1e274a
MG
6323 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6324
6325restart:
6326 /* Spread kernelcore memory as evenly as possible throughout nodes */
6327 kernelcore_node = required_kernelcore / usable_nodes;
4b0ef1fe 6328 for_each_node_state(nid, N_MEMORY) {
c13291a5
TH
6329 unsigned long start_pfn, end_pfn;
6330
2a1e274a
MG
6331 /*
6332 * Recalculate kernelcore_node if the division per node
6333 * now exceeds what is necessary to satisfy the requested
6334 * amount of memory for the kernel
6335 */
6336 if (required_kernelcore < kernelcore_node)
6337 kernelcore_node = required_kernelcore / usable_nodes;
6338
6339 /*
6340 * As the map is walked, we track how much memory is usable
6341 * by the kernel using kernelcore_remaining. When it is
6342 * 0, the rest of the node is usable by ZONE_MOVABLE
6343 */
6344 kernelcore_remaining = kernelcore_node;
6345
6346 /* Go through each range of PFNs within this node */
c13291a5 6347 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2a1e274a
MG
6348 unsigned long size_pages;
6349
c13291a5 6350 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
2a1e274a
MG
6351 if (start_pfn >= end_pfn)
6352 continue;
6353
6354 /* Account for what is only usable for kernelcore */
6355 if (start_pfn < usable_startpfn) {
6356 unsigned long kernel_pages;
6357 kernel_pages = min(end_pfn, usable_startpfn)
6358 - start_pfn;
6359
6360 kernelcore_remaining -= min(kernel_pages,
6361 kernelcore_remaining);
6362 required_kernelcore -= min(kernel_pages,
6363 required_kernelcore);
6364
6365 /* Continue if range is now fully accounted */
6366 if (end_pfn <= usable_startpfn) {
6367
6368 /*
6369 * Push zone_movable_pfn to the end so
6370 * that if we have to rebalance
6371 * kernelcore across nodes, we will
6372 * not double account here
6373 */
6374 zone_movable_pfn[nid] = end_pfn;
6375 continue;
6376 }
6377 start_pfn = usable_startpfn;
6378 }
6379
6380 /*
6381 * The usable PFN range for ZONE_MOVABLE is from
6382 * start_pfn->end_pfn. Calculate size_pages as the
6383 * number of pages used as kernelcore
6384 */
6385 size_pages = end_pfn - start_pfn;
6386 if (size_pages > kernelcore_remaining)
6387 size_pages = kernelcore_remaining;
6388 zone_movable_pfn[nid] = start_pfn + size_pages;
6389
6390 /*
6391 * Some kernelcore has been met, update counts and
6392 * break if the kernelcore for this node has been
b8af2941 6393 * satisfied
2a1e274a
MG
6394 */
6395 required_kernelcore -= min(required_kernelcore,
6396 size_pages);
6397 kernelcore_remaining -= size_pages;
6398 if (!kernelcore_remaining)
6399 break;
6400 }
6401 }
6402
6403 /*
6404 * If there is still required_kernelcore, we do another pass with one
6405 * less node in the count. This will push zone_movable_pfn[nid] further
6406 * along on the nodes that still have memory until kernelcore is
b8af2941 6407 * satisfied
2a1e274a
MG
6408 */
6409 usable_nodes--;
6410 if (usable_nodes && required_kernelcore > usable_nodes)
6411 goto restart;
6412
b2f3eebe 6413out2:
2a1e274a
MG
6414 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
6415 for (nid = 0; nid < MAX_NUMNODES; nid++)
6416 zone_movable_pfn[nid] =
6417 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd 6418
20e6926d 6419out:
66918dcd 6420 /* restore the node_state */
4b0ef1fe 6421 node_states[N_MEMORY] = saved_node_state;
2a1e274a
MG
6422}
6423
4b0ef1fe
LJ
6424/* Any regular or high memory on that node ? */
6425static void check_for_memory(pg_data_t *pgdat, int nid)
37b07e41 6426{
37b07e41
LS
6427 enum zone_type zone_type;
6428
4b0ef1fe
LJ
6429 if (N_MEMORY == N_NORMAL_MEMORY)
6430 return;
6431
6432 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
37b07e41 6433 struct zone *zone = &pgdat->node_zones[zone_type];
b38a8725 6434 if (populated_zone(zone)) {
4b0ef1fe
LJ
6435 node_set_state(nid, N_HIGH_MEMORY);
6436 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6437 zone_type <= ZONE_NORMAL)
6438 node_set_state(nid, N_NORMAL_MEMORY);
d0048b0e
BL
6439 break;
6440 }
37b07e41 6441 }
37b07e41
LS
6442}
6443
c713216d
MG
6444/**
6445 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 6446 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
6447 *
6448 * This will call free_area_init_node() for each active node in the system.
7d018176 6449 * Using the page ranges provided by memblock_set_node(), the size of each
c713216d
MG
6450 * zone in each node and their holes is calculated. If the maximum PFN
6451 * between two adjacent zones match, it is assumed that the zone is empty.
6452 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
6453 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
6454 * starts where the previous one ended. For example, ZONE_DMA32 starts
6455 * at arch_max_dma_pfn.
6456 */
6457void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6458{
c13291a5
TH
6459 unsigned long start_pfn, end_pfn;
6460 int i, nid;
a6af2bc3 6461
c713216d
MG
6462 /* Record where the zone boundaries are */
6463 memset(arch_zone_lowest_possible_pfn, 0,
6464 sizeof(arch_zone_lowest_possible_pfn));
6465 memset(arch_zone_highest_possible_pfn, 0,
6466 sizeof(arch_zone_highest_possible_pfn));
90cae1fe
OH
6467
6468 start_pfn = find_min_pfn_with_active_regions();
6469
6470 for (i = 0; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
6471 if (i == ZONE_MOVABLE)
6472 continue;
90cae1fe
OH
6473
6474 end_pfn = max(max_zone_pfn[i], start_pfn);
6475 arch_zone_lowest_possible_pfn[i] = start_pfn;
6476 arch_zone_highest_possible_pfn[i] = end_pfn;
6477
6478 start_pfn = end_pfn;
c713216d 6479 }
2a1e274a
MG
6480
6481 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
6482 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
b224ef85 6483 find_zone_movable_pfns_for_nodes();
c713216d 6484
c713216d 6485 /* Print out the zone ranges */
f88dfff5 6486 pr_info("Zone ranges:\n");
2a1e274a
MG
6487 for (i = 0; i < MAX_NR_ZONES; i++) {
6488 if (i == ZONE_MOVABLE)
6489 continue;
f88dfff5 6490 pr_info(" %-8s ", zone_names[i]);
72f0ba02
DR
6491 if (arch_zone_lowest_possible_pfn[i] ==
6492 arch_zone_highest_possible_pfn[i])
f88dfff5 6493 pr_cont("empty\n");
72f0ba02 6494 else
8d29e18a
JG
6495 pr_cont("[mem %#018Lx-%#018Lx]\n",
6496 (u64)arch_zone_lowest_possible_pfn[i]
6497 << PAGE_SHIFT,
6498 ((u64)arch_zone_highest_possible_pfn[i]
a62e2f4f 6499 << PAGE_SHIFT) - 1);
2a1e274a
MG
6500 }
6501
6502 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
f88dfff5 6503 pr_info("Movable zone start for each node\n");
2a1e274a
MG
6504 for (i = 0; i < MAX_NUMNODES; i++) {
6505 if (zone_movable_pfn[i])
8d29e18a
JG
6506 pr_info(" Node %d: %#018Lx\n", i,
6507 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
2a1e274a 6508 }
c713216d 6509
f2d52fe5 6510 /* Print out the early node map */
f88dfff5 6511 pr_info("Early memory node ranges\n");
c13291a5 6512 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
8d29e18a
JG
6513 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6514 (u64)start_pfn << PAGE_SHIFT,
6515 ((u64)end_pfn << PAGE_SHIFT) - 1);
c713216d
MG
6516
6517 /* Initialise every node */
708614e6 6518 mminit_verify_pageflags_layout();
8ef82866 6519 setup_nr_node_ids();
c713216d
MG
6520 for_each_online_node(nid) {
6521 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 6522 free_area_init_node(nid, NULL,
c713216d 6523 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
6524
6525 /* Any memory on that node */
6526 if (pgdat->node_present_pages)
4b0ef1fe
LJ
6527 node_set_state(nid, N_MEMORY);
6528 check_for_memory(pgdat, nid);
c713216d
MG
6529 }
6530}
2a1e274a 6531
7e63efef 6532static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
6533{
6534 unsigned long long coremem;
6535 if (!p)
6536 return -EINVAL;
6537
6538 coremem = memparse(p, &p);
7e63efef 6539 *core = coremem >> PAGE_SHIFT;
2a1e274a 6540
7e63efef 6541 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
6542 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6543
6544 return 0;
6545}
ed7ed365 6546
7e63efef
MG
6547/*
6548 * kernelcore=size sets the amount of memory for use for allocations that
6549 * cannot be reclaimed or migrated.
6550 */
6551static int __init cmdline_parse_kernelcore(char *p)
6552{
342332e6
TI
6553 /* parse kernelcore=mirror */
6554 if (parse_option_str(p, "mirror")) {
6555 mirrored_kernelcore = true;
6556 return 0;
6557 }
6558
7e63efef
MG
6559 return cmdline_parse_core(p, &required_kernelcore);
6560}
6561
6562/*
6563 * movablecore=size sets the amount of memory for use for allocations that
6564 * can be reclaimed or migrated.
6565 */
6566static int __init cmdline_parse_movablecore(char *p)
6567{
6568 return cmdline_parse_core(p, &required_movablecore);
6569}
6570
ed7ed365 6571early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 6572early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 6573
0ee332c1 6574#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
c713216d 6575
c3d5f5f0
JL
6576void adjust_managed_page_count(struct page *page, long count)
6577{
6578 spin_lock(&managed_page_count_lock);
6579 page_zone(page)->managed_pages += count;
6580 totalram_pages += count;
3dcc0571
JL
6581#ifdef CONFIG_HIGHMEM
6582 if (PageHighMem(page))
6583 totalhigh_pages += count;
6584#endif
c3d5f5f0
JL
6585 spin_unlock(&managed_page_count_lock);
6586}
3dcc0571 6587EXPORT_SYMBOL(adjust_managed_page_count);
c3d5f5f0 6588
11199692 6589unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
69afade7 6590{
11199692
JL
6591 void *pos;
6592 unsigned long pages = 0;
69afade7 6593
11199692
JL
6594 start = (void *)PAGE_ALIGN((unsigned long)start);
6595 end = (void *)((unsigned long)end & PAGE_MASK);
6596 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
dbe67df4 6597 if ((unsigned int)poison <= 0xFF)
11199692
JL
6598 memset(pos, poison, PAGE_SIZE);
6599 free_reserved_page(virt_to_page(pos));
69afade7
JL
6600 }
6601
6602 if (pages && s)
adb1fe9a
JP
6603 pr_info("Freeing %s memory: %ldK\n",
6604 s, pages << (PAGE_SHIFT - 10));
69afade7
JL
6605
6606 return pages;
6607}
11199692 6608EXPORT_SYMBOL(free_reserved_area);
69afade7 6609
cfa11e08
JL
6610#ifdef CONFIG_HIGHMEM
6611void free_highmem_page(struct page *page)
6612{
6613 __free_reserved_page(page);
6614 totalram_pages++;
7b4b2a0d 6615 page_zone(page)->managed_pages++;
cfa11e08
JL
6616 totalhigh_pages++;
6617}
6618#endif
6619
7ee3d4e8
JL
6620
6621void __init mem_init_print_info(const char *str)
6622{
6623 unsigned long physpages, codesize, datasize, rosize, bss_size;
6624 unsigned long init_code_size, init_data_size;
6625
6626 physpages = get_num_physpages();
6627 codesize = _etext - _stext;
6628 datasize = _edata - _sdata;
6629 rosize = __end_rodata - __start_rodata;
6630 bss_size = __bss_stop - __bss_start;
6631 init_data_size = __init_end - __init_begin;
6632 init_code_size = _einittext - _sinittext;
6633
6634 /*
6635 * Detect special cases and adjust section sizes accordingly:
6636 * 1) .init.* may be embedded into .data sections
6637 * 2) .init.text.* may be out of [__init_begin, __init_end],
6638 * please refer to arch/tile/kernel/vmlinux.lds.S.
6639 * 3) .rodata.* may be embedded into .text or .data sections.
6640 */
6641#define adj_init_size(start, end, size, pos, adj) \
b8af2941
PK
6642 do { \
6643 if (start <= pos && pos < end && size > adj) \
6644 size -= adj; \
6645 } while (0)
7ee3d4e8
JL
6646
6647 adj_init_size(__init_begin, __init_end, init_data_size,
6648 _sinittext, init_code_size);
6649 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6650 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6651 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6652 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6653
6654#undef adj_init_size
6655
756a025f 6656 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7ee3d4e8 6657#ifdef CONFIG_HIGHMEM
756a025f 6658 ", %luK highmem"
7ee3d4e8 6659#endif
756a025f
JP
6660 "%s%s)\n",
6661 nr_free_pages() << (PAGE_SHIFT - 10),
6662 physpages << (PAGE_SHIFT - 10),
6663 codesize >> 10, datasize >> 10, rosize >> 10,
6664 (init_data_size + init_code_size) >> 10, bss_size >> 10,
6665 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6666 totalcma_pages << (PAGE_SHIFT - 10),
7ee3d4e8 6667#ifdef CONFIG_HIGHMEM
756a025f 6668 totalhigh_pages << (PAGE_SHIFT - 10),
7ee3d4e8 6669#endif
756a025f 6670 str ? ", " : "", str ? str : "");
7ee3d4e8
JL
6671}
6672
0e0b864e 6673/**
88ca3b94
RD
6674 * set_dma_reserve - set the specified number of pages reserved in the first zone
6675 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e 6676 *
013110a7 6677 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
0e0b864e
MG
6678 * In the DMA zone, a significant percentage may be consumed by kernel image
6679 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
6680 * function may optionally be used to account for unfreeable pages in the
6681 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6682 * smaller per-cpu batchsize.
0e0b864e
MG
6683 */
6684void __init set_dma_reserve(unsigned long new_dma_reserve)
6685{
6686 dma_reserve = new_dma_reserve;
6687}
6688
1da177e4
LT
6689void __init free_area_init(unsigned long *zones_size)
6690{
9109fb7b 6691 free_area_init_node(0, zones_size,
1da177e4
LT
6692 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6693}
1da177e4 6694
005fd4bb 6695static int page_alloc_cpu_dead(unsigned int cpu)
1da177e4 6696{
1da177e4 6697
005fd4bb
SAS
6698 lru_add_drain_cpu(cpu);
6699 drain_pages(cpu);
9f8f2172 6700
005fd4bb
SAS
6701 /*
6702 * Spill the event counters of the dead processor
6703 * into the current processors event counters.
6704 * This artificially elevates the count of the current
6705 * processor.
6706 */
6707 vm_events_fold_cpu(cpu);
9f8f2172 6708
005fd4bb
SAS
6709 /*
6710 * Zero the differential counters of the dead processor
6711 * so that the vm statistics are consistent.
6712 *
6713 * This is only okay since the processor is dead and cannot
6714 * race with what we are doing.
6715 */
6716 cpu_vm_stats_fold(cpu);
6717 return 0;
1da177e4 6718}
1da177e4
LT
6719
6720void __init page_alloc_init(void)
6721{
005fd4bb
SAS
6722 int ret;
6723
6724 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
6725 "mm/page_alloc:dead", NULL,
6726 page_alloc_cpu_dead);
6727 WARN_ON(ret < 0);
1da177e4
LT
6728}
6729
cb45b0e9 6730/*
34b10060 6731 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
cb45b0e9
HA
6732 * or min_free_kbytes changes.
6733 */
6734static void calculate_totalreserve_pages(void)
6735{
6736 struct pglist_data *pgdat;
6737 unsigned long reserve_pages = 0;
2f6726e5 6738 enum zone_type i, j;
cb45b0e9
HA
6739
6740 for_each_online_pgdat(pgdat) {
281e3726
MG
6741
6742 pgdat->totalreserve_pages = 0;
6743
cb45b0e9
HA
6744 for (i = 0; i < MAX_NR_ZONES; i++) {
6745 struct zone *zone = pgdat->node_zones + i;
3484b2de 6746 long max = 0;
cb45b0e9
HA
6747
6748 /* Find valid and maximum lowmem_reserve in the zone */
6749 for (j = i; j < MAX_NR_ZONES; j++) {
6750 if (zone->lowmem_reserve[j] > max)
6751 max = zone->lowmem_reserve[j];
6752 }
6753
41858966
MG
6754 /* we treat the high watermark as reserved pages. */
6755 max += high_wmark_pages(zone);
cb45b0e9 6756
b40da049
JL
6757 if (max > zone->managed_pages)
6758 max = zone->managed_pages;
a8d01437 6759
281e3726 6760 pgdat->totalreserve_pages += max;
a8d01437 6761
cb45b0e9
HA
6762 reserve_pages += max;
6763 }
6764 }
6765 totalreserve_pages = reserve_pages;
6766}
6767
1da177e4
LT
6768/*
6769 * setup_per_zone_lowmem_reserve - called whenever
34b10060 6770 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
1da177e4
LT
6771 * has a correct pages reserved value, so an adequate number of
6772 * pages are left in the zone after a successful __alloc_pages().
6773 */
6774static void setup_per_zone_lowmem_reserve(void)
6775{
6776 struct pglist_data *pgdat;
2f6726e5 6777 enum zone_type j, idx;
1da177e4 6778
ec936fc5 6779 for_each_online_pgdat(pgdat) {
1da177e4
LT
6780 for (j = 0; j < MAX_NR_ZONES; j++) {
6781 struct zone *zone = pgdat->node_zones + j;
b40da049 6782 unsigned long managed_pages = zone->managed_pages;
1da177e4
LT
6783
6784 zone->lowmem_reserve[j] = 0;
6785
2f6726e5
CL
6786 idx = j;
6787 while (idx) {
1da177e4
LT
6788 struct zone *lower_zone;
6789
2f6726e5
CL
6790 idx--;
6791
1da177e4
LT
6792 if (sysctl_lowmem_reserve_ratio[idx] < 1)
6793 sysctl_lowmem_reserve_ratio[idx] = 1;
6794
6795 lower_zone = pgdat->node_zones + idx;
b40da049 6796 lower_zone->lowmem_reserve[j] = managed_pages /
1da177e4 6797 sysctl_lowmem_reserve_ratio[idx];
b40da049 6798 managed_pages += lower_zone->managed_pages;
1da177e4
LT
6799 }
6800 }
6801 }
cb45b0e9
HA
6802
6803 /* update totalreserve_pages */
6804 calculate_totalreserve_pages();
1da177e4
LT
6805}
6806
cfd3da1e 6807static void __setup_per_zone_wmarks(void)
1da177e4
LT
6808{
6809 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6810 unsigned long lowmem_pages = 0;
6811 struct zone *zone;
6812 unsigned long flags;
6813
6814 /* Calculate total number of !ZONE_HIGHMEM pages */
6815 for_each_zone(zone) {
6816 if (!is_highmem(zone))
b40da049 6817 lowmem_pages += zone->managed_pages;
1da177e4
LT
6818 }
6819
6820 for_each_zone(zone) {
ac924c60
AM
6821 u64 tmp;
6822
1125b4e3 6823 spin_lock_irqsave(&zone->lock, flags);
b40da049 6824 tmp = (u64)pages_min * zone->managed_pages;
ac924c60 6825 do_div(tmp, lowmem_pages);
1da177e4
LT
6826 if (is_highmem(zone)) {
6827 /*
669ed175
NP
6828 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6829 * need highmem pages, so cap pages_min to a small
6830 * value here.
6831 *
41858966 6832 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
42ff2703 6833 * deltas control asynch page reclaim, and so should
669ed175 6834 * not be capped for highmem.
1da177e4 6835 */
90ae8d67 6836 unsigned long min_pages;
1da177e4 6837
b40da049 6838 min_pages = zone->managed_pages / 1024;
90ae8d67 6839 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
41858966 6840 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 6841 } else {
669ed175
NP
6842 /*
6843 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
6844 * proportionate to the zone's size.
6845 */
41858966 6846 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
6847 }
6848
795ae7a0
JW
6849 /*
6850 * Set the kswapd watermarks distance according to the
6851 * scale factor in proportion to available memory, but
6852 * ensure a minimum size on small systems.
6853 */
6854 tmp = max_t(u64, tmp >> 2,
6855 mult_frac(zone->managed_pages,
6856 watermark_scale_factor, 10000));
6857
6858 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
6859 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
49f223a9 6860
1125b4e3 6861 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 6862 }
cb45b0e9
HA
6863
6864 /* update totalreserve_pages */
6865 calculate_totalreserve_pages();
1da177e4
LT
6866}
6867
cfd3da1e
MG
6868/**
6869 * setup_per_zone_wmarks - called when min_free_kbytes changes
6870 * or when memory is hot-{added|removed}
6871 *
6872 * Ensures that the watermark[min,low,high] values for each zone are set
6873 * correctly with respect to min_free_kbytes.
6874 */
6875void setup_per_zone_wmarks(void)
6876{
6877 mutex_lock(&zonelists_mutex);
6878 __setup_per_zone_wmarks();
6879 mutex_unlock(&zonelists_mutex);
6880}
6881
1da177e4
LT
6882/*
6883 * Initialise min_free_kbytes.
6884 *
6885 * For small machines we want it small (128k min). For large machines
6886 * we want it large (64MB max). But it is not linear, because network
6887 * bandwidth does not increase linearly with machine size. We use
6888 *
b8af2941 6889 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
1da177e4
LT
6890 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
6891 *
6892 * which yields
6893 *
6894 * 16MB: 512k
6895 * 32MB: 724k
6896 * 64MB: 1024k
6897 * 128MB: 1448k
6898 * 256MB: 2048k
6899 * 512MB: 2896k
6900 * 1024MB: 4096k
6901 * 2048MB: 5792k
6902 * 4096MB: 8192k
6903 * 8192MB: 11584k
6904 * 16384MB: 16384k
6905 */
1b79acc9 6906int __meminit init_per_zone_wmark_min(void)
1da177e4
LT
6907{
6908 unsigned long lowmem_kbytes;
5f12733e 6909 int new_min_free_kbytes;
1da177e4
LT
6910
6911 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5f12733e
MH
6912 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6913
6914 if (new_min_free_kbytes > user_min_free_kbytes) {
6915 min_free_kbytes = new_min_free_kbytes;
6916 if (min_free_kbytes < 128)
6917 min_free_kbytes = 128;
6918 if (min_free_kbytes > 65536)
6919 min_free_kbytes = 65536;
6920 } else {
6921 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6922 new_min_free_kbytes, user_min_free_kbytes);
6923 }
bc75d33f 6924 setup_per_zone_wmarks();
a6cccdc3 6925 refresh_zone_stat_thresholds();
1da177e4 6926 setup_per_zone_lowmem_reserve();
6423aa81
JK
6927
6928#ifdef CONFIG_NUMA
6929 setup_min_unmapped_ratio();
6930 setup_min_slab_ratio();
6931#endif
6932
1da177e4
LT
6933 return 0;
6934}
bc22af74 6935core_initcall(init_per_zone_wmark_min)
1da177e4
LT
6936
6937/*
b8af2941 6938 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
1da177e4
LT
6939 * that we can call two helper functions whenever min_free_kbytes
6940 * changes.
6941 */
cccad5b9 6942int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6943 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 6944{
da8c757b
HP
6945 int rc;
6946
6947 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6948 if (rc)
6949 return rc;
6950
5f12733e
MH
6951 if (write) {
6952 user_min_free_kbytes = min_free_kbytes;
bc75d33f 6953 setup_per_zone_wmarks();
5f12733e 6954 }
1da177e4
LT
6955 return 0;
6956}
6957
795ae7a0
JW
6958int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
6959 void __user *buffer, size_t *length, loff_t *ppos)
6960{
6961 int rc;
6962
6963 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6964 if (rc)
6965 return rc;
6966
6967 if (write)
6968 setup_per_zone_wmarks();
6969
6970 return 0;
6971}
6972
9614634f 6973#ifdef CONFIG_NUMA
6423aa81 6974static void setup_min_unmapped_ratio(void)
9614634f 6975{
6423aa81 6976 pg_data_t *pgdat;
9614634f 6977 struct zone *zone;
9614634f 6978
a5f5f91d 6979 for_each_online_pgdat(pgdat)
81cbcbc2 6980 pgdat->min_unmapped_pages = 0;
a5f5f91d 6981
9614634f 6982 for_each_zone(zone)
a5f5f91d 6983 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
9614634f 6984 sysctl_min_unmapped_ratio) / 100;
9614634f 6985}
0ff38490 6986
6423aa81
JK
6987
6988int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 6989 void __user *buffer, size_t *length, loff_t *ppos)
0ff38490 6990{
0ff38490
CL
6991 int rc;
6992
8d65af78 6993 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
6994 if (rc)
6995 return rc;
6996
6423aa81
JK
6997 setup_min_unmapped_ratio();
6998
6999 return 0;
7000}
7001
7002static void setup_min_slab_ratio(void)
7003{
7004 pg_data_t *pgdat;
7005 struct zone *zone;
7006
a5f5f91d
MG
7007 for_each_online_pgdat(pgdat)
7008 pgdat->min_slab_pages = 0;
7009
0ff38490 7010 for_each_zone(zone)
a5f5f91d 7011 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
0ff38490 7012 sysctl_min_slab_ratio) / 100;
6423aa81
JK
7013}
7014
7015int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7016 void __user *buffer, size_t *length, loff_t *ppos)
7017{
7018 int rc;
7019
7020 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7021 if (rc)
7022 return rc;
7023
7024 setup_min_slab_ratio();
7025
0ff38490
CL
7026 return 0;
7027}
9614634f
CL
7028#endif
7029
1da177e4
LT
7030/*
7031 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7032 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
7033 * whenever sysctl_lowmem_reserve_ratio changes.
7034 *
7035 * The reserve ratio obviously has absolutely no relation with the
41858966 7036 * minimum watermarks. The lowmem reserve ratio can only make sense
1da177e4
LT
7037 * if in function of the boot time zone sizes.
7038 */
cccad5b9 7039int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8d65af78 7040 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 7041{
8d65af78 7042 proc_dointvec_minmax(table, write, buffer, length, ppos);
1da177e4
LT
7043 setup_per_zone_lowmem_reserve();
7044 return 0;
7045}
7046
8ad4b1fb
RS
7047/*
7048 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
b8af2941
PK
7049 * cpu. It is the fraction of total pages in each zone that a hot per cpu
7050 * pagelist can have before it gets flushed back to buddy allocator.
8ad4b1fb 7051 */
cccad5b9 7052int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8d65af78 7053 void __user *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
7054{
7055 struct zone *zone;
7cd2b0a3 7056 int old_percpu_pagelist_fraction;
8ad4b1fb
RS
7057 int ret;
7058
7cd2b0a3
DR
7059 mutex_lock(&pcp_batch_high_lock);
7060 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7061
8d65af78 7062 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7cd2b0a3
DR
7063 if (!write || ret < 0)
7064 goto out;
7065
7066 /* Sanity checking to avoid pcp imbalance */
7067 if (percpu_pagelist_fraction &&
7068 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7069 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7070 ret = -EINVAL;
7071 goto out;
7072 }
7073
7074 /* No change? */
7075 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7076 goto out;
c8e251fa 7077
364df0eb 7078 for_each_populated_zone(zone) {
7cd2b0a3
DR
7079 unsigned int cpu;
7080
22a7f12b 7081 for_each_possible_cpu(cpu)
7cd2b0a3
DR
7082 pageset_set_high_and_batch(zone,
7083 per_cpu_ptr(zone->pageset, cpu));
8ad4b1fb 7084 }
7cd2b0a3 7085out:
c8e251fa 7086 mutex_unlock(&pcp_batch_high_lock);
7cd2b0a3 7087 return ret;
8ad4b1fb
RS
7088}
7089
a9919c79 7090#ifdef CONFIG_NUMA
f034b5d4 7091int hashdist = HASHDIST_DEFAULT;
1da177e4 7092
1da177e4
LT
7093static int __init set_hashdist(char *str)
7094{
7095 if (!str)
7096 return 0;
7097 hashdist = simple_strtoul(str, &str, 0);
7098 return 1;
7099}
7100__setup("hashdist=", set_hashdist);
7101#endif
7102
f6f34b43
SD
7103#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7104/*
7105 * Returns the number of pages that arch has reserved but
7106 * is not known to alloc_large_system_hash().
7107 */
7108static unsigned long __init arch_reserved_kernel_pages(void)
7109{
7110 return 0;
7111}
7112#endif
7113
1da177e4
LT
7114/*
7115 * allocate a large system hash table from bootmem
7116 * - it is assumed that the hash table must contain an exact power-of-2
7117 * quantity of entries
7118 * - limit is the number of hash buckets, not the total allocation size
7119 */
7120void *__init alloc_large_system_hash(const char *tablename,
7121 unsigned long bucketsize,
7122 unsigned long numentries,
7123 int scale,
7124 int flags,
7125 unsigned int *_hash_shift,
7126 unsigned int *_hash_mask,
31fe62b9
TB
7127 unsigned long low_limit,
7128 unsigned long high_limit)
1da177e4 7129{
31fe62b9 7130 unsigned long long max = high_limit;
1da177e4
LT
7131 unsigned long log2qty, size;
7132 void *table = NULL;
7133
7134 /* allow the kernel cmdline to have a say */
7135 if (!numentries) {
7136 /* round applicable memory size up to nearest megabyte */
04903664 7137 numentries = nr_kernel_pages;
f6f34b43 7138 numentries -= arch_reserved_kernel_pages();
a7e83318
JZ
7139
7140 /* It isn't necessary when PAGE_SIZE >= 1MB */
7141 if (PAGE_SHIFT < 20)
7142 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
1da177e4
LT
7143
7144 /* limit to 1 bucket per 2^scale bytes of low memory */
7145 if (scale > PAGE_SHIFT)
7146 numentries >>= (scale - PAGE_SHIFT);
7147 else
7148 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
7149
7150 /* Make sure we've got at least a 0-order allocation.. */
2c85f51d
JB
7151 if (unlikely(flags & HASH_SMALL)) {
7152 /* Makes no sense without HASH_EARLY */
7153 WARN_ON(!(flags & HASH_EARLY));
7154 if (!(numentries >> *_hash_shift)) {
7155 numentries = 1UL << *_hash_shift;
7156 BUG_ON(!numentries);
7157 }
7158 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 7159 numentries = PAGE_SIZE / bucketsize;
1da177e4 7160 }
6e692ed3 7161 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
7162
7163 /* limit allocation size to 1/16 total memory by default */
7164 if (max == 0) {
7165 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7166 do_div(max, bucketsize);
7167 }
074b8517 7168 max = min(max, 0x80000000ULL);
1da177e4 7169
31fe62b9
TB
7170 if (numentries < low_limit)
7171 numentries = low_limit;
1da177e4
LT
7172 if (numentries > max)
7173 numentries = max;
7174
f0d1b0b3 7175 log2qty = ilog2(numentries);
1da177e4
LT
7176
7177 do {
7178 size = bucketsize << log2qty;
7179 if (flags & HASH_EARLY)
6782832e 7180 table = memblock_virt_alloc_nopanic(size, 0);
1da177e4
LT
7181 else if (hashdist)
7182 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
7183 else {
1037b83b
ED
7184 /*
7185 * If bucketsize is not a power-of-two, we may free
a1dd268c
MG
7186 * some pages at the end of hash table which
7187 * alloc_pages_exact() automatically does
1037b83b 7188 */
264ef8a9 7189 if (get_order(size) < MAX_ORDER) {
a1dd268c 7190 table = alloc_pages_exact(size, GFP_ATOMIC);
264ef8a9
CM
7191 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
7192 }
1da177e4
LT
7193 }
7194 } while (!table && size > PAGE_SIZE && --log2qty);
7195
7196 if (!table)
7197 panic("Failed to allocate %s hash table\n", tablename);
7198
1170532b
JP
7199 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7200 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
1da177e4
LT
7201
7202 if (_hash_shift)
7203 *_hash_shift = log2qty;
7204 if (_hash_mask)
7205 *_hash_mask = (1 << log2qty) - 1;
7206
7207 return table;
7208}
a117e66e 7209
a5d76b54 7210/*
80934513
MK
7211 * This function checks whether pageblock includes unmovable pages or not.
7212 * If @count is not zero, it is okay to include less @count unmovable pages
7213 *
b8af2941 7214 * PageLRU check without isolation or lru_lock could race so that
0efadf48
YX
7215 * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
7216 * check without lock_page also may miss some movable non-lru pages at
7217 * race condition. So you can't expect this function should be exact.
a5d76b54 7218 */
b023f468
WC
7219bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7220 bool skip_hwpoisoned_pages)
49ac8255
KH
7221{
7222 unsigned long pfn, iter, found;
47118af0
MN
7223 int mt;
7224
49ac8255
KH
7225 /*
7226 * For avoiding noise data, lru_add_drain_all() should be called
80934513 7227 * If ZONE_MOVABLE, the zone never contains unmovable pages
49ac8255
KH
7228 */
7229 if (zone_idx(zone) == ZONE_MOVABLE)
80934513 7230 return false;
47118af0
MN
7231 mt = get_pageblock_migratetype(page);
7232 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
80934513 7233 return false;
49ac8255
KH
7234
7235 pfn = page_to_pfn(page);
7236 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7237 unsigned long check = pfn + iter;
7238
29723fcc 7239 if (!pfn_valid_within(check))
49ac8255 7240 continue;
29723fcc 7241
49ac8255 7242 page = pfn_to_page(check);
c8721bbb
NH
7243
7244 /*
7245 * Hugepages are not in LRU lists, but they're movable.
7246 * We need not scan over tail pages bacause we don't
7247 * handle each tail page individually in migration.
7248 */
7249 if (PageHuge(page)) {
7250 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7251 continue;
7252 }
7253
97d255c8
MK
7254 /*
7255 * We can't use page_count without pin a page
7256 * because another CPU can free compound page.
7257 * This check already skips compound tails of THP
0139aa7b 7258 * because their page->_refcount is zero at all time.
97d255c8 7259 */
fe896d18 7260 if (!page_ref_count(page)) {
49ac8255
KH
7261 if (PageBuddy(page))
7262 iter += (1 << page_order(page)) - 1;
7263 continue;
7264 }
97d255c8 7265
b023f468
WC
7266 /*
7267 * The HWPoisoned page may be not in buddy system, and
7268 * page_count() is not 0.
7269 */
7270 if (skip_hwpoisoned_pages && PageHWPoison(page))
7271 continue;
7272
0efadf48
YX
7273 if (__PageMovable(page))
7274 continue;
7275
49ac8255
KH
7276 if (!PageLRU(page))
7277 found++;
7278 /*
6b4f7799
JW
7279 * If there are RECLAIMABLE pages, we need to check
7280 * it. But now, memory offline itself doesn't call
7281 * shrink_node_slabs() and it still to be fixed.
49ac8255
KH
7282 */
7283 /*
7284 * If the page is not RAM, page_count()should be 0.
7285 * we don't need more check. This is an _used_ not-movable page.
7286 *
7287 * The problematic thing here is PG_reserved pages. PG_reserved
7288 * is set to both of a memory hole page and a _used_ kernel
7289 * page at boot.
7290 */
7291 if (found > count)
80934513 7292 return true;
49ac8255 7293 }
80934513 7294 return false;
49ac8255
KH
7295}
7296
7297bool is_pageblock_removable_nolock(struct page *page)
7298{
656a0706
MH
7299 struct zone *zone;
7300 unsigned long pfn;
687875fb
MH
7301
7302 /*
7303 * We have to be careful here because we are iterating over memory
7304 * sections which are not zone aware so we might end up outside of
7305 * the zone but still within the section.
656a0706
MH
7306 * We have to take care about the node as well. If the node is offline
7307 * its NODE_DATA will be NULL - see page_zone.
687875fb 7308 */
656a0706
MH
7309 if (!node_online(page_to_nid(page)))
7310 return false;
7311
7312 zone = page_zone(page);
7313 pfn = page_to_pfn(page);
108bcc96 7314 if (!zone_spans_pfn(zone, pfn))
687875fb
MH
7315 return false;
7316
b023f468 7317 return !has_unmovable_pages(zone, page, 0, true);
a5d76b54 7318}
0c0e6195 7319
080fe206 7320#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
041d3a8c
MN
7321
7322static unsigned long pfn_max_align_down(unsigned long pfn)
7323{
7324 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7325 pageblock_nr_pages) - 1);
7326}
7327
7328static unsigned long pfn_max_align_up(unsigned long pfn)
7329{
7330 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7331 pageblock_nr_pages));
7332}
7333
041d3a8c 7334/* [start, end) must belong to a single zone. */
bb13ffeb
MG
7335static int __alloc_contig_migrate_range(struct compact_control *cc,
7336 unsigned long start, unsigned long end)
041d3a8c
MN
7337{
7338 /* This function is based on compact_zone() from compaction.c. */
beb51eaa 7339 unsigned long nr_reclaimed;
041d3a8c
MN
7340 unsigned long pfn = start;
7341 unsigned int tries = 0;
7342 int ret = 0;
7343
be49a6e1 7344 migrate_prep();
041d3a8c 7345
bb13ffeb 7346 while (pfn < end || !list_empty(&cc->migratepages)) {
041d3a8c
MN
7347 if (fatal_signal_pending(current)) {
7348 ret = -EINTR;
7349 break;
7350 }
7351
bb13ffeb
MG
7352 if (list_empty(&cc->migratepages)) {
7353 cc->nr_migratepages = 0;
edc2ca61 7354 pfn = isolate_migratepages_range(cc, pfn, end);
041d3a8c
MN
7355 if (!pfn) {
7356 ret = -EINTR;
7357 break;
7358 }
7359 tries = 0;
7360 } else if (++tries == 5) {
7361 ret = ret < 0 ? ret : -EBUSY;
7362 break;
7363 }
7364
beb51eaa
MK
7365 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7366 &cc->migratepages);
7367 cc->nr_migratepages -= nr_reclaimed;
02c6de8d 7368
9c620e2b 7369 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
e0b9daeb 7370 NULL, 0, cc->mode, MR_CMA);
041d3a8c 7371 }
2a6f5124
SP
7372 if (ret < 0) {
7373 putback_movable_pages(&cc->migratepages);
7374 return ret;
7375 }
7376 return 0;
041d3a8c
MN
7377}
7378
7379/**
7380 * alloc_contig_range() -- tries to allocate given range of pages
7381 * @start: start PFN to allocate
7382 * @end: one-past-the-last PFN to allocate
0815f3d8
MN
7383 * @migratetype: migratetype of the underlaying pageblocks (either
7384 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
7385 * in range must have the same migratetype and it must
7386 * be either of the two.
ca96b625 7387 * @gfp_mask: GFP mask to use during compaction
041d3a8c
MN
7388 *
7389 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
7390 * aligned, however it's the caller's responsibility to guarantee that
7391 * we are the only thread that changes migrate type of pageblocks the
7392 * pages fall in.
7393 *
7394 * The PFN range must belong to a single zone.
7395 *
7396 * Returns zero on success or negative error code. On success all
7397 * pages which PFN is in [start, end) are allocated for the caller and
7398 * need to be freed with free_contig_range().
7399 */
0815f3d8 7400int alloc_contig_range(unsigned long start, unsigned long end,
ca96b625 7401 unsigned migratetype, gfp_t gfp_mask)
041d3a8c 7402{
041d3a8c 7403 unsigned long outer_start, outer_end;
d00181b9
KS
7404 unsigned int order;
7405 int ret = 0;
041d3a8c 7406
bb13ffeb
MG
7407 struct compact_control cc = {
7408 .nr_migratepages = 0,
7409 .order = -1,
7410 .zone = page_zone(pfn_to_page(start)),
e0b9daeb 7411 .mode = MIGRATE_SYNC,
bb13ffeb 7412 .ignore_skip_hint = true,
7dea19f9 7413 .gfp_mask = current_gfp_context(gfp_mask),
bb13ffeb
MG
7414 };
7415 INIT_LIST_HEAD(&cc.migratepages);
7416
041d3a8c
MN
7417 /*
7418 * What we do here is we mark all pageblocks in range as
7419 * MIGRATE_ISOLATE. Because pageblock and max order pages may
7420 * have different sizes, and due to the way page allocator
7421 * work, we align the range to biggest of the two pages so
7422 * that page allocator won't try to merge buddies from
7423 * different pageblocks and change MIGRATE_ISOLATE to some
7424 * other migration type.
7425 *
7426 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7427 * migrate the pages from an unaligned range (ie. pages that
7428 * we are interested in). This will put all the pages in
7429 * range back to page allocator as MIGRATE_ISOLATE.
7430 *
7431 * When this is done, we take the pages in range from page
7432 * allocator removing them from the buddy system. This way
7433 * page allocator will never consider using them.
7434 *
7435 * This lets us mark the pageblocks back as
7436 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7437 * aligned range but not in the unaligned, original range are
7438 * put back to page allocator so that buddy can use them.
7439 */
7440
7441 ret = start_isolate_page_range(pfn_max_align_down(start),
b023f468
WC
7442 pfn_max_align_up(end), migratetype,
7443 false);
041d3a8c 7444 if (ret)
86a595f9 7445 return ret;
041d3a8c 7446
8ef5849f
JK
7447 /*
7448 * In case of -EBUSY, we'd like to know which page causes problem.
7449 * So, just fall through. We will check it in test_pages_isolated().
7450 */
bb13ffeb 7451 ret = __alloc_contig_migrate_range(&cc, start, end);
8ef5849f 7452 if (ret && ret != -EBUSY)
041d3a8c
MN
7453 goto done;
7454
7455 /*
7456 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
7457 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
7458 * more, all pages in [start, end) are free in page allocator.
7459 * What we are going to do is to allocate all pages from
7460 * [start, end) (that is remove them from page allocator).
7461 *
7462 * The only problem is that pages at the beginning and at the
7463 * end of interesting range may be not aligned with pages that
7464 * page allocator holds, ie. they can be part of higher order
7465 * pages. Because of this, we reserve the bigger range and
7466 * once this is done free the pages we are not interested in.
7467 *
7468 * We don't have to hold zone->lock here because the pages are
7469 * isolated thus they won't get removed from buddy.
7470 */
7471
7472 lru_add_drain_all();
510f5507 7473 drain_all_pages(cc.zone);
041d3a8c
MN
7474
7475 order = 0;
7476 outer_start = start;
7477 while (!PageBuddy(pfn_to_page(outer_start))) {
7478 if (++order >= MAX_ORDER) {
8ef5849f
JK
7479 outer_start = start;
7480 break;
041d3a8c
MN
7481 }
7482 outer_start &= ~0UL << order;
7483 }
7484
8ef5849f
JK
7485 if (outer_start != start) {
7486 order = page_order(pfn_to_page(outer_start));
7487
7488 /*
7489 * outer_start page could be small order buddy page and
7490 * it doesn't include start page. Adjust outer_start
7491 * in this case to report failed page properly
7492 * on tracepoint in test_pages_isolated()
7493 */
7494 if (outer_start + (1UL << order) <= start)
7495 outer_start = start;
7496 }
7497
041d3a8c 7498 /* Make sure the range is really isolated. */
b023f468 7499 if (test_pages_isolated(outer_start, end, false)) {
dae803e1
MN
7500 pr_info("%s: [%lx, %lx) PFNs busy\n",
7501 __func__, outer_start, end);
041d3a8c
MN
7502 ret = -EBUSY;
7503 goto done;
7504 }
7505
49f223a9 7506 /* Grab isolated pages from freelists. */
bb13ffeb 7507 outer_end = isolate_freepages_range(&cc, outer_start, end);
041d3a8c
MN
7508 if (!outer_end) {
7509 ret = -EBUSY;
7510 goto done;
7511 }
7512
7513 /* Free head and tail (if any) */
7514 if (start != outer_start)
7515 free_contig_range(outer_start, start - outer_start);
7516 if (end != outer_end)
7517 free_contig_range(end, outer_end - end);
7518
7519done:
7520 undo_isolate_page_range(pfn_max_align_down(start),
0815f3d8 7521 pfn_max_align_up(end), migratetype);
041d3a8c
MN
7522 return ret;
7523}
7524
7525void free_contig_range(unsigned long pfn, unsigned nr_pages)
7526{
bcc2b02f
MS
7527 unsigned int count = 0;
7528
7529 for (; nr_pages--; pfn++) {
7530 struct page *page = pfn_to_page(pfn);
7531
7532 count += page_count(page) != 1;
7533 __free_page(page);
7534 }
7535 WARN(count != 0, "%d pages are still in use!\n", count);
041d3a8c
MN
7536}
7537#endif
7538
4ed7e022 7539#ifdef CONFIG_MEMORY_HOTPLUG
0a647f38
CS
7540/*
7541 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7542 * page high values need to be recalulated.
7543 */
4ed7e022
JL
7544void __meminit zone_pcp_update(struct zone *zone)
7545{
0a647f38 7546 unsigned cpu;
c8e251fa 7547 mutex_lock(&pcp_batch_high_lock);
0a647f38 7548 for_each_possible_cpu(cpu)
169f6c19
CS
7549 pageset_set_high_and_batch(zone,
7550 per_cpu_ptr(zone->pageset, cpu));
c8e251fa 7551 mutex_unlock(&pcp_batch_high_lock);
4ed7e022
JL
7552}
7553#endif
7554
340175b7
JL
7555void zone_pcp_reset(struct zone *zone)
7556{
7557 unsigned long flags;
5a883813
MK
7558 int cpu;
7559 struct per_cpu_pageset *pset;
340175b7
JL
7560
7561 /* avoid races with drain_pages() */
7562 local_irq_save(flags);
7563 if (zone->pageset != &boot_pageset) {
5a883813
MK
7564 for_each_online_cpu(cpu) {
7565 pset = per_cpu_ptr(zone->pageset, cpu);
7566 drain_zonestat(zone, pset);
7567 }
340175b7
JL
7568 free_percpu(zone->pageset);
7569 zone->pageset = &boot_pageset;
7570 }
7571 local_irq_restore(flags);
7572}
7573
6dcd73d7 7574#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195 7575/*
b9eb6319
JK
7576 * All pages in the range must be in a single zone and isolated
7577 * before calling this.
0c0e6195
KH
7578 */
7579void
7580__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7581{
7582 struct page *page;
7583 struct zone *zone;
7aeb09f9 7584 unsigned int order, i;
0c0e6195
KH
7585 unsigned long pfn;
7586 unsigned long flags;
7587 /* find the first valid pfn */
7588 for (pfn = start_pfn; pfn < end_pfn; pfn++)
7589 if (pfn_valid(pfn))
7590 break;
7591 if (pfn == end_pfn)
7592 return;
7593 zone = page_zone(pfn_to_page(pfn));
7594 spin_lock_irqsave(&zone->lock, flags);
7595 pfn = start_pfn;
7596 while (pfn < end_pfn) {
7597 if (!pfn_valid(pfn)) {
7598 pfn++;
7599 continue;
7600 }
7601 page = pfn_to_page(pfn);
b023f468
WC
7602 /*
7603 * The HWPoisoned page may be not in buddy system, and
7604 * page_count() is not 0.
7605 */
7606 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7607 pfn++;
7608 SetPageReserved(page);
7609 continue;
7610 }
7611
0c0e6195
KH
7612 BUG_ON(page_count(page));
7613 BUG_ON(!PageBuddy(page));
7614 order = page_order(page);
7615#ifdef CONFIG_DEBUG_VM
1170532b
JP
7616 pr_info("remove from free list %lx %d %lx\n",
7617 pfn, 1 << order, end_pfn);
0c0e6195
KH
7618#endif
7619 list_del(&page->lru);
7620 rmv_page_order(page);
7621 zone->free_area[order].nr_free--;
0c0e6195
KH
7622 for (i = 0; i < (1 << order); i++)
7623 SetPageReserved((page+i));
7624 pfn += (1 << order);
7625 }
7626 spin_unlock_irqrestore(&zone->lock, flags);
7627}
7628#endif
8d22ba1b 7629
8d22ba1b
WF
7630bool is_free_buddy_page(struct page *page)
7631{
7632 struct zone *zone = page_zone(page);
7633 unsigned long pfn = page_to_pfn(page);
7634 unsigned long flags;
7aeb09f9 7635 unsigned int order;
8d22ba1b
WF
7636
7637 spin_lock_irqsave(&zone->lock, flags);
7638 for (order = 0; order < MAX_ORDER; order++) {
7639 struct page *page_head = page - (pfn & ((1 << order) - 1));
7640
7641 if (PageBuddy(page_head) && page_order(page_head) >= order)
7642 break;
7643 }
7644 spin_unlock_irqrestore(&zone->lock, flags);
7645
7646 return order < MAX_ORDER;
7647}