// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>
#include <linux/delayacct.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))
/*
 * Don't poison memory with KASAN (only for the tag-based modes).
 * During boot, all non-reserved memblock memory is exposed to page_alloc.
 * Poisoning all that memory lengthens boot time, especially on systems with
 * a large amount of RAM. This flag is used to skip that poisoning.
 * This is only done for the tag-based KASAN modes, as those are able to
 * detect memory corruptions with the memory tags assigned by default.
 * All memory allocated normally after boot gets poisoned as usual.
 */
#define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))
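
/*
 * The flags combine as a plain bitmask; for example, __free_pages_core()
 * below passes FPI_TO_TAIL | FPI_SKIP_KASAN_POISON to hand back fresh
 * pages at the freelist tail without KASAN-poisoning them.
 */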
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif
/*
 * Generic helper to look up and lock a per-cpu variable with an embedded
 * spinlock. The return value should be used with the equivalent unlock
 * helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_lock_irqsave(type, member, ptr, flags)		\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock_irqsave(&_ret->member, flags);			\
	_ret;								\
})

#define pcpu_spin_trylock_irqsave(type, member, ptr, flags)		\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock_irqsave(&_ret->member, flags)) {		\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

#define pcpu_spin_unlock_irqrestore(member, ptr, flags)			\
({									\
	spin_unlock_irqrestore(&ptr->member, flags);			\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_lock_irqsave(ptr, flags)				\
	pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags)

#define pcp_spin_trylock_irqsave(ptr, flags)				\
	pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)

#define pcp_spin_unlock_irqrestore(ptr, flags)				\
	pcpu_spin_unlock_irqrestore(lock, ptr, flags)
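
/*
 * Illustrative sketch (not used by the allocator itself) of how the
 * helpers above pair up: the task stays pinned from the PCP lookup
 * until the unlock, so a remote CPU's pcp is never locked by mistake.
 */
static inline void pcp_lock_usage_sketch(struct zone *zone)
{
	struct per_cpu_pages *pcp;
	unsigned long flags;

	/* pin the task, look up this CPU's pcp, take its lock */
	pcp = pcp_spin_lock_irqsave(zone->per_cpu_pageset, flags);
	/* ... operate on pcp->lists / pcp->count here ... */
	pcp_spin_unlock_irqrestore(pcp, flags);	/* unlock, then unpin */
}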
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_high_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);

static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{
	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);
/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
bool mirrored_kernelcore __initdata_memblock;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * prev_end_pfn is a static that contains the end of the previous zone.
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}
#endif
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}
static __always_inline
unsigned long __get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, mask);
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}
void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page);	/* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
static inline unsigned int order_to_pindex(int migratetype, int order)
{
	int base = order;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		return NR_LOWORDER_PCP_LISTS;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * base) + migratetype;
}
static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}
/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page_folio(page));
	free_the_page(page, compound_order(page));
}

static void prep_compound_head(struct page *page, unsigned int order)
{
	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
}

static void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

void destroy_large_folio(struct folio *folio)
{
	enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor;

	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
	compound_page_dtors[dtor](&folio->page);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static int __init early_debug_pagealloc(char *buf)
{
	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	__SetPageGuard(page);
	INIT_LIST_HEAD(&page->buddy_list);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return;

	__ClearPageGuard(page);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
void init_mem_debugging_and_hardening(void)
{
	bool page_poisoning_requested = false;

#ifdef CONFIG_PAGE_POISONING
	/*
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options are enabled, enable poisoning.
	 */
	if (page_poisoning_enabled() ||
	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
	      debug_pagealloc_enabled())) {
		static_branch_enable(&_page_poisoning_enabled);
		page_poisoning_requested = true;
	}
#endif

	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
	    page_poisoning_requested) {
		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
			"will take precedence over init_on_alloc and init_on_free\n");
		_init_on_alloc_enabled_early = false;
		_init_on_free_enabled_early = false;
	}

	if (_init_on_alloc_enabled_early)
		static_branch_enable(&init_on_alloc);
	else
		static_branch_disable(&init_on_alloc);

	if (_init_on_free_enabled_early)
		static_branch_enable(&init_on_free);
	else
		static_branch_disable(&init_on_free);

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (!debug_pagealloc_enabled())
		return;

	static_branch_enable(&_debug_pagealloc_enabled);

	if (!debug_guardpage_minorder())
		return;

	static_branch_enable(&_debug_guardpage_enabled);
#endif
}
static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}
/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case that
 * is happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher-order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 2)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
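
/*
 * Worked example: a free order-3 block at pfn 0x1008 has its buddy at
 * pfn 0x1008 ^ (1 << 3) == 0x1000. If that buddy is free too, the pair
 * merges into an order-4 block starting at 0x1008 & 0x1000 == 0x1000,
 * and the search repeats one order higher.
 */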
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER - 1) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}
/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page is changed, otherwise 0
 *
 * It is used when the free page crosses two pageblocks with different migratetypes
 * at split_pfn_offset within the page. The split free page will be put into
 * separate migratetype lists afterwards. Otherwise, the function achieves
 * nothing.
 */
int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset)
{
	struct zone *zone = page_zone(free_page);
	unsigned long free_page_pfn = page_to_pfn(free_page);
	unsigned long pfn;
	unsigned long flags;
	int free_page_order;
	int mt;
	int ret = 0;

	if (split_pfn_offset == 0)
		return ret;

	spin_lock_irqsave(&zone->lock, flags);

	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
		ret = -ENOENT;
		goto out;
	}

	mt = get_pageblock_migratetype(free_page);
	if (likely(!is_migrate_isolate(mt)))
		__mod_zone_freepage_state(zone, -(1UL << order), mt);

	del_page_from_free_list(free_page, zone, order);
	for (pfn = free_page_pfn;
	     pfn < free_page_pfn + (1UL << order);) {
		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

		free_page_order = min_t(unsigned int,
					pfn ? __ffs(pfn) : order,
					__fls(split_pfn_offset));
		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
				mt, FPI_NONE);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= (1UL << free_page_order);
		/* we have done the first part, now switch to second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void check_free_page_bad(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline int check_free_page(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	check_free_page_bad(page);
	return 1;
}
static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}
/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. Deferred memory initialization has not yet completed,
 *    see the explanation below.
 * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON,
 *    see the comment next to it.
 * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
 *    see the comment next to it.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems, as the deferred page
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	return deferred_pages_enabled() ||
	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
	       PageSkipKASanPoison(page);
}
static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, bool check_free, fpi_t fpi_flags)
{
	int bad = 0;
	bool init = want_init_on_free();

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy.
		 * Untie memcg state and reset page's owner.
		 */
		if (memcg_kmem_enabled() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound) {
			ClearPageDoubleMap(page);
			ClearPageHasHWPoisoned(page);
		}
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(check_free_page(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (check_free)
		bad += check_free_page(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!should_skip_kasan_poison(page, fpi_flags)) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this, so nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}
#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
 * moved from pcp lists to free lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	return free_pages_prepare(page, order, true, FPI_NONE);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return check_free_page(page);
	else
		return false;
}
#else
/*
 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
 * moving from pcp lists to free list in order to reduce overhead. With
 * debug_pagealloc enabled, they are checked also immediately when being freed
 * to the pcp lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return free_pages_prepare(page, order, true, FPI_NONE);
	else
		return free_pages_prepare(page, order, false, FPI_NONE);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return check_free_page(page);
}
#endif /* CONFIG_DEBUG_VM */
/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	int min_pindex = 0;
	int max_pindex = NR_PCP_LISTS - 1;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure proper count is passed which otherwise would get stuck in
	 * the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > max_pindex)
				pindex = min_pindex;
			list = &pcp->lists[pindex];
			if (!list_empty(list))
				break;

			if (pindex == max_pindex)
				max_pindex--;
			if (pindex == min_pindex)
				min_pindex++;
		} while (1);

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
		do {
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			if (bulkfree_pcp_prepare(page))
				continue;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock(&zone->lock);
}
static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order, true, fpi_flags))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
}
#ifdef CONFIG_NUMA

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif /* CONFIG_NUMA */

void __init memblock_free_pages(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	__free_pages_core(page, order);
}
/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration of free compaction scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0,
 * i.e. it's possible that not all pages within a zone's range of pages
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}
*zone
)
1855 unsigned long block_start_pfn
= zone
->zone_start_pfn
;
1856 unsigned long block_end_pfn
;
1858 block_end_pfn
= ALIGN(block_start_pfn
+ 1, pageblock_nr_pages
);
1859 for (; block_start_pfn
< zone_end_pfn(zone
);
1860 block_start_pfn
= block_end_pfn
,
1861 block_end_pfn
+= pageblock_nr_pages
) {
1863 block_end_pfn
= min(block_end_pfn
, zone_end_pfn(zone
));
1865 if (!__pageblock_pfn_to_page(block_start_pfn
,
1866 block_end_pfn
, zone
))
1871 /* We confirm that there is no hole */
1872 zone
->contiguous
= true;
1875 void clear_zone_contiguous(struct zone
*zone
)
1877 zone
->contiguous
= false;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(unsigned long pfn,
				       unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	if (!nr_pages)
		return;

	page = pfn_to_page(pfn);

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == pageblock_nr_pages &&
	    (pfn & (pageblock_nr_pages - 1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, pageblock_order);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if ((pfn & (pageblock_nr_pages - 1)) == 0)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, 0);
	}
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/*
 * Returns true if the page needs to be initialized or freed to the buddy
 * allocator.
 *
 * First we check if pfn is valid on architectures where it is possible to have
 * holes within pageblock_nr_pages. On systems where it is not possible, this
 * function is optimized out.
 *
 * Then, we check if a current large page is valid by only checking the
 * validity of the head pfn.
 */
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
		return false;
	return true;
}

/*
 * Free pages to buddy allocator. Try to free aligned pages in
 * pageblock_nr_pages sizes.
 */
static void __init deferred_free_pages(unsigned long pfn,
				       unsigned long end_pfn)
{
	unsigned long nr_pgmask = pageblock_nr_pages - 1;
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 0;
		} else if (!(pfn & nr_pgmask)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 1;
		} else {
			nr_free++;
		}
	}
	/* Free the last block of pages to allocator */
	deferred_free_range(pfn - nr_free, nr_free);
}

/*
 * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
 * by performing it only once every pageblock_nr_pages.
 * Return number of pages initialized.
 */
static unsigned long __init deferred_init_pages(struct zone *zone,
						unsigned long pfn,
						unsigned long end_pfn)
{
	unsigned long nr_pgmask = pageblock_nr_pages - 1;
	int nid = zone_to_nid(zone);
	unsigned long nr_pages = 0;
	int zid = zone_idx(zone);
	struct page *page = NULL;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			page = NULL;
			continue;
		} else if (!page || !(pfn & nr_pgmask)) {
			page = pfn_to_page(pfn);
		} else {
			page++;
		}
		__init_single_page(page, pfn, zid, nid);
		nr_pages++;
	}
	return (nr_pages);
}

/*
 * This function is meant to pre-load the iterator for the zone init.
 * Specifically it walks through the ranges until we are caught up to the
 * first_init_pfn value and exits there. If we never encounter the value we
 * return false indicating there are no valid ranges left.
 */
static bool __init
deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
				    unsigned long *spfn, unsigned long *epfn,
				    unsigned long first_init_pfn)
{
	u64 j;

	/*
	 * Start out by walking through the ranges in this zone that have
	 * already been initialized. We don't need to do anything with them
	 * so we just need to flush them out of the system.
	 */
	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
		if (*epfn <= first_init_pfn)
			continue;
		if (*spfn < first_init_pfn)
			*spfn = first_init_pfn;
		*i = j;
		return true;
	}

	return false;
}
/*
 * Initialize and free pages. We do it in two loops: first we initialize
 * struct page, then we free the pages to the buddy allocator, because while
 * we are freeing pages we can access pages that are ahead (computing the
 * buddy page in __free_one_page()).
 *
 * In order to try and keep some memory in the cache we have the loop
 * broken along max page order boundaries. This way we will not cause
 * any issues with the buddy page computation.
 */
static unsigned long __init
deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
		       unsigned long *end_pfn)
{
	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
	unsigned long spfn = *start_pfn, epfn = *end_pfn;
	unsigned long nr_pages = 0;
	u64 j = *i;

	/* First we loop through and initialize the page values */
	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
		unsigned long t;

		if (mo_pfn <= *start_pfn)
			break;

		t = min(mo_pfn, *end_pfn);
		nr_pages += deferred_init_pages(zone, *start_pfn, t);

		if (mo_pfn < *end_pfn) {
			*start_pfn = mo_pfn;
			break;
		}
	}

	/* Reset values and now loop through freeing pages as needed */
	swap(j, *i);

	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
		unsigned long t;

		if (mo_pfn <= spfn)
			break;

		t = min(mo_pfn, epfn);
		deferred_free_pages(spfn, t);

		if (mo_pfn < epfn)
			break;
	}

	return nr_pages;
}
2074 deferred_init_memmap_chunk(unsigned long start_pfn
, unsigned long end_pfn
,
2077 unsigned long spfn
, epfn
;
2078 struct zone
*zone
= arg
;
2081 deferred_init_mem_pfn_range_in_zone(&i
, zone
, &spfn
, &epfn
, start_pfn
);
2084 * Initialize and free pages in MAX_ORDER sized increments so that we
2085 * can avoid introducing any issues with the buddy allocator.
2087 while (spfn
< end_pfn
) {
2088 deferred_init_maxorder(&i
, zone
, &spfn
, &epfn
);
/* An arch may override for more concurrency. */
__weak int __init
deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	return 1;
}
/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
	unsigned long spfn = 0, epfn = 0;
	unsigned long first_init_pfn, flags;
	unsigned long start = jiffies;
	struct zone *zone;
	int zid, max_threads;
	u64 i;

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	pgdat_resize_lock(pgdat, &flags);
	first_init_pfn = pgdat->first_deferred_pfn;
	if (first_init_pfn == ULONG_MAX) {
		pgdat_resize_unlock(pgdat, &flags);
		pgdat_init_report_one_done();
		return 0;
	}

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/*
	 * Once we unlock here, the zone cannot be grown anymore, thus if an
	 * interrupt thread must allocate this early in boot, zone must be
	 * pre-grown prior to start of deferred page initialization.
	 */
	pgdat_resize_unlock(pgdat, &flags);

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_init_pfn))
		goto zone_empty;

	max_threads = deferred_page_init_max_threads(cpumask);

	while (spfn < epfn) {
		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
		struct padata_mt_job job = {
			.thread_fn   = deferred_init_memmap_chunk,
			.fn_arg      = zone,
			.start       = spfn,
			.size        = epfn_align - spfn,
			.align       = PAGES_PER_SECTION,
			.min_chunk   = PAGES_PER_SECTION,
			.max_threads = max_threads,
		};

		padata_do_multithreaded(&job);
		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						    epfn_align);
	}
zone_empty:
	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d deferred pages initialised in %ums\n",
		pgdat->node_id, jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}
/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when zone was grown, otherwise return false. We return true even
 * when we grow less than requested, to let the caller decide if there are
 * enough pages to satisfy the allocation.
 *
 * Note: We use noinline because this function is needed only during boot, and
 * it is called from a __ref function _deferred_grow_zone. This way we are
 * making sure that it is not inlined into permanent text section.
 */
static noinline bool __init
deferred_grow_zone(struct zone *zone, unsigned int order)
{
	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
	pg_data_t *pgdat = zone->zone_pgdat;
	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
	unsigned long spfn, epfn, flags;
	unsigned long nr_pages = 0;
	u64 i;

	/* Only the last zone may have deferred pages */
	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
		return false;

	pgdat_resize_lock(pgdat, &flags);

	/*
	 * If someone grew this zone while we were waiting for spinlock, return
	 * true, as there might be enough pages already.
	 */
	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_deferred_pfn)) {
		pgdat->first_deferred_pfn = ULONG_MAX;
		pgdat_resize_unlock(pgdat, &flags);
		/* Retry only once. */
		return first_deferred_pfn != ULONG_MAX;
	}

	/*
	 * Initialize and free pages in MAX_ORDER sized increments so
	 * that we can avoid introducing any issues with the buddy
	 * allocator.
	 */
	while (spfn < epfn) {
		/* update our first deferred PFN for this section */
		first_deferred_pfn = spfn;

		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
		touch_nmi_watchdog();

		/* We should only stop along section boundaries */
		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
			continue;

		/* If our quota has been met we can stop here */
		if (nr_pages >= nr_pages_needed)
			break;
	}

	pgdat->first_deferred_pfn = spfn;
	pgdat_resize_unlock(pgdat, &flags);

	return nr_pages > 0;
}
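/*
 * Worked example (illustrative; assumes 4K pages and 128MB sections, so
 * PAGES_PER_SECTION == 32768): an order-3 request needs only 8 pages, but
 * nr_pages_needed = ALIGN(8, 32768) = 32768, so the zone is grown by at
 * least one full section of struct pages before the allocation retries.
 */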
/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper: it avoids a
 * section-mismatch warning and ensures that the __init function body can be
 * unloaded after boot.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
void __init page_alloc_init_late(void)
{
	struct zone *zone;
	int nid;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/*
	 * We initialized the rest of the deferred pages. Permanently disable
	 * on-demand struct page initialization.
	 */
	static_branch_disable(&deferred_pages);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	/* Discard memblock private memory */
	memblock_discard();

	for_each_node_state(nid, N_MEMORY)
		shuffle_free_memory(NODE_DATA(nid));

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);
}
#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);
	set_page_refcounted(page);
	__free_pages(page, pageblock_order);

	adjust_managed_page_count(page, pageblock_nr_pages);
	page_zone(page)->cma_pages += pageblock_nr_pages;
}
#endif
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), that will allow to
		 * merge back to allocator when buddy will be freed.
		 * Corresponding page table entries will not be touched,
		 * pages will stay not present in virtual address space
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		add_to_free_list(&page[size], zone, high, migratetype);
		set_buddy_order(&page[size], high);
	}
}
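/*
 * Worked example (illustrative): expand(zone, page, 2, 5, mt) splits an
 * order-5 block (size starts at 32) down to the requested order 2. Each
 * pass halves size and frees the upper buddy: page[16] at order 4,
 * page[8] at order 3, page[4] at order 2. The caller keeps page[0..3]
 * as the order-2 allocation.
 */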
static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}
/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static bool check_new_pages(struct page *page, unsigned int order)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return true;
	}

	return false;
}
#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
 * also checked when pcp lists are refilled from the free lists.
 */
static inline bool check_pcp_refill(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return check_new_pages(page, order);
	else
		return false;
}

static inline bool check_new_pcp(struct page *page, unsigned int order)
{
	return check_new_pages(page, order);
}
#else
/*
 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
 * when pcp lists are being refilled from the free lists. With debug_pagealloc
 * enabled, they are also checked when being allocated from the pcp lists.
 */
static inline bool check_pcp_refill(struct page *page, unsigned int order)
{
	return check_new_pages(page, order);
}

static inline bool check_new_pcp(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return check_new_pages(page, order);
	else
		return false;
}
#endif /* CONFIG_DEBUG_VM */
static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
	/* Don't skip if a software KASAN mode is enabled. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return false;

	/* Skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return true;

	/*
	 * With hardware tag-based KASAN enabled, skip if this has been
	 * requested via __GFP_SKIP_KASAN_UNPOISON.
	 */
	return flags & __GFP_SKIP_KASAN_UNPOISON;
}

static inline bool should_skip_init(gfp_t flags)
{
	/* Don't skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return false;

	/* For hardware tag-based KASAN, skip if requested. */
	return (flags & __GFP_SKIP_ZERO);
}
inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
			!should_skip_init(gfp_flags);
	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN unpoisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 */

	/*
	 * If memory tags should be zeroed (which happens only when memory
	 * should be initialized as well).
	 */
	if (init_tags) {
		/* Initialize both memory and tags. */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);

		/* Note that memory is already initialized by the loop above. */
		init = false;
	}
	if (!should_skip_kasan_unpoison(gfp_flags)) {
		/* Unpoison shadow memory or set memory tags. */
		kasan_unpoison_pages(page, order, init);

		/* Note that memory is already initialized by KASAN. */
		if (kasan_has_integrated_init())
			init = false;
	} else {
		/* Ensure page_address() dereferencing does not fault. */
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}
	/* If memory is still not initialized, do it now. */
	if (init)
		kernel_init_pages(page, 1 << order);
	/* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
	if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
		SetPageSkipKASanPoison(page);

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);
}
static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order);
		expand(zone, page, order, current_order, migratetype);
		set_pcppage_migratetype(page, migratetype);
		trace_mm_page_alloc_zone_locked(page, order, migratetype,
				pcp_allowed_order(order) &&
				migratetype < MIGRATE_PCPTYPES);
		return page;
	}

	return NULL;
}
/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 *
 * The other migratetypes do not have fallbacks.
 */
static int fallbacks[MIGRATE_TYPES][3] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
};
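/*
 * Reading the table above (illustrative): when the UNMOVABLE free lists
 * are empty, the fallback path tries RECLAIMABLE first, then MOVABLE;
 * the trailing MIGRATE_TYPES entry acts as the list terminator checked
 * by find_suitable_fallback().
 */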
#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif
/*
 * Move the free pages in a range to the freelist tail of the requested type.
 * Note that start_pfn and end_pfn are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  unsigned long start_pfn, unsigned long end_pfn,
			  int migratetype, int *num_movable)
{
	struct page *page;
	unsigned long pfn;
	unsigned int order;
	int pages_moved = 0;

	for (pfn = start_pfn; pfn <= end_pfn;) {
		page = pfn_to_page(pfn);
		if (!PageBuddy(page)) {
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
			 */
			if (num_movable &&
					(PageLRU(page) || __PageMovable(page)))
				(*num_movable)++;
			pfn++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = buddy_order(page);
		move_to_free_list(page, zone, order, migratetype);
		pfn += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable)
{
	unsigned long start_pfn, end_pfn, pfn;

	if (num_movable)
		*num_movable = 0;

	pfn = page_to_pfn(page);
	start_pfn = pfn & ~(pageblock_nr_pages - 1);
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_pfn = pfn;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_pfn, end_pfn, migratetype,
								num_movable);
}
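/*
 * Worked example of the alignment above (illustrative; assumes
 * pageblock_nr_pages == 512): for pfn 1234, start_pfn = 1234 & ~511 =
 * 1024 and end_pfn = 1535, i.e. the whole pageblock containing the page.
 */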
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}
/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * This order check is kept intentionally, even though the check
	 * below is more relaxed: if this condition is met we are guaranteed
	 * to be able to steal the whole pageblock, whereas the check below
	 * is only a heuristic and could be changed at any time.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}
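/*
 * Worked example (illustrative; assumes pageblock_order == 9): any
 * fallback of order >= 9 steals a whole pageblock outright; RECLAIMABLE
 * and UNMOVABLE requests may steal at any order; a MOVABLE request must
 * be order >= 4 (pageblock_order / 2) before stealing is allowed.
 */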
static inline bool boost_watermark(struct zone *zone)
{
	unsigned long max_boost;

	if (!watermark_boost_factor)
		return false;
	/*
	 * Don't bother in zones that are unlikely to produce results.
	 * On small machines, including kdump capture kernels running
	 * in a small area, boosting the watermark can cause an out of
	 * memory situation immediately.
	 */
	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
		return false;

	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
			watermark_boost_factor, 10000);

	/*
	 * high watermark may be uninitialised if fragmentation occurs
	 * very early in boot so do not boost. We do not fall
	 * through and boost by pageblock_nr_pages as failing
	 * allocations that early means that reclaim is not going
	 * to help and it may even be impossible to reclaim the
	 * boosted watermark resulting in a hang.
	 */
	if (!max_boost)
		return false;

	max_boost = max(pageblock_nr_pages, max_boost);

	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
		max_boost);

	return true;
}
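/*
 * Worked example (illustrative; assumes the default
 * watermark_boost_factor of 15000): max_boost = high_wmark * 15000/10000,
 * i.e. 150% of the high watermark. Each fallback event then raises
 * watermark_boost by pageblock_nr_pages, saturating at that cap.
 */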
/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal whole pageblock. If not, we first move freepages in this
 * pageblock to our migratetype and determine how many already-allocated pages
 * are there in the pageblock with a compatible migratetype. If at least half
 * of pages are free or compatible, we can change migratetype of the pageblock
 * itself, so pages freed in the future will be put on the correct free list.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
		unsigned int alloc_flags, int start_type, bool whole_block)
{
	unsigned int current_order = buddy_order(page);
	int free_pages, movable_pages, alike_pages;
	int old_block_type;

	old_block_type = get_pageblock_migratetype(page);

	/*
	 * This can happen due to races and we want to prevent broken
	 * highatomic accounting.
	 */
	if (is_migrate_highatomic(old_block_type))
		goto single_page;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		goto single_page;
	}

	/*
	 * Boost watermarks to increase reclaim pressure to reduce the
	 * likelihood of future fallbacks. Wake kswapd now as the node
	 * may be balanced overall and kswapd will not wake naturally.
	 */
	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);

	/* We are not allowed to try stealing from the whole block */
	if (!whole_block)
		goto single_page;

	free_pages = move_freepages_block(zone, page, start_type,
						&movable_pages);
	/*
	 * Determine how many pages are compatible with our allocation.
	 * For movable allocation, it's the number of movable pages which
	 * we just obtained. For other types it's a bit more tricky.
	 */
	if (start_type == MIGRATE_MOVABLE) {
		alike_pages = movable_pages;
	} else {
		/*
		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
		 * to MOVABLE pageblock, consider all non-movable pages as
		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
		 * vice versa, be conservative since we can't distinguish the
		 * exact migratetype of non-movable pages.
		 */
		if (old_block_type == MIGRATE_MOVABLE)
			alike_pages = pageblock_nr_pages
						- (free_pages + movable_pages);
		else
			alike_pages = 0;
	}

	/* moving whole block can fail due to zone boundary conditions */
	if (!free_pages)
		goto single_page;

	/*
	 * If a sufficient number of pages in the block are either free or of
	 * comparable migratability as our allocation, claim the whole block.
	 */
	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
			page_group_by_mobility_disabled)
		set_pageblock_migratetype(page, start_type);

	return;

single_page:
	move_to_free_list(page, zone, current_order, start_type);
}
/*
 * Check whether there is a suitable fallback freepage with requested order.
 * If only_stealable is true, this function returns fallback_mt only if
 * we can steal other freepages all together. This would help to reduce
 * fragmentation due to mixed migratetype pages in one pageblock.
 */
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal)
{
	int i;
	int fallback_mt;

	if (area->nr_free == 0)
		return -1;

	*can_steal = false;
	for (i = 0;; i++) {
		fallback_mt = fallbacks[migratetype][i];
		if (fallback_mt == MIGRATE_TYPES)
			break;

		if (free_area_empty(area, fallback_mt))
			continue;

		if (can_steal_fallback(order, migratetype))
			*can_steal = true;

		if (!only_stealable)
			return fallback_mt;

		if (*can_steal)
			return fallback_mt;
	}

	return -1;
}
/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
				unsigned int alloc_order)
{
	int mt;
	unsigned long max_managed, flags;

	/*
	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
	 * Check is race-prone but harmless.
	 */
	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
	if (zone->nr_reserved_highatomic >= max_managed)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	/* Recheck the nr_reserved_highatomic limit under the lock */
	if (zone->nr_reserved_highatomic >= max_managed)
		goto out_unlock;

	mt = get_pageblock_migratetype(page);
	/* Only reserve normal pageblocks (i.e., they can merge with others) */
	if (migratetype_is_mergeable(mt)) {
		zone->nr_reserved_highatomic += pageblock_nr_pages;
		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
	}

out_unlock:
	spin_unlock_irqrestore(&zone->lock, flags);
}
/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 *
 * If @force is true, try to unreserve a pageblock even though highatomic
 * pageblock is exhausted.
 */
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
						bool force)
{
	struct zonelist *zonelist = ac->zonelist;
	unsigned long flags;
	struct zoneref *z;
	struct zone *zone;
	struct page *page;
	int order;
	bool ret;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
								ac->nodemask) {
		/*
		 * Preserve at least one pageblock unless memory pressure
		 * is really high.
		 */
		if (!force && zone->nr_reserved_highatomic <=
					pageblock_nr_pages)
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &(zone->free_area[order]);

			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
			if (!page)
				continue;

			/*
			 * In the page freeing path, migratetype changes are
			 * racy so we can encounter several free pages in a
			 * pageblock in this loop although we changed the
			 * pageblock type from highatomic to ac->migratetype.
			 * So we should adjust the count once.
			 */
			if (is_migrate_highatomic_page(page)) {
				/*
				 * It should never happen but changes to
				 * locking could inadvertently allow a per-cpu
				 * drain to add pages to MIGRATE_HIGHATOMIC
				 * while unreserving so be safe and watch for
				 * underflows.
				 */
				zone->nr_reserved_highatomic -= min(
						pageblock_nr_pages,
						zone->nr_reserved_highatomic);
			}

			/*
			 * Convert to ac->migratetype and avoid the normal
			 * pageblock stealing heuristics. Minimally, the caller
			 * is doing the work and needs the pages. More
			 * importantly, if the block was always converted to
			 * MIGRATE_UNMOVABLE or another type then the number
			 * of pageblocks that cannot be completely freed
			 * may increase.
			 */
			set_pageblock_migratetype(page, ac->migratetype);
			ret = move_freepages_block(zone, page, ac->migratetype,
									NULL);
			if (ret) {
				spin_unlock_irqrestore(&zone->lock, flags);
				return ret;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return false;
}
/*
 * Try finding a free buddy page on the fallback list and put it on the free
 * list of requested migratetype, possibly along with other pages from the same
 * block, depending on fragmentation avoidance heuristics. Returns true if
 * fallback was found so that __rmqueue_smallest() can grab it.
 *
 * The use of signed ints for order and current_order is a deliberate
 * deviation from the rest of this file, to make the for loop
 * condition simpler.
 */
static __always_inline bool
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
						unsigned int alloc_flags)
{
	struct free_area *area;
	int current_order;
	int min_order = order;
	struct page *page;
	int fallback_mt;
	bool can_steal;

	/*
	 * Do not steal pages from freelists belonging to other pageblocks
	 * i.e. orders < pageblock_order. If there are no local zones free,
	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
	 */
	if (alloc_flags & ALLOC_NOFRAGMENT)
		min_order = pageblock_order;

	/*
	 * Find the largest available free page in the other list. This roughly
	 * approximates finding the pageblock with the most free pages, which
	 * would be too costly to do exactly.
	 */
	for (current_order = MAX_ORDER - 1; current_order >= min_order;
				--current_order) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt == -1)
			continue;

		/*
		 * We cannot steal all free pages from the pageblock and the
		 * requested migratetype is movable. In that case it's better to
		 * steal and split the smallest available page instead of the
		 * largest available page, because even if the next movable
		 * allocation falls back into a different pageblock than this
		 * one, it won't cause permanent fragmentation.
		 */
		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
					&& current_order > order)
			goto find_smallest;

		goto do_steal;
	}

	return false;

find_smallest:
	for (current_order = order; current_order < MAX_ORDER;
							current_order++) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt != -1)
			break;
	}

	/*
	 * This should not happen - we already found a suitable fallback
	 * when looking for the largest page.
	 */
	VM_BUG_ON(current_order == MAX_ORDER);

do_steal:
	page = get_page_from_free_area(area, fallback_mt);

	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
								can_steal);

	trace_mm_page_alloc_extfrag(page, order, current_order,
		start_migratetype, fallback_mt);

	return true;
}
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
						unsigned int alloc_flags)
{
	struct page *page;

	if (IS_ENABLED(CONFIG_CMA)) {
		/*
		 * Balance movable allocations between regular and CMA areas by
		 * allocating from CMA when over half of the zone's free memory
		 * is in the CMA area.
		 */
		if (alloc_flags & ALLOC_CMA &&
		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
			page = __rmqueue_cma_fallback(zone, order);
			if (page)
				return page;
		}
	}
retry:
	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (alloc_flags & ALLOC_CMA)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page && __rmqueue_fallback(zone, order, migratetype,
								alloc_flags))
			goto retry;
	}
	return page;
}
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, unsigned int alloc_flags)
{
	int i, allocated = 0;

	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype,
								alloc_flags);
		if (unlikely(page == NULL))
			break;

		if (unlikely(check_pcp_refill(page, order)))
			continue;

		/*
		 * Split buddy pages returned by expand() are received here in
		 * physical page order. The page is added to the tail of the
		 * caller's list. From the caller's perspective, the linked
		 * list is ordered by page number under some conditions. This
		 * is useful for IO devices that can forward from the head of
		 * the list, and thus in physical page order, and for IO
		 * devices that can merge IO requests if the physical pages
		 * are ordered properly.
		 */
		list_add_tail(&page->pcp_list, list);
		allocated++;
		if (is_migrate_cma(get_pcppage_migratetype(page)))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}

	/*
	 * i pages were removed from the buddy list even if some leak due
	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
	 * on i. Do not confuse with 'allocated' which is the number of
	 * pages added to the pcp list.
	 */
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return allocated;
}
#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	int to_drain, batch;

	batch = READ_ONCE(pcp->batch);
	to_drain = min(pcp->count, batch);
	if (to_drain > 0) {
		unsigned long flags;

		/*
		 * free_pcppages_bulk expects IRQs disabled for zone->lock
		 * so even though pcp->lock is not intended to be IRQ-safe,
		 * it's needed in this context.
		 */
		spin_lock_irqsave(&pcp->lock, flags);
		free_pcppages_bulk(zone, to_drain, pcp, 0);
		spin_unlock_irqrestore(&pcp->lock, flags);
	}
}
#endif
/*
 * Drain pcplists of the indicated processor and zone.
 */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
	struct per_cpu_pages *pcp;

	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
	if (pcp->count) {
		unsigned long flags;

		/* See drain_zone_pages on why this is disabling IRQs */
		spin_lock_irqsave(&pcp->lock, flags);
		free_pcppages_bulk(zone, pcp->count, pcp, 0);
		spin_unlock_irqrestore(&pcp->lock, flags);
	}
}
/*
 * Drain pcplists of all zones on the indicated processor.
 */
static void drain_pages(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		drain_pages_zone(cpu, zone);
	}
}
/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(struct zone *zone)
{
	int cpu = smp_processor_id();

	if (zone)
		drain_pages_zone(cpu, zone);
	else
		drain_pages(cpu);
}
/*
 * The implementation of drain_all_pages(), exposing an extra parameter to
 * drain on all cpus.
 *
 * drain_all_pages() is optimized to only execute on cpus where pcplists are
 * not empty. The check for non-emptiness can however race with a free to
 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
 * that need the guarantee that every CPU has drained can disable the
 * optimizing racy check.
 */
static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
{
	int cpu;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * Do not drain if one is already in progress unless it's specific to
	 * a zone. Such callers are primarily CMA and memory hotplug and need
	 * the drain to be complete when the call returns.
	 */
	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
		if (!zone)
			return;
		mutex_lock(&pcpu_drain_mutex);
	}

	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpu to drain that CPU pcps and on_each_cpu_mask
	 * disables preemption as part of its processing
	 */
	for_each_online_cpu(cpu) {
		struct per_cpu_pages *pcp;
		struct zone *z;
		bool has_pcps = false;

		if (force_all_cpus) {
			/*
			 * The pcp.count check is racy, some callers need a
			 * guarantee that no cpu is missed.
			 */
			has_pcps = true;
		} else if (zone) {
			pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
			if (pcp->count)
				has_pcps = true;
		} else {
			for_each_populated_zone(z) {
				pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
				if (pcp->count) {
					has_pcps = true;
					break;
				}
			}
		}

		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}

	for_each_cpu(cpu, &cpus_with_pcps) {
		if (zone)
			drain_pages_zone(cpu, zone);
		else
			drain_pages(cpu);
	}

	mutex_unlock(&pcpu_drain_mutex);
}
/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * When zone parameter is non-NULL, spill just the single zone's pages.
 */
void drain_all_pages(struct zone *zone)
{
	__drain_all_pages(zone, false);
}
#ifdef CONFIG_HIBERNATION

/*
 * Touch the watchdog for every WD_PAGE_COUNT pages.
 */
#define WD_PAGE_COUNT	(128*1024)

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
	unsigned long flags;
	unsigned int order, t;
	struct page *page;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);

			if (!--page_count) {
				touch_nmi_watchdog();
				page_count = WD_PAGE_COUNT;
			}

			if (page_zone(page) != zone)
				continue;

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each_entry(page,
				&zone->free_area[order].free_list[t], buddy_list) {
			unsigned long i;

			pfn = page_to_pfn(page);
			for (i = 0; i < (1UL << order); i++) {
				if (!--page_count) {
					touch_nmi_watchdog();
					page_count = WD_PAGE_COUNT;
				}
				swsusp_set_page_free(pfn_to_page(pfn + i));
			}
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */
static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
							unsigned int order)
{
	int migratetype;

	if (!free_pcp_prepare(page, order))
		return false;

	migratetype = get_pfnblock_migratetype(page, pfn);
	set_pcppage_migratetype(page, migratetype);
	return true;
}
static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch,
		       bool free_high)
{
	int min_nr_free, max_nr_free;

	/* Free everything if batch freeing high-order pages. */
	if (unlikely(free_high))
		return pcp->count;

	/* Check for PCP disabled or boot pageset */
	if (unlikely(high < batch))
		return 1;

	/* Leave at least pcp->batch pages on the list */
	min_nr_free = batch;
	max_nr_free = high - batch;

	/*
	 * Double the number of pages freed each time there is subsequent
	 * freeing of pages without any allocation.
	 */
	batch <<= pcp->free_factor;
	if (batch < max_nr_free)
		pcp->free_factor++;
	batch = clamp(batch, min_nr_free, max_nr_free);

	return batch;
}
static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
		       bool free_high)
{
	int high = READ_ONCE(pcp->high);

	if (unlikely(!high || free_high))
		return 0;

	if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
		return high;

	/*
	 * If reclaim is active, limit the number of pages that can be
	 * stored on pcp lists
	 */
	return min(READ_ONCE(pcp->batch) << 2, high);
}
static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
				   struct page *page, int migratetype,
				   unsigned int order)
{
	int high;
	int pindex;
	bool free_high;

	__count_vm_event(PGFREE);
	pindex = order_to_pindex(migratetype, order);
	list_add(&page->pcp_list, &pcp->lists[pindex]);
	pcp->count += 1 << order;

	/*
	 * As high-order pages other than THP's stored on PCP can contribute
	 * to fragmentation, limit the number stored when PCP is heavily
	 * freeing without allocation. The remainder after bulk freeing
	 * stops will be drained from vmstat refresh context.
	 */
	free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);

	high = nr_pcp_high(pcp, zone, free_high);
	if (pcp->count >= high) {
		int batch = READ_ONCE(pcp->batch);

		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex);
	}
}
/*
 * Free a pcp page
 */
void free_unref_page(struct page *page, unsigned int order)
{
	unsigned long flags;
	unsigned long __maybe_unused UP_flags;
	struct per_cpu_pages *pcp;
	struct zone *zone;
	unsigned long pfn = page_to_pfn(page);
	int migratetype;

	if (!free_unref_page_prepare(page, pfn, order))
		return;

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Place ISOLATE pages on the isolated list because they are being
	 * offlined but treat HIGHATOMIC as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	migratetype = get_pcppage_migratetype(page);
	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
			return;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	zone = page_zone(page);
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
	if (pcp) {
		free_unref_page_commit(zone, pcp, page, migratetype, order);
		pcp_spin_unlock_irqrestore(pcp, flags);
	} else {
		free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
	}
	pcp_trylock_finish(UP_flags);
}
/*
 * Free a list of 0-order pages
 */
void free_unref_page_list(struct list_head *list)
{
	struct page *page, *next;
	struct per_cpu_pages *pcp = NULL;
	struct zone *locked_zone = NULL;
	unsigned long flags;
	int batch_count = 0;
	int migratetype;

	/* Prepare pages for freeing */
	list_for_each_entry_safe(page, next, list, lru) {
		unsigned long pfn = page_to_pfn(page);
		if (!free_unref_page_prepare(page, pfn, 0)) {
			list_del(&page->lru);
			continue;
		}

		/*
		 * Free isolated pages directly to the allocator, see
		 * comment in free_unref_page.
		 */
		migratetype = get_pcppage_migratetype(page);
		if (unlikely(is_migrate_isolate(migratetype))) {
			list_del(&page->lru);
			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
			continue;
		}
	}

	list_for_each_entry_safe(page, next, list, lru) {
		struct zone *zone = page_zone(page);

		/* Different zone, different pcp lock. */
		if (zone != locked_zone) {
			if (pcp)
				pcp_spin_unlock_irqrestore(pcp, flags);

			locked_zone = zone;
			pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
		}

		/*
		 * Non-isolated types over MIGRATE_PCPTYPES get added
		 * to the MIGRATE_MOVABLE pcp list.
		 */
		migratetype = get_pcppage_migratetype(page);
		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
			migratetype = MIGRATE_MOVABLE;

		trace_mm_page_free_batched(page);
		free_unref_page_commit(zone, pcp, page, migratetype, 0);

		/*
		 * Guard against excessive IRQ disabled times when we get
		 * a large list of pages to free.
		 */
		if (++batch_count == SWAP_CLUSTER_MAX) {
			pcp_spin_unlock_irqrestore(pcp, flags);
			batch_count = 0;
			pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
		}
	}

	if (pcp)
		pcp_spin_unlock_irqrestore(pcp, flags);
}
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
	split_page_owner(page, 1 << order);
	split_page_memcg(page, 1 << order);
}
EXPORT_SYMBOL_GPL(split_page);
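/*
 * Usage sketch (illustrative only, not taken from a real caller):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);		// now four order-0 pages
 *		__free_page(page + 3);		// each can be freed alone
 *	}
 */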
int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/*
		 * Obey watermarks as if the page was being allocated. We can
		 * emulate a high-order watermark check with a raised order-0
		 * watermark, because we already know our high-order page
		 * exists.
		 */
		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	del_page_from_free_list(page, zone, order);

	/*
	 * Set the pageblock if the isolated page is at least half of a
	 * pageblock
	 */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			/*
			 * Only change normal pageblocks (i.e., they can merge
			 * with others)
			 */
			if (migratetype_is_mergeable(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}
/**
 * __putback_isolated_page - Return a now-isolated page back where we got it
 * @page: Page that was isolated
 * @order: Order of the isolated page
 * @mt: The page's pageblock's migratetype
 *
 * This function is meant to return a page pulled from the free lists via
 * __isolate_free_page back to the free lists they were pulled from.
 */
void __putback_isolated_page(struct page *page, unsigned int order, int mt)
{
	struct zone *zone = page_zone(page);

	/* zone lock should be held when this function is called */
	lockdep_assert_held(&zone->lock);

	/* Return isolated page to tail of freelist. */
	__free_one_page(page, page_to_pfn(page), zone, order, mt,
			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
}
/*
 * Update NUMA hit/miss statistics
 *
 * Must be called with interrupts disabled.
 */
static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
				   long nr_account)
{
#ifdef CONFIG_NUMA
	enum numa_stat_item local_stat = NUMA_LOCAL;

	/* skip numa counters update if numa stats is disabled */
	if (!static_branch_likely(&vm_numa_stat_key))
		return;

	if (zone_to_nid(z) != numa_node_id())
		local_stat = NUMA_OTHER;

	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
		__count_numa_events(z, NUMA_HIT, nr_account);
	else {
		__count_numa_events(z, NUMA_MISS, nr_account);
		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
	}
	__count_numa_events(z, local_stat, nr_account);
#endif
}
static __always_inline
struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
			   unsigned int order, unsigned int alloc_flags,
			   int migratetype)
{
	struct page *page;
	unsigned long flags;

	do {
		page = NULL;
		spin_lock_irqsave(&zone->lock, flags);
		/*
		 * order-0 request can reach here when the pcplist is skipped
		 * due to non-CMA allocation context. HIGHATOMIC area is
		 * reserved for high-order atomic allocation, so order-0
		 * request should skip it.
		 */
		if (order > 0 && alloc_flags & ALLOC_HARDER)
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
		if (!page) {
			page = __rmqueue(zone, order, migratetype, alloc_flags);
			if (!page) {
				spin_unlock_irqrestore(&zone->lock, flags);
				return NULL;
			}
		}
		__mod_zone_freepage_state(zone, -(1 << order),
					  get_pcppage_migratetype(page));
		spin_unlock_irqrestore(&zone->lock, flags);
	} while (check_new_pages(page, order));

	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
	zone_statistics(preferred_zone, zone, 1);

	return page;
}
/* Remove page from the per-cpu list, caller must protect the list */
static inline
struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
			int migratetype,
			unsigned int alloc_flags,
			struct per_cpu_pages *pcp,
			struct list_head *list)
{
	struct page *page;

	do {
		if (list_empty(list)) {
			int batch = READ_ONCE(pcp->batch);
			int alloced;

			/*
			 * Scale batch relative to order if batch implies
			 * free pages can be stored on the PCP. Batch can
			 * be 1 for small zones or for boot pagesets which
			 * should never store free pages as the pages may
			 * belong to arbitrary zones.
			 */
			if (batch > 1)
				batch = max(batch >> order, 2);
			alloced = rmqueue_bulk(zone, order,
					batch, list,
					migratetype, alloc_flags);

			pcp->count += alloced << order;
			if (unlikely(list_empty(list)))
				return NULL;
		}

		page = list_first_entry(list, struct page, pcp_list);
		list_del(&page->pcp_list);
		pcp->count -= 1 << order;
	} while (check_new_pcp(page, order));

	return page;
}
/* Lock and remove page from the per-cpu list */
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, int migratetype,
			unsigned int alloc_flags)
{
	struct per_cpu_pages *pcp;
	struct list_head *list;
	struct page *page;
	unsigned long flags;
	unsigned long __maybe_unused UP_flags;

	/*
	 * spin_trylock may fail due to a parallel drain. In the future, the
	 * trylock will also protect against IRQ reentrancy.
	 */
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
	if (!pcp) {
		pcp_trylock_finish(UP_flags);
		return NULL;
	}

	/*
	 * On allocation, reduce the number of pages that are batch freed.
	 * See nr_pcp_free() where free_factor is increased for subsequent
	 * frees.
	 */
	pcp->free_factor >>= 1;
	list = &pcp->lists[order_to_pindex(migratetype, order)];
	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
	pcp_spin_unlock_irqrestore(pcp, flags);
	pcp_trylock_finish(UP_flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
		zone_statistics(preferred_zone, zone, 1);
	}
	return page;
}
/*
 * Allocate a page from the given zone. Use pcplists for order-0 and other
 * pcp-allowed orders (see pcp_allowed_order()).
 */
static inline
struct page *rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, unsigned int alloc_flags,
			int migratetype)
{
	struct page *page;

	/*
	 * We most definitely don't want callers attempting to
	 * allocate greater than order-1 page units with __GFP_NOFAIL.
	 */
	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

	if (likely(pcp_allowed_order(order))) {
		/*
		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
		 * we need to skip it when CMA area isn't allowed.
		 */
		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
				migratetype != MIGRATE_MOVABLE) {
			page = rmqueue_pcplist(preferred_zone, zone, order,
					gfp_flags, migratetype, alloc_flags);
			if (likely(page))
				goto out;
		}
	}

	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
							migratetype);

out:
	/* Separate test+clear to avoid unnecessary atomics */
	if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
	}

	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
	return page;
}
#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
	struct fault_attr attr;

	bool ignore_gfp_highmem;
	bool ignore_gfp_reclaim;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_reclaim = true,
	.ignore_gfp_highmem = true,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_reclaim &&
			(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	if (gfp_mask & __GFP_NOWARN)
		fail_page_alloc.attr.no_warn = true;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | 0600;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);

	debugfs_create_bool("ignore-gfp-wait", mode, dir,
			    &fail_page_alloc.ignore_gfp_reclaim);
	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
			    &fail_page_alloc.ignore_gfp_highmem);
	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);

	return 0;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
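/*
 * Example (sketch; requires CONFIG_FAULT_INJECTION_DEBUG_FS and a mounted
 * debugfs): make roughly 10% of page allocations of order >= 1 fail for
 * at most the next 100 attempts:
 *
 *	echo 10  > /sys/kernel/debug/fail_page_alloc/probability
 *	echo 100 > /sys/kernel/debug/fail_page_alloc/times
 *	echo 1   > /sys/kernel/debug/fail_page_alloc/min-order
 *
 * probability and times are generic fault_attr attributes created by
 * fault_create_debugfs_attr() above; min-order is added by this file.
 */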
#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return false;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return __should_fail_alloc_page(gfp_mask, order);
}
ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
static inline long __zone_watermark_unusable_free(struct zone *z,
				unsigned int order, unsigned int alloc_flags)
{
	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
	long unusable_free = (1 << order) - 1;

	/*
	 * If the caller does not have rights to ALLOC_HARDER then subtract
	 * the high-atomic reserves. This will over-estimate the size of the
	 * atomic reserve but it avoids a search.
	 */
	if (likely(!alloc_harder))
		unusable_free += z->nr_reserved_highatomic;

#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	return unusable_free;
}
/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages)
{
	long min = mark;
	int o;
	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));

	/* free_pages may go negative - that's OK */
	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;

	if (unlikely(alloc_harder)) {
		/*
		 * OOM victims can try even harder than normal ALLOC_HARDER
		 * users on the grounds that it's definitely going to be in
		 * the exit path shortly and free memory. Any allocation it
		 * makes during the free path will be small and short-lived.
		 */
		if (alloc_flags & ALLOC_OOM)
			min -= min / 2;
		else
			min -= min / 4;
	}

	/*
	 * Check watermarks for an order-0 allocation request. If these
	 * are not met, then a high-order request also cannot go ahead
	 * even if a suitable page happened to be free.
	 */
	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
		return false;

	/* If this is an order-0 request then the watermark is fine */
	if (!order)
		return true;

	/* For a high-order request, check at least one suitable page is free */
	for (o = order; o < MAX_ORDER; o++) {
		struct free_area *area = &z->free_area[o];
		int mt;

		if (!area->nr_free)
			continue;

		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
			if (!free_area_empty(area, mt))
				return true;
		}

#ifdef CONFIG_CMA
		if ((alloc_flags & ALLOC_CMA) &&
		    !free_area_empty(area, MIGRATE_CMA)) {
			return true;
		}
#endif
		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
			return true;
	}
	return false;
}
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
		      int highest_zoneidx, unsigned int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}
static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
				unsigned long mark, int highest_zoneidx,
				unsigned int alloc_flags, gfp_t gfp_mask)
{
	long free_pages;

	free_pages = zone_page_state(z, NR_FREE_PAGES);

	/*
	 * Fast check for order-0 only. If this fails then the reserves
	 * need to be calculated.
	 */
	if (!order) {
		long usable_free;
		long reserved;

		usable_free = free_pages;
		reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);

		/* reserved may over estimate high-atomic reserves. */
		usable_free -= min(usable_free, reserved);
		if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
			return true;
	}

	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
					free_pages))
		return true;

	/*
	 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
	 * when checking the min watermark. The min watermark is the
	 * point where boosting is ignored so that kswapd is woken up
	 * when below the low watermark.
	 */
	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
		mark = z->_watermark[WMARK_MIN];
		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
					alloc_flags, free_pages);
	}

	return false;
}
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			unsigned long mark, int highest_zoneidx)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
								free_pages);
}
#ifdef CONFIG_NUMA
int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;

static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
				node_reclaim_distance;
}
#else	/* CONFIG_NUMA */
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return true;
}
#endif	/* CONFIG_NUMA */
/*
 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
 * fragmentation is subtle. If the preferred zone was HIGHMEM then
 * premature use of a lower zone may cause lowmem pressure problems that
 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
 * probably too small. It only makes sense to spread allocations to avoid
 * fragmentation between the Normal and DMA32 zones.
 */
static inline unsigned int
alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
{
	unsigned int alloc_flags;

	/*
	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
	 * to save a branch.
	 */
	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);

#ifdef CONFIG_ZONE_DMA32
	if (!zone)
		return alloc_flags;

	if (zone_idx(zone) != ZONE_NORMAL)
		return alloc_flags;

	/*
	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
	 * on UMA that if Normal is populated then so is DMA32.
	 */
	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
	if (nr_online_nodes > 1 && !populated_zone(--zone))
		return alloc_flags;

	alloc_flags |= ALLOC_NOFRAGMENT;
#endif /* CONFIG_ZONE_DMA32 */
	return alloc_flags;
}
/* Must be called after current_gfp_context() which can change gfp_mask */
static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
						  unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
						const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;
	struct pglist_data *last_pgdat = NULL;
	bool last_pgdat_dirty_ok = false;
	bool no_fallback;

retry:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also __cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
	 */
	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
	z = ac->preferred_zoneref;
	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
					ac->nodemask) {
		struct page *page;
		unsigned long mark;

		if (cpusets_enabled() &&
			(alloc_flags & ALLOC_CPUSET) &&
			!__cpuset_zone_allowed(zone, gfp_mask))
				continue;
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a node that is within its dirty
		 * limit, such that no single node holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the node's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * XXX: For now, allow allocations to potentially
		 * exceed the per-node dirty limit in the slowpath
		 * (spread_dirty_pages unset) before going into reclaim,
		 * which is important when on a NUMA setup the allowed
		 * nodes are together not big enough to reach the
		 * global limit.  The proper fix for these situations
		 * will require awareness of nodes in the
		 * dirty-throttling and the flusher threads.
		 */
		if (ac->spread_dirty_pages) {
			if (last_pgdat != zone->zone_pgdat) {
				last_pgdat = zone->zone_pgdat;
				last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
			}

			if (!last_pgdat_dirty_ok)
				continue;
		}

		if (no_fallback && nr_online_nodes > 1 &&
		    zone != ac->preferred_zoneref->zone) {
			int local_nid;

			/*
			 * If moving to a remote node, retry but allow
			 * fragmenting fallbacks. Locality is more important
			 * than fragmentation avoidance.
			 */
			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
			if (zone_to_nid(zone) != local_nid) {
				alloc_flags &= ~ALLOC_NOFRAGMENT;
				goto retry;
			}
		}

		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
		if (!zone_watermark_fast(zone, order, mark,
				       ac->highest_zoneidx, alloc_flags,
				       gfp_mask)) {
			int ret;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
			/*
			 * Watermark failed for this zone, but see if we can
			 * grow this zone if it contains deferred pages.
			 */
			if (static_branch_unlikely(&deferred_pages)) {
				if (_deferred_grow_zone(zone, order))
					goto try_this_zone;
			}
#endif
			/* Checked here to keep the fast path fast */
			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
			if (alloc_flags & ALLOC_NO_WATERMARKS)
				goto try_this_zone;

			if (!node_reclaim_enabled() ||
			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
				continue;

			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
			switch (ret) {
			case NODE_RECLAIM_NOSCAN:
				/* did not scan */
				continue;
			case NODE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				continue;
			default:
				/* did we reclaim enough */
				if (zone_watermark_ok(zone, order, mark,
					ac->highest_zoneidx, alloc_flags))
					goto try_this_zone;

				continue;
			}
		}

try_this_zone:
		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
				gfp_mask, alloc_flags, ac->migratetype);
		if (page) {
			prep_new_page(page, order, gfp_mask, alloc_flags);

			/*
			 * If this is a high-order atomic allocation then check
			 * if the pageblock should be reserved for the future
			 */
			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
				reserve_highatomic_pageblock(page, zone, order);

			return page;
		} else {
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
			/* Try again if zone has deferred pages */
			if (static_branch_unlikely(&deferred_pages)) {
				if (_deferred_grow_zone(zone, order))
					goto try_this_zone;
			}
#endif
		}
	}

	/*
	 * It's possible on a UMA machine to get through all zones that are
	 * fragmented. If avoiding fragmentation, reset and try again.
	 */
	if (no_fallback) {
		alloc_flags &= ~ALLOC_NOFRAGMENT;
		goto retry;
	}

	return NULL;
}
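/*
 * In rough outline (illustrative), the fast path above boils down to:
 *
 *	for each eligible zone, nearest first:
 *		skip it if cpusets, the dirty limit or the NOFRAGMENT
 *		policy say so;
 *		if the watermark check fails, try node reclaim or move on;
 *		otherwise rmqueue() - per-cpu lists first, then buddy lists.
 */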
static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned int filter = SHOW_MEM_FILTER_NODES;

	/*
	 * This documents exceptions given to allocations in certain
	 * contexts that are allowed to allocate outside current's set
	 * of allowed nodes.
	 */
	if (!(gfp_mask & __GFP_NOMEMALLOC))
		if (tsk_is_oom_victim(current) ||
		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
			filter &= ~SHOW_MEM_FILTER_NODES;
	if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
		filter &= ~SHOW_MEM_FILTER_NODES;

	show_mem(filter, nodemask);
}
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);

	if ((gfp_mask & __GFP_NOWARN) ||
	     !__ratelimit(&nopage_rs) ||
	     ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
			current->comm, &vaf, gfp_mask, &gfp_mask,
			nodemask_pr_args(nodemask));
	va_end(args);

	cpuset_print_current_mems_allowed();
	pr_cont("\n");
	dump_stack();
	warn_alloc_show_mem(gfp_mask, nodemask);
}
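/*
 * Example (illustrative): this is how the slow path reports a final
 * failure (see __alloc_pages_slowpath() below):
 *
 *	warn_alloc(gfp_mask, ac->nodemask,
 *		   "page allocation failure: order:%u", order);
 */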
static inline struct page *
__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
			      unsigned int alloc_flags,
			      const struct alloc_context *ac)
{
	struct page *page;

	page = get_page_from_freelist(gfp_mask, order,
			alloc_flags|ALLOC_CPUSET, ac);
	/*
	 * fallback to ignore cpuset restriction if our nodes
	 * are depleted
	 */
	if (!page)
		page = get_page_from_freelist(gfp_mask, order,
				alloc_flags, ac);

	return page;
}
static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	const struct alloc_context *ac, unsigned long *did_some_progress)
{
	struct oom_control oc = {
		.zonelist = ac->zonelist,
		.nodemask = ac->nodemask,
		.memcg = NULL,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct page *page;

	*did_some_progress = 0;

	/*
	 * Acquire the oom lock.  If that fails, somebody else is
	 * making progress for us.
	 */
	if (!mutex_trylock(&oom_lock)) {
		*did_some_progress = 1;
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure. But make sure that this reclaim
	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
	 * allocation which will never fail due to oom_lock already held.
	 */
	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
				      ~__GFP_DIRECT_RECLAIM, order,
				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
	if (page)
		goto out;

	/* Coredumps can quickly deplete all memory reserves */
	if (current->flags & PF_DUMPCORE)
		goto out;
	/* The OOM killer will not help higher order allocs */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		goto out;
	/*
	 * We have already exhausted all our reclaim opportunities without any
	 * success so it is time to admit defeat. We will skip the OOM killer
	 * because it is very likely that the caller has a more reasonable
	 * fallback than shooting a random task.
	 *
	 * The OOM killer may not free memory on a specific node.
	 */
	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
		goto out;
	/* The OOM killer does not needlessly kill tasks for lowmem */
	if (ac->highest_zoneidx < ZONE_NORMAL)
		goto out;
	if (pm_suspended_storage())
		goto out;
	/*
	 * XXX: GFP_NOFS allocations should rather fail than rely on
	 * other request to make a forward progress.
	 * We are in an unfortunate situation where out_of_memory cannot
	 * do much for this context but let's try it to at least get
	 * access to memory reserved if the current task is killed (see
	 * out_of_memory). Once filesystems are ready to handle allocation
	 * failures more gracefully we should just bail out here.
	 */

	/* Exhausted what can be done so it's blame time */
	if (out_of_memory(&oc) ||
	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
		*did_some_progress = 1;

		/*
		 * Help non-failing allocations by giving them access to memory
		 * reserves
		 */
		if (gfp_mask & __GFP_NOFAIL)
			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
					ALLOC_NO_WATERMARKS, ac);
	}
out:
	mutex_unlock(&oom_lock);
	return page;
}
/*
 * Maximum number of compaction retries with a progress before the OOM
 * killer is considered the only way to move forward.
 */
#define MAX_COMPACT_RETRIES 16
#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, enum compact_result *compact_result)
{
	struct page *page = NULL;
	unsigned long pflags;
	unsigned int noreclaim_flag;

	if (!order)
		return NULL;

	psi_memstall_enter(&pflags);
	delayacct_compact_start();
	noreclaim_flag = memalloc_noreclaim_save();

	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
								prio, &page);

	memalloc_noreclaim_restore(noreclaim_flag);
	psi_memstall_leave(&pflags);
	delayacct_compact_end();

	if (*compact_result == COMPACT_SKIPPED)
		return NULL;
	/*
	 * At least in one zone compaction wasn't deferred or skipped, so let's
	 * count a compaction stall
	 */
	count_vm_event(COMPACTSTALL);

	/* Prep a captured page if available */
	if (page)
		prep_new_page(page, order, gfp_mask, alloc_flags);

	/* Try get a page from the freelist if available */
	if (!page)
		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

	if (page) {
		struct zone *zone = page_zone(page);

		zone->compact_blockskip_flush = false;
		compaction_defer_reset(zone, order, true);
		count_vm_event(COMPACTSUCCESS);
		return page;
	}

	/*
	 * It's bad if compaction run occurs and fails. The most likely reason
	 * is that pages exist, but not enough to satisfy watermarks.
	 */
	count_vm_event(COMPACTFAIL);

	cond_resched();

	return NULL;
}
static inline bool
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
		     enum compact_result compact_result,
		     enum compact_priority *compact_priority,
		     int *compaction_retries)
{
	int max_retries = MAX_COMPACT_RETRIES;
	int min_priority;
	bool ret = false;
	int retries = *compaction_retries;
	enum compact_priority priority = *compact_priority;

	if (!order)
		return false;

	if (fatal_signal_pending(current))
		return false;

	if (compaction_made_progress(compact_result))
		(*compaction_retries)++;

	/*
	 * compaction considers all the zone as desperately out of memory
	 * so it doesn't really make much sense to retry except when the
	 * failure could be caused by insufficient priority
	 */
	if (compaction_failed(compact_result))
		goto check_priority;

	/*
	 * compaction was skipped because there are not enough order-0 pages
	 * to work with, so we retry only if it looks like reclaim can help.
	 */
	if (compaction_needs_reclaim(compact_result)) {
		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
		goto out;
	}

	/*
	 * make sure the compaction wasn't deferred or didn't bail out early
	 * due to locks contention before we declare that we should give up.
	 * But the next retry should use a higher priority if allowed, so
	 * we don't just keep bailing out endlessly.
	 */
	if (compaction_withdrawn(compact_result)) {
		goto check_priority;
	}

	/*
	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
	 * costly ones because they are de facto nofail and invoke OOM
	 * killer to move on while costly can fail and users are ready
	 * to cope with that. 1/4 retries is rather arbitrary but we
	 * would need much more detailed feedback from compaction to
	 * make a better decision.
	 */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		max_retries /= 4;
	if (*compaction_retries <= max_retries) {
		ret = true;
		goto out;
	}

	/*
	 * Make sure there are attempts at the highest priority if we exhausted
	 * all retries or failed at the lower priorities.
	 */
check_priority:
	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;

	if (*compact_priority > min_priority) {
		(*compact_priority)--;
		*compaction_retries = 0;
		ret = true;
	}
out:
	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
	return ret;
}
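/*
 * Example (illustrative): for a non-costly request (say order 3), a caller
 * gets up to MAX_COMPACT_RETRIES (16) retries at each priority; costly
 * orders get a quarter of that. Once those are exhausted, *compact_priority
 * steps from DEF_COMPACT_PRIORITY towards MIN_COMPACT_PRIORITY and the
 * retry counter restarts from zero at the stronger priority.
 */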
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, enum compact_result *compact_result)
{
	*compact_result = COMPACT_SKIPPED;
	return NULL;
}

static inline bool
should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
		     enum compact_result compact_result,
		     enum compact_priority *compact_priority,
		     int *compaction_retries)
{
	struct zone *zone;
	struct zoneref *z;

	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	/*
	 * There are setups with compaction disabled which would prefer to loop
	 * inside the allocator rather than hit the oom killer prematurely.
	 * Let's give them a good hope and keep retrying while the order-0
	 * watermarks are OK.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
				ac->highest_zoneidx, ac->nodemask) {
		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
					ac->highest_zoneidx, alloc_flags))
			return true;
	}
	return false;
}
#endif /* CONFIG_COMPACTION */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map __fs_reclaim_map =
	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);

static bool __need_reclaim(gfp_t gfp_mask)
{
	/* no reclaim without waiting on it */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	/* this guy won't enter reclaim */
	if (current->flags & PF_MEMALLOC)
		return false;

	if (gfp_mask & __GFP_NOLOCKDEP)
		return false;

	return true;
}

void __fs_reclaim_acquire(unsigned long ip)
{
	lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
}

void __fs_reclaim_release(unsigned long ip)
{
	lock_release(&__fs_reclaim_map, ip);
}

void fs_reclaim_acquire(gfp_t gfp_mask)
{
	gfp_mask = current_gfp_context(gfp_mask);

	if (__need_reclaim(gfp_mask)) {
		if (gfp_mask & __GFP_FS)
			__fs_reclaim_acquire(_RET_IP_);

#ifdef CONFIG_MMU_NOTIFIER
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#endif
	}
}
EXPORT_SYMBOL_GPL(fs_reclaim_acquire);

void fs_reclaim_release(gfp_t gfp_mask)
{
	gfp_mask = current_gfp_context(gfp_mask);

	if (__need_reclaim(gfp_mask)) {
		if (gfp_mask & __GFP_FS)
			__fs_reclaim_release(_RET_IP_);
	}
}
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
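/*
 * Example (illustrative): code that must never take a lock which is also
 * taken under __GFP_FS reclaim can teach lockdep about the dependency up
 * front with a dummy acquire/release pair, assuming a sleepable GFP_KERNEL
 * context:
 *
 *	fs_reclaim_acquire(GFP_KERNEL);
 *	fs_reclaim_release(GFP_KERNEL);
 */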
/* Perform direct synchronous page reclaim */
static unsigned long
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
					const struct alloc_context *ac)
{
	unsigned int noreclaim_flag;
	unsigned long progress;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	fs_reclaim_acquire(gfp_mask);
	noreclaim_flag = memalloc_noreclaim_save();

	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
								ac->nodemask);

	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(gfp_mask);

	cond_resched();

	return progress;
}
/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		unsigned long *did_some_progress)
{
	struct page *page = NULL;
	unsigned long pflags;
	bool drained = false;

	psi_memstall_enter(&pflags);
	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
	if (unlikely(!(*did_some_progress)))
		goto out;

retry:
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists or in high alloc reserves.
	 * Shrink them and try again
	 */
	if (!page && !drained) {
		unreserve_highatomic_pageblock(ac, false);
		drain_all_pages(NULL);
		drained = true;
		goto retry;
	}
out:
	psi_memstall_leave(&pflags);

	return page;
}
static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
			     const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *last_pgdat = NULL;
	enum zone_type highest_zoneidx = ac->highest_zoneidx;

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
					ac->nodemask) {
		if (!managed_zone(zone))
			continue;
		if (last_pgdat != zone->zone_pgdat) {
			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
			last_pgdat = zone->zone_pgdat;
		}
	}
}
static inline unsigned int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

	/*
	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
	 * to save two branches.
	 */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int)
		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));

	if (gfp_mask & __GFP_ATOMIC) {
		/*
		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
		 * if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
		 * comment for __cpuset_node_allowed().
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && in_task())
		alloc_flags |= ALLOC_HARDER;

	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);

	return alloc_flags;
}
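/*
 * Example (illustrative): GFP_ATOMIC is __GFP_HIGH | __GFP_ATOMIC |
 * __GFP_KSWAPD_RECLAIM, so the function above turns it into
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_KSWAPD | ALLOC_HARDER with
 * ALLOC_CPUSET cleared - i.e. dig deeper into reserves, wake kswapd,
 * and ignore cpuset restrictions rather than fail.
 */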
static bool oom_reserves_allowed(struct task_struct *tsk)
{
	if (!tsk_is_oom_victim(tsk))
		return false;

	/*
	 * !MMU doesn't have oom reaper so give access to memory reserves
	 * only to the thread with TIF_MEMDIE set
	 */
	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
		return false;

	return true;
}
/*
 * Distinguish requests which really need access to full memory
 * reserves from oom victims which can live with a portion of it
 */
static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
{
	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
		return 0;
	if (gfp_mask & __GFP_MEMALLOC)
		return ALLOC_NO_WATERMARKS;
	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
		return ALLOC_NO_WATERMARKS;
	if (!in_interrupt()) {
		if (current->flags & PF_MEMALLOC)
			return ALLOC_NO_WATERMARKS;
		else if (oom_reserves_allowed(current))
			return ALLOC_OOM;
	}

	return 0;
}

bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!__gfp_pfmemalloc_flags(gfp_mask);
}
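/*
 * Example (illustrative):
 *
 *	gfp_pfmemalloc_allowed(GFP_ATOMIC | __GFP_MEMALLOC)
 *		-> true (maps to ALLOC_NO_WATERMARKS)
 *	gfp_pfmemalloc_allowed(GFP_ATOMIC | __GFP_MEMALLOC | __GFP_NOMEMALLOC)
 *		-> false (__GFP_NOMEMALLOC vetoes the reserves outright)
 */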
/*
 * Checks whether it makes sense to retry the reclaim to make forward progress
 * for the given allocation request.
 *
 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
 * without success, or when we couldn't even meet the watermark if we
 * reclaimed all remaining pages on the LRU lists.
 *
 * Returns true if a retry is viable or false to enter the oom path.
 */
static inline bool
should_reclaim_retry(gfp_t gfp_mask, unsigned order,
		     struct alloc_context *ac, int alloc_flags,
		     bool did_some_progress, int *no_progress_loops)
{
	struct zone *zone;
	struct zoneref *z;
	bool ret = false;

	/*
	 * Costly allocations might have made a progress but this doesn't mean
	 * their order will become available due to high fragmentation so
	 * always increment the no progress counter for them
	 */
	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
		*no_progress_loops = 0;
	else
		(*no_progress_loops)++;

	/*
	 * Make sure we converge to OOM if we cannot make any progress
	 * several times in the row.
	 */
	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
		/* Before OOM, exhaust highatomic_reserve */
		return unreserve_highatomic_pageblock(ac, true);
	}

	/*
	 * Keep reclaiming pages while there is a chance this will lead
	 * somewhere.  If none of the target zones can satisfy our allocation
	 * request even if all reclaimable pages are considered then we are
	 * screwed and have to go OOM.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
				ac->highest_zoneidx, ac->nodemask) {
		unsigned long available;
		unsigned long reclaimable;
		unsigned long min_wmark = min_wmark_pages(zone);
		bool wmark;

		available = reclaimable = zone_reclaimable_pages(zone);
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);

		/*
		 * Would the allocation succeed if we reclaimed all
		 * reclaimable pages?
		 */
		wmark = __zone_watermark_ok(zone, order, min_wmark,
				ac->highest_zoneidx, alloc_flags, available);
		trace_reclaim_retry_zone(z, order, reclaimable,
				available, min_wmark, *no_progress_loops, wmark);
		if (wmark) {
			ret = true;
			break;
		}
	}

	/*
	 * Memory allocation/reclaim might be called from a WQ context and the
	 * current implementation of the WQ concurrency control doesn't
	 * recognize that a particular WQ is congested if the worker thread is
	 * looping without ever sleeping. Therefore we have to do a short sleep
	 * here rather than calling cond_resched().
	 */
	if (current->flags & PF_WQ_WORKER)
		schedule_timeout_uninterruptible(1);
	else
		cond_resched();

	return ret;
}
static inline bool
check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
{
	/*
	 * It's possible that cpuset's mems_allowed and the nodemask from
	 * mempolicy don't intersect. This should be normally dealt with by
	 * policy_nodemask(), but it's possible to race with cpuset update in
	 * such a way the check therein was true, and then it became false
	 * before we got our cpuset_mems_cookie here.
	 * This assumes that for all allocations, ac->nodemask can come only
	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
	 * when it does not intersect with the cpuset restrictions) or the
	 * caller can deal with a violated nodemask.
	 */
	if (cpusets_enabled() && ac->nodemask &&
			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
		ac->nodemask = NULL;
		return true;
	}

	/*
	 * When updating a task's mems_allowed or mempolicy nodemask, it is
	 * possible to race with parallel threads in such a way that our
	 * allocation can fail while the mask is being updated. If we are about
	 * to fail, check if the cpuset changed during allocation and if so,
	 * retry.
	 */
	if (read_mems_allowed_retry(cpuset_mems_cookie))
		return true;

	return false;
}
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
						struct alloc_context *ac)
{
	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
	struct page *page = NULL;
	unsigned int alloc_flags;
	unsigned long did_some_progress;
	enum compact_priority compact_priority;
	enum compact_result compact_result;
	int compaction_retries;
	int no_progress_loops;
	unsigned int cpuset_mems_cookie;
	int reserve_flags;

	/*
	 * We also sanity check to catch abuse of atomic reserves being used by
	 * callers that are not in atomic context.
	 */
	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
		gfp_mask &= ~__GFP_ATOMIC;

retry_cpuset:
	compaction_retries = 0;
	no_progress_loops = 0;
	compact_priority = DEF_COMPACT_PRIORITY;
	cpuset_mems_cookie = read_mems_allowed_begin();

	/*
	 * The fast path uses conservative alloc_flags to succeed only until
	 * kswapd needs to be woken up, and to avoid the cost of setting up
	 * alloc_flags precisely. So we do that now.
	 */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);

	/*
	 * We need to recalculate the starting point for the zonelist iterator
	 * because we might have used different nodemask in the fast path, or
	 * there was a cpuset modification and we are retrying - otherwise we
	 * could end up iterating over non-eligible zones endlessly.
	 */
	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->highest_zoneidx, ac->nodemask);
	if (!ac->preferred_zoneref->zone)
		goto nopage;

	/*
	 * Check for insane configurations where the cpuset doesn't contain
	 * any suitable zone to satisfy the request - e.g. non-movable
	 * GFP_HIGHUSER allocations from MOVABLE nodes only.
	 */
	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
		struct zoneref *z = first_zones_zonelist(ac->zonelist,
					ac->highest_zoneidx,
					&cpuset_current_mems_allowed);
		if (!z->zone)
			goto nopage;
	}

	if (alloc_flags & ALLOC_KSWAPD)
		wake_all_kswapds(order, gfp_mask, ac);

	/*
	 * The adjusted alloc_flags might result in immediate success, so try
	 * that first
	 */
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (page)
		goto got_pg;

	/*
	 * For costly allocations, try direct compaction first, as it's likely
	 * that we have enough base pages and don't need to reclaim. For non-
	 * movable high-order allocations, do that as well, as compaction will
	 * try prevent permanent fragmentation by migrating from blocks of the
	 * same migratetype.
	 * Don't try this for allocations that are allowed to ignore
	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
	 */
	if (can_direct_reclaim &&
			(costly_order ||
			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
		page = __alloc_pages_direct_compact(gfp_mask, order,
						alloc_flags, ac,
						INIT_COMPACT_PRIORITY,
						&compact_result);
		if (page)
			goto got_pg;

		/*
		 * Checks for costly allocations with __GFP_NORETRY, which
		 * includes some THP page fault allocations
		 */
		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
			/*
			 * If allocating entire pageblock(s) and compaction
			 * failed because all zones are below low watermarks
			 * or is prohibited because it recently failed at this
			 * order, fail immediately unless the allocator has
			 * requested compaction and reclaim retry.
			 *
			 * Reclaim is
			 *  - potentially very expensive because zones are far
			 *    below their low watermarks or this is part of very
			 *    bursty high order allocations,
			 *  - not guaranteed to help because isolate_freepages()
			 *    may not iterate over freed pages as part of its
			 *    linear scan, and
			 *  - unlikely to make entire pageblocks free on its
			 *    own.
			 */
			if (compact_result == COMPACT_SKIPPED ||
			    compact_result == COMPACT_DEFERRED)
				goto nopage;

			/*
			 * Looks like reclaim/compaction is worth trying, but
			 * sync compaction could be very expensive, so keep
			 * using async compaction.
			 */
			compact_priority = INIT_COMPACT_PRIORITY;
		}
	}

retry:
	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
	if (alloc_flags & ALLOC_KSWAPD)
		wake_all_kswapds(order, gfp_mask, ac);

	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
	if (reserve_flags)
		alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);

	/*
	 * Reset the nodemask and zonelist iterators if memory policies can be
	 * ignored. These allocations are high priority and system rather than
	 * user oriented.
	 */
	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
		ac->nodemask = NULL;
		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->highest_zoneidx, ac->nodemask);
	}

	/* Attempt with potentially adjusted zonelist and alloc_flags */
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (page)
		goto got_pg;

	/* Caller is not willing to reclaim, we can't balance anything */
	if (!can_direct_reclaim)
		goto nopage;

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC)
		goto nopage;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
							&did_some_progress);
	if (page)
		goto got_pg;

	/* Try direct compaction and then allocating */
	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
					compact_priority, &compact_result);
	if (page)
		goto got_pg;

	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		goto nopage;

	/*
	 * Do not retry costly high order allocations unless they are
	 * __GFP_RETRY_MAYFAIL
	 */
	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
		goto nopage;

	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
				 did_some_progress > 0, &no_progress_loops))
		goto retry;

	/*
	 * It doesn't make any sense to retry for the compaction if the order-0
	 * reclaim is not able to make any progress because the current
	 * implementation of the compaction depends on the sufficient amount
	 * of free memory (see __compaction_suitable)
	 */
	if (did_some_progress > 0 &&
			should_compact_retry(ac, order, alloc_flags,
				compact_result, &compact_priority,
				&compaction_retries))
		goto retry;

	/* Deal with possible cpuset update races before we start OOM killing */
	if (check_retry_cpuset(cpuset_mems_cookie, ac))
		goto retry_cpuset;

	/* Reclaim has failed us, start killing things */
	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
	if (page)
		goto got_pg;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (tsk_is_oom_victim(current) &&
	    (alloc_flags & ALLOC_OOM ||
	     (gfp_mask & __GFP_NOMEMALLOC)))
		goto nopage;

	/* Retry as long as the OOM killer is making progress */
	if (did_some_progress) {
		no_progress_loops = 0;
		goto retry;
	}

nopage:
	/* Deal with possible cpuset update races before we fail */
	if (check_retry_cpuset(cpuset_mems_cookie, ac))
		goto retry_cpuset;

	/*
	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
	 * we always retry
	 */
	if (gfp_mask & __GFP_NOFAIL) {
		/*
		 * All existing users of the __GFP_NOFAIL are blockable, so warn
		 * of any new users that actually require GFP_NOWAIT
		 */
		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
			goto fail;

		/*
		 * PF_MEMALLOC request from this context is rather bizarre
		 * because we cannot reclaim anything and only can loop waiting
		 * for somebody to do a work for us
		 */
		WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);

		/*
		 * non failing costly orders are a hard requirement which we
		 * are not prepared for much so let's warn about these users
		 * so that we can identify them and convert them to something
		 * more sensible.
		 */
		WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);

		/*
		 * Help non-failing allocations by giving them access to memory
		 * reserves but do not use ALLOC_NO_WATERMARKS because this
		 * could deplete whole memory reserves which would just make
		 * the situation worse
		 */
		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
		if (page)
			goto got_pg;

		cond_resched();
		goto retry;
	}
fail:
	warn_alloc(gfp_mask, ac->nodemask,
			"page allocation failure: order:%u", order);
got_pg:
	return page;
}
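/*
 * In rough outline (illustrative), the slow path above is a retry ladder:
 *
 *	wake kswapd -> retry the freelists -> direct compaction (costly
 *	orders first) -> direct reclaim -> direct compaction again ->
 *	reclaim/compaction retry checks -> OOM kill -> __GFP_NOFAIL fallback
 *
 * with cpuset-race checks before the OOM kill and before the final failure.
 */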
static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
		int preferred_nid, nodemask_t *nodemask,
		struct alloc_context *ac, gfp_t *alloc_gfp,
		unsigned int *alloc_flags)
{
	ac->highest_zoneidx = gfp_zone(gfp_mask);
	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
	ac->nodemask = nodemask;
	ac->migratetype = gfp_migratetype(gfp_mask);

	if (cpusets_enabled()) {
		*alloc_gfp |= __GFP_HARDWALL;
		/*
		 * When we are in the interrupt context, it is irrelevant
		 * to the current task context. It means that any node is OK.
		 */
		if (in_task() && !ac->nodemask)
			ac->nodemask = &cpuset_current_mems_allowed;
		else
			*alloc_flags |= ALLOC_CPUSET;
	}

	might_alloc(gfp_mask);

	if (should_fail_alloc_page(gfp_mask, order))
		return false;

	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);

	/* Dirty zone balancing only done in the fast path */
	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);

	/*
	 * The preferred zone is used for statistics but crucially it is
	 * also used as the starting point for the zonelist iterator. It
	 * may get reset for allocations that ignore memory policies.
	 */
	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->highest_zoneidx, ac->nodemask);

	return true;
}
/**
 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
 * @gfp: GFP flags for the allocation
 * @preferred_nid: The preferred NUMA node ID to allocate from
 * @nodemask: Set of nodes to allocate from, may be NULL
 * @nr_pages: The number of pages desired on the list or array
 * @page_list: Optional list to store the allocated pages
 * @page_array: Optional array to store the pages
 *
 * This is a batched version of the page allocator that attempts to
 * allocate nr_pages quickly. Pages are added to page_list if page_list
 * is not NULL, otherwise it is assumed that the page_array is valid.
 *
 * For lists, nr_pages is the number of pages that should be allocated.
 *
 * For arrays, only NULL elements are populated with pages and nr_pages
 * is the maximum number of pages that will be stored in the array.
 *
 * Returns the number of pages on the list or array.
 */
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
			nodemask_t *nodemask, int nr_pages,
			struct list_head *page_list,
			struct page **page_array)
{
	struct page *page;
	unsigned long flags;
	unsigned long __maybe_unused UP_flags;
	struct zone *zone;
	struct zoneref *z;
	struct per_cpu_pages *pcp;
	struct list_head *pcp_list;
	struct alloc_context ac;
	gfp_t alloc_gfp;
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	int nr_populated = 0, nr_account = 0;

	/*
	 * Skip populated array elements to determine if any pages need
	 * to be allocated before disabling IRQs.
	 */
	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
		nr_populated++;

	/* No pages requested? */
	if (unlikely(nr_pages <= 0))
		goto out;

	/* Already populated array? */
	if (unlikely(page_array && nr_pages - nr_populated == 0))
		goto out;

	/* Bulk allocator does not support memcg accounting. */
	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
		goto failed;

	/* Use the single page allocator for one page. */
	if (nr_pages - nr_populated == 1)
		goto failed;

#ifdef CONFIG_PAGE_OWNER
	/*
	 * PAGE_OWNER may recurse into the allocator to allocate space to
	 * save the stack with pagesets.lock held. Releasing/reacquiring
	 * removes much of the performance benefit of bulk allocation so
	 * force the caller to allocate one page at a time as it'll have
	 * similar performance to added complexity to the bulk allocator.
	 */
	if (static_branch_unlikely(&page_owner_inited))
		goto failed;
#endif

	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
	gfp &= gfp_allowed_mask;
	alloc_gfp = gfp;
	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
		goto out;
	gfp = alloc_gfp;

	/* Find an allowed local zone that meets the low watermark. */
	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
		unsigned long mark;

		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
		    !__cpuset_zone_allowed(zone, gfp)) {
			continue;
		}

		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
			goto failed;
		}

		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
		if (zone_watermark_fast(zone, 0, mark,
				zonelist_zone_idx(ac.preferred_zoneref),
				alloc_flags, gfp)) {
			break;
		}
	}

	/*
	 * If there are no allowed local zones that meet the watermarks then
	 * try to allocate a single page and reclaim if necessary.
	 */
	if (unlikely(!zone))
		goto failed;

	/* Is a parallel drain in progress? */
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
	if (!pcp)
		goto failed_irq;

	/* Attempt the batch allocation */
	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
	while (nr_populated < nr_pages) {

		/* Skip existing pages */
		if (page_array && page_array[nr_populated]) {
			nr_populated++;
			continue;
		}

		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
								pcp, pcp_list);
		if (unlikely(!page)) {
			/* Try and allocate at least one page */
			if (!nr_account) {
				pcp_spin_unlock_irqrestore(pcp, flags);
				goto failed_irq;
			}
			break;
		}
		nr_account++;

		prep_new_page(page, 0, gfp, 0);
		if (page_list)
			list_add(&page->lru, page_list);
		else
			page_array[nr_populated] = page;
		nr_populated++;
	}

	pcp_spin_unlock_irqrestore(pcp, flags);
	pcp_trylock_finish(UP_flags);

	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);

out:
	return nr_populated;

failed_irq:
	pcp_trylock_finish(UP_flags);

failed:
	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
	if (page) {
		if (page_list)
			list_add(&page->lru, page_list);
		else
			page_array[nr_populated] = page;
		nr_populated++;
	}

	goto out;
}
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
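/*
 * Example (illustrative): an array-based caller. Only NULL slots are
 * filled, so the call can simply be repeated later to top the array up:
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long nr;
 *
 *	nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL,
 *				ARRAY_SIZE(pages), NULL, pages);
 *
 * nr is at most 8 here; most callers go through the alloc_pages_bulk_array()
 * and alloc_pages_bulk_list() wrappers in include/linux/gfp.h.
 */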
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
							nodemask_t *nodemask)
{
	struct page *page;
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
	struct alloc_context ac = { };

	/*
	 * There are several places where we assume that the order value is sane
	 * so bail out early if the request is out of bound.
	 */
	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
		return NULL;

	gfp &= gfp_allowed_mask;
	/*
	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
	 * resp. GFP_NOIO which has to be inherited for all allocation requests
	 * from a particular context which has been marked by
	 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
	 * movable zones are not used during allocation.
	 */
	gfp = current_gfp_context(gfp);
	alloc_gfp = gfp;
	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
			&alloc_gfp, &alloc_flags))
		return NULL;

	/*
	 * Forbid the first pass from falling back to types that fragment
	 * memory until all local zones are considered.
	 */
	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);

	/* First allocation attempt */
	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
	if (likely(page))
		goto out;

	alloc_gfp = gfp;
	ac.spread_dirty_pages = false;

	/*
	 * Restore the original nodemask if it was potentially replaced with
	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
	 */
	ac.nodemask = nodemask;

	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);

out:
	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
		__free_pages(page, order);
		page = NULL;
	}

	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);

	return page;
}
EXPORT_SYMBOL(__alloc_pages);
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask)
{
	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
			preferred_nid, nodemask);

	if (page && order > 1)
		prep_transhuge_page(page);
	return (struct folio *)page;
}
EXPORT_SYMBOL(__folio_alloc);
/*
 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
 * address cannot represent highmem pages. Use alloc_pages and then kmap if
 * you need to access high mem.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);
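/*
 * Example (illustrative): a matched pair. get_zeroed_page() hands back a
 * kernel virtual address, so it is undone with free_page(), not put_page():
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (addr)
 *		free_page(addr);
 */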
/**
 * __free_pages - Free pages allocated with alloc_pages().
 * @page: The page pointer returned from alloc_pages().
 * @order: The order of the allocation.
 *
 * This function can free multi-page allocations that are not compound
 * pages.  It does not check that the @order passed in matches that of
 * the allocation, so it is easy to leak memory.  Freeing more memory
 * than was allocated will probably emit a warning.
 *
 * If the last reference to this page is speculative, it will be released
 * by put_page() which only frees the first page of a non-compound
 * allocation.  To prevent the remaining pages from being leaked, we free
 * the subsequent pages here.  If you want to use the page's reference
 * count to decide when to free the allocation, you should allocate a
 * compound page, and use put_page() instead of __free_pages().
 *
 * Context: May be called in interrupt context or while holding a normal
 * spinlock, but not in NMI context or while holding a raw spinlock.
 */
void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page))
		free_the_page(page, order);
	else if (!PageHead(page))
		while (order-- > 0)
			free_the_page(page + (1 << order), order);
}
EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(free_pages);
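/*
 * Example (illustrative): the refcount-friendly pattern the comment above
 * recommends - allocate a compound page, so a single put_page() releases
 * the whole order-2 (four page) block:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
 *	if (page)
 *		put_page(page);
 */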
/*
 * Page Fragment:
 *  An arbitrary-length arbitrary-offset area of memory which resides
 *  within a 0 or higher order page.  Multiple fragments within that page
 *  are individually refcounted, in the page's reference counter.
 *
 * The page_frag functions below provide a simple allocation framework for
 * page fragments.  This is used by the network stack and network device
 * drivers to provide a backing region of memory for use as either an
 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
 */
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
					     gfp_t gfp_mask)
{
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
		    __GFP_NOMEMALLOC;
	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
				PAGE_FRAG_CACHE_MAX_ORDER);
	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
#endif
	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->va = page ? page_address(page) : NULL;

	return page;
}

void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (page_ref_sub_and_test(page, count))
		free_the_page(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);

void *page_frag_alloc_align(struct page_frag_cache *nc,
		      unsigned int fragsz, gfp_t gfp_mask,
		      unsigned int align_mask)
{
	unsigned int size = PAGE_SIZE;
	struct page *page;
	int offset;

	if (unlikely(!nc->va)) {
refill:
		page = __page_frag_cache_refill(nc, gfp_mask);
		if (!page)
			return NULL;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);

		/* reset page count bias and offset to start of new frag */
		nc->pfmemalloc = page_is_pfmemalloc(page);
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		nc->offset = size;
	}

	offset = nc->offset - fragsz;
	if (unlikely(offset < 0)) {
		page = virt_to_page(nc->va);

		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			goto refill;

		if (unlikely(nc->pfmemalloc)) {
			free_the_page(page, compound_order(page));
			goto refill;
		}

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* OK, page count is 0, we can safely set it */
		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	offset &= align_mask;
	nc->offset = offset;

	return nc->va + offset;
}
EXPORT_SYMBOL(page_frag_alloc_align);

/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void page_frag_free(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	if (unlikely(put_page_testzero(page)))
		free_the_page(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);
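/*
 * Example (illustrative): a minimal page_frag user, assuming the cache is
 * zero-initialised before first use. An align_mask of ~0u requests no
 * alignment beyond what the cache naturally provides:
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc_align(&frag_cache, 256, GFP_ATOMIC, ~0u);
 *	if (buf)
 *		page_frag_free(buf);
 */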
static void *make_alloc_exact(unsigned long addr, unsigned int order,
		size_t size)
{
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

/**
 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
 * back.
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	struct page *p;

	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);

	p = alloc_pages_node(nid, gfp_mask, order);
	if (!p)
		return NULL;
	return make_alloc_exact((unsigned long)page_address(p), order, size);
}

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);
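/*
 * Example (illustrative, assuming 4K pages): a 10KB request rounds up to an
 * order-2 (16KB) allocation internally, and make_alloc_exact() hands the
 * unused fourth page straight back to the allocator:
 *
 *	void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);
 *	if (buf)
 *		free_pages_exact(buf, 10 * 1024);
 */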
/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index.  For each
 * zone, the number of pages is calculated as:
 *
 *     nr_free_zone_pages = managed_pages - high_pages
 *
 * Return: number of pages beyond high watermark.
 */
static unsigned long nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned long sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone_managed_pages(zone);
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the high
 * watermark within ZONE_DMA and ZONE_NORMAL.
 *
 * Return: number of pages beyond high watermark within ZONE_DMA and
 * ZONE_NORMAL.
 */
unsigned long nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}
long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long pages[NR_LRU_LISTS];
	unsigned long reclaimable;
	struct zone *zone;
	int lru;

	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	available += reclaimable - min(reclaimable / 2, wmark_low);

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);
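/*
 * This estimate is what /proc/meminfo exposes as "MemAvailable".
 * Roughly (illustrative):
 *
 *	available = free - totalreserve
 *		  + pagecache - min(pagecache / 2, wmark_low)
 *		  + reclaimable - min(reclaimable / 2, wmark_low)
 */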
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);
#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#else
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif
/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * no node mask - aka implicit memory numa policy. Do not bother with
	 * the synchronization - read_mems_allowed_begin - because we do not
	 * have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}
#define K(x) ((x) << (PAGE_SHIFT-10))

static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}
/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
void show_free_areas(unsigned int filter, nodemask_t *nodemask)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

	for_each_populated_zone(zone) {
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
		" kernel_misc_reclaimable:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_zone_page_state(NR_BOUNCE),
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp: %lukB"
			" shmem_pmdmapped: %lukB"
			" anon_thp: %lukB"
#endif
			" writeback_tmp:%lukB"
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}

	for_each_populated_zone(zone) {
		int i;

		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" boost:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%luKB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->watermark_boost),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(free_pcp),
			K(this_cpu_read(zone->per_cpu_pageset->count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}

	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[MAX_ORDER], flags, total = 0;
		unsigned char types[MAX_ORDER];

		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
6232 * Builds allocation fallback zone lists.
6234 * Add all populated zones of a node to the zonelist.
6236 static int build_zonerefs_node(pg_data_t
*pgdat
, struct zoneref
*zonerefs
)
6239 enum zone_type zone_type
= MAX_NR_ZONES
;
6244 zone
= pgdat
->node_zones
+ zone_type
;
6245 if (populated_zone(zone
)) {
6246 zoneref_set_zone(zone
, &zonerefs
[nr_zones
++]);
6247 check_highest_zone(zone_type
);
6249 } while (zone_type
);
#ifdef CONFIG_NUMA

static int __parse_numa_zonelist_order(char *s)
{
	/*
	 * We used to support different zonelists modes but they turned
	 * out to be just not useful. Let's keep the warning in place
	 * if somebody still uses the cmd line parameter so that we do
	 * not fail it silently
	 */
	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
		pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
		return -EINVAL;
	}
	return 0;
}

char numa_zonelist_order[] = "Node";

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return __parse_numa_zonelist_order(buffer);
	return proc_dostring(table, write, buffer, length, ppos);
}
static int node_load[MAX_NUMNODES];

/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list. The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 *
 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
 */
int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = NUMA_NO_NODE;

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		if (!cpumask_empty(cpumask_of_node(n)))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= MAX_NUMNODES;
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}
/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
		unsigned nr_nodes)
{
	struct zoneref *zonerefs;
	int i;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;

	for (i = 0; i < nr_nodes; i++) {
		int nr_zones;

		pg_data_t *node = NODE_DATA(node_order[i]);

		nr_zones = build_zonerefs_node(node, zonerefs);
		zonerefs += nr_zones;
	}
	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}
/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	struct zoneref *zonerefs;
	int nr_zones;

	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;
	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}
/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */

static void build_zonelists(pg_data_t *pgdat)
{
	static int node_order[MAX_NUMNODES];
	int node, nr_nodes = 0;
	nodemask_t used_mask = NODE_MASK_NONE;
	int local_node, prev_node;

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	prev_node = local_node;

	memset(node_order, 0, sizeof(node_order));
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So we add a penalty to the first node in the same
		 * distance group to make the order round-robin.
		 */
		if (node_distance(local_node, node) !=
		    node_distance(local_node, prev_node))
			node_load[node] += 1;

		node_order[nr_nodes++] = node;
		prev_node = node;
	}

	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
	build_thisnode_zonelists(pgdat);
	pr_info("Fallback order for Node %d: ", local_node);
	for (node = 0; node < nr_nodes; node++)
		pr_cont("%d ", node_order[node]);
	pr_cont("\n");
}
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return node id of node used for "local" allocations.
 * I.e., first node id of first zone in arg node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 */
int local_memory_node(int node)
{
	struct zoneref *z;

	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
				 gfp_zone(GFP_KERNEL),
				 NULL);
	return zone_to_nid(z->zone);
}
#endif

static void setup_min_unmapped_ratio(void);
static void setup_min_slab_ratio(void);
#else	/* CONFIG_NUMA */
static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	struct zoneref *zonerefs;
	int nr_zones;

	local_node = pgdat->node_id;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}

	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}
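
/*
 * Resulting order, for illustration (assumed topology): with four online
 * nodes and local_node == 2, the two loops above visit nodes 3, 0 and 1,
 * so the fallback list holds node 2's zones first, then those of nodes
 * 3, 0 and 1 - i.e. node N+1 (modulo the number of nodes) follows the
 * local zones, as the comment above describes.
 */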
#endif	/* CONFIG_NUMA */
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
/* These effectively disable the pcplists in the boot pageset completely */
#define BOOT_PAGESET_HIGH	0
#define BOOT_PAGESET_BATCH	1
static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
static void __build_all_zonelists(void *data)
{
	int nid;
	int __maybe_unused cpu;
	pg_data_t *self = data;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);

#ifdef CONFIG_NUMA
	memset(node_load, 0, sizeof(node_load));
#endif

	/*
	 * This node is hotadded and no memory is yet present. So just
	 * building zonelists is fine - no need to touch other nodes.
	 */
	if (self && !node_online(self->node_id)) {
		build_zonelists(self);
	} else {
		/*
		 * All possible nodes have pgdat preallocated
		 * in free_area_init
		 */
		for_each_node(nid) {
			pg_data_t *pgdat = NODE_DATA(nid);

			build_zonelists(pgdat);
		}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
		/*
		 * We now know the "local memory node" for each node--
		 * i.e., the node of the first zone in the generic zonelist.
		 * Set up numa_mem percpu variable for on-line cpus. During
		 * boot, only the boot cpu should be on-line; we'll init the
		 * secondary cpus' numa_mem as they come on-line. During
		 * node/memory hotplug, we'll fixup all on-line cpus.
		 */
		for_each_online_cpu(cpu)
			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
	}

	spin_unlock(&lock);
}
static noinline void __init
build_all_zonelists_init(void)
{
	int cpu;

	__build_all_zonelists(NULL);

	/*
	 * Initialize the boot_pagesets that are going to be used
	 * for bootstrapping processors. The real pagesets for
	 * each zone will be allocated later when the per cpu
	 * allocator is available.
	 *
	 * boot_pagesets are used also for bootstrapping offline
	 * cpus if the system is already booted because the pagesets
	 * are needed to initialize allocators on a specific cpu too.
	 * F.e. the percpu allocator needs the page allocator which
	 * needs the percpu allocator in order to allocate its pagesets
	 * (a chicken-egg dilemma).
	 */
	for_each_possible_cpu(cpu)
		per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));

	mminit_verify_zonelist();
	cpuset_init_current_mems_allowed();
}
/*
 * unless system_state == SYSTEM_BOOTING.
 *
 * __ref due to call of __init annotated helper build_all_zonelists_init
 * [protected by SYSTEM_BOOTING].
 */
void __ref build_all_zonelists(pg_data_t *pgdat)
{
	unsigned long vm_total_pages;

	if (system_state == SYSTEM_BOOTING) {
		build_all_zonelists_init();
	} else {
		__build_all_zonelists(pgdat);
		/* cpuset refresh routine should be here */
	}
	/* Get the number of free pages beyond high watermark in all zones. */
	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
		nr_online_nodes,
		page_group_by_mobility_disabled ? "off" : "on",
		vm_total_pages);
#ifdef CONFIG_NUMA
	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}
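
/*
 * Worked example for the mobility-grouping cutoff above (illustrative
 * values for one common configuration): with 4K pages and
 * pageblock_order == 9, pageblock_nr_pages is 512; with MIGRATE_TYPES
 * == 6 the threshold is 512 * 6 = 3072 pages, i.e. 12MiB. A system with
 * less memory than that cannot hold even one pageblock per migratetype,
 * so grouping by mobility is disabled. (The exact MIGRATE_TYPES value
 * depends on CONFIG_CMA and CONFIG_MEMORY_ISOLATION.)
 */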
/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}
/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function. They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, zone_end_pfn))
				break;
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG)
			__SetPageReserved(page);

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
		pfn++;
	}
}
#ifdef CONFIG_ZONE_DEVICE
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
					  unsigned long zone_idx, int nid,
					  struct dev_pagemap *pgmap)
{

	__init_single_page(page, pfn, zone_idx, nid);

	/*
	 * Mark page reserved as it will need to wait for onlining
	 * phase for it to be fully associated with a zone.
	 *
	 * We can use the non-atomic __set_bit operation for setting
	 * the flag as we are still initializing the pages.
	 */
	__SetPageReserved(page);

	/*
	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
	 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
	 * ever freed or placed on a driver-private list.
	 */
	page->pgmap = pgmap;
	page->zone_device_data = NULL;

	/*
	 * Mark the block movable so that blocks are reserved for
	 * movable at startup. This will force kernel allocations
	 * to reserve their blocks rather than leaking throughout
	 * the address space during boot when many long-lived
	 * kernel allocations are made.
	 *
	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
	 * because this is done early in section_activate()
	 */
	if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		cond_resched();
	}
}
/*
 * With compound page geometry and when struct pages are stored in ram most
 * tail pages are reused. Consequently, the amount of unique struct pages to
 * initialize is a lot smaller than the total amount of struct pages being
 * mapped. This is a paired / mild layering violation with explicit knowledge
 * of how the sparse_vmemmap internals handle compound pages in the lack
 * of an altmap. See vmemmap_populate_compound_pages().
 */
static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
					      unsigned long nr_pages)
{
	return is_power_of_2(sizeof(struct page)) &&
		!altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages;
}
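
/*
 * Worked example (illustrative; assumes a 64-byte struct page and 4K
 * PAGE_SIZE): without an altmap, 2 * (4096 / 64) = 128 struct pages are
 * initialized per compound page - the ones backed by the first two
 * vmemmap pages - regardless of nr_pages, because the remaining tail
 * pages reuse those same vmemmap pages.
 */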
static void __ref memmap_init_compound(struct page *head,
				       unsigned long head_pfn,
				       unsigned long zone_idx, int nid,
				       struct dev_pagemap *pgmap,
				       unsigned long nr_pages)
{
	unsigned long pfn, end_pfn = head_pfn + nr_pages;
	unsigned int order = pgmap->vmemmap_shift;

	__SetPageHead(head);
	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
		prep_compound_tail(head, pfn - head_pfn);
		set_page_count(page, 0);

		/*
		 * The first tail page stores compound_mapcount_ptr() and
		 * compound_order() and the second tail page stores
		 * compound_pincount_ptr(). Call prep_compound_head() after
		 * the first and second tail pages have been initialized to
		 * not have the data overwritten.
		 */
		if (pfn == head_pfn + 2)
			prep_compound_head(head, order);
	}
}
void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
	unsigned long zone_idx = zone_idx(zone);
	unsigned long start = jiffies;
	int nid = pgdat->node_id;

	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
		return;

	/*
	 * The call to memmap_init should have already taken care
	 * of the pages reserved for the memmap, so we can just jump to
	 * the end of that region and start processing the device pages.
	 */
	if (altmap) {
		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
		nr_pages = end_pfn - start_pfn;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);

		if (pfns_per_compound == 1)
			continue;

		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
				     compound_nr_pages(altmap, pfns_per_compound));
	}

	pr_info("%s initialised %lu pages in %ums\n", __func__,
		nr_pages, jiffies_to_msecs(jiffies - start));
}
#endif
static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}
/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are zeroed and initialized by going through __init_single_page() during
 * memmap_init_zone_range().
 *
 * But, there could be struct pages that correspond to holes in
 * memblock.memory. This can happen because of the following reasons:
 * - physical memory bank size is not necessarily the exact multiple of the
 *   arbitrary section size
 * - early reserved memory may not be listed in memblock.memory
 * - memory layouts defined with memmap= kernel parameter may not align
 *   nicely with memmap sections
 *
 * Explicitly initialize those struct pages so that:
 * - PG_Reserved is set
 * - zone and node links point to zone and node that span the page if the
 *   hole is in the middle of a zone
 * - zone and node links point to adjacent zone/node if the hole falls on
 *   the zone boundary; the pages in such holes will be prepended to the
 *   zone/node above the hole except for the trailing pages in the last
 *   section that will be appended to the zone/node below.
 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
				+ pageblock_nr_pages - 1;
			continue;
		}
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	if (pgcnt)
		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
			node, zone_names[zone], pgcnt);
}
static void __init memmap_init_zone_range(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *hole_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);

	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);

	if (start_pfn >= end_pfn)
		return;

	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);

	if (*hole_pfn < start_pfn)
		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);

	*hole_pfn = end_pfn;
}
static void __init memmap_init(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long hole_pfn = 0;
	int i, j, zone_id = 0, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		struct pglist_data *node = NODE_DATA(nid);

		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = node->node_zones + j;

			if (!populated_zone(zone))
				continue;

			memmap_init_zone_range(zone, start_pfn, end_pfn,
					       &hole_pfn);
			zone_id = j;
		}
	}

#ifdef CONFIG_SPARSEMEM
	/*
	 * Initialize the memory map for hole in the range [memory_end,
	 * section_end].
	 * Append the pages in this hole to the highest zone in the last
	 * node.
	 * The call to init_unavailable_range() is outside the ifdef to
	 * silence the compiler warning about zone_id set but not used;
	 * for FLATMEM it is a nop anyway
	 */
	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
	if (hole_pfn < end_pfn)
#endif
		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}
void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr, int nid, bool exact_nid)
{
	void *ptr;

	if (exact_nid)
		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
						   MEMBLOCK_ALLOC_ACCESSIBLE,
						   nid);
	else
		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
						 MEMBLOCK_ALLOC_ACCESSIBLE,
						 nid);

	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The number of pages to batch allocate is either ~0.1%
	 * of the zone or 1MB, whichever is smaller. The batch
	 * size is striking a balance between allocation latency
	 * and zone lock contention.
	 */
	batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions:
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks. As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}
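
/*
 * Worked example (illustrative sizes): for a 4GiB zone with 4K pages,
 * zone_managed_pages(zone) >> 10 is about 1024 while (1024 * 1024) /
 * PAGE_SIZE is 256, so batch = min(1024, 256) = 256, then 256 / 4 = 64,
 * and rounddown_pow_of_two(64 + 32) - 1 = 63 pages per pcp batch.
 */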
static int zone_highsize(struct zone *zone, int batch, int cpu_online)
{
#ifdef CONFIG_MMU
	int high;
	int nr_split_cpus;
	unsigned long total_pages;

	if (!percpu_pagelist_high_fraction) {
		/*
		 * By default, the high value of the pcp is based on the zone
		 * low watermark so that if they are full then background
		 * reclaim will not be started prematurely.
		 */
		total_pages = low_wmark_pages(zone);
	} else {
		/*
		 * If percpu_pagelist_high_fraction is configured, the high
		 * value is based on a fraction of the managed pages in the
		 * zone.
		 */
		total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
	}

	/*
	 * Split the high value across all online CPUs local to the zone. Note
	 * that early in boot that CPUs may not be online yet and that during
	 * CPU hotplug that the cpumask is not yet updated when a CPU is being
	 * onlined. For memory nodes that have no CPUs, split pcp->high across
	 * all online CPUs to mitigate the risk that reclaim is triggered
	 * prematurely due to pages stored on pcp lists.
	 */
	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
	if (!nr_split_cpus)
		nr_split_cpus = num_online_cpus();
	high = total_pages / nr_split_cpus;

	/*
	 * Ensure high is at least batch*4. The multiple is based on the
	 * historical relationship between high and batch.
	 */
	high = max(high, batch << 2);

	return high;
#else
	return 0;
#endif
}
/*
 * pcp->high and pcp->batch values are related and generally batch is lower
 * than high. They are also related to pcp->count such that count is lower
 * than high, and as soon as it reaches high, the pcplist is flushed.
 *
 * However, guaranteeing these relations at all times would require e.g. write
 * barriers here but also careful usage of read barriers at the read side, and
 * thus be prone to error and bad for performance. Thus the update only prevents
 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
 * can cope with those fields changing asynchronously, and fully trust only the
 * pcp->count field on the local CPU with interrupts disabled.
 *
 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
 * outside of boot time (or some other assurance that no concurrent updaters
 * exist).
 */
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
		unsigned long batch)
{
	WRITE_ONCE(pcp->batch, batch);
	WRITE_ONCE(pcp->high, high);
}
static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
{
	int pindex;

	memset(pcp, 0, sizeof(*pcp));
	memset(pzstats, 0, sizeof(*pzstats));

	spin_lock_init(&pcp->lock);
	for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
		INIT_LIST_HEAD(&pcp->lists[pindex]);

	/*
	 * Set batch and high values safe for a boot pageset. A true percpu
	 * pageset's initialization will update them subsequently. Here we don't
	 * need to be as careful as pageset_update() as nobody can access the
	 * pageset yet.
	 */
	pcp->high = BOOT_PAGESET_HIGH;
	pcp->batch = BOOT_PAGESET_BATCH;
	pcp->free_factor = 0;
}
static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
		unsigned long batch)
{
	struct per_cpu_pages *pcp;
	int cpu;

	for_each_possible_cpu(cpu) {
		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
		pageset_update(pcp, high, batch);
	}
}
/*
 * Calculate and set new high and batch values for all per-cpu pagesets of a
 * zone based on the zone's size.
 */
static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
{
	int new_high, new_batch;

	new_batch = max(1, zone_batchsize(zone));
	new_high = zone_highsize(zone, new_batch, cpu_online);

	if (zone->pageset_high == new_high &&
	    zone->pageset_batch == new_batch)
		return;

	zone->pageset_high = new_high;
	zone->pageset_batch = new_batch;

	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
}
void __meminit setup_zone_pageset(struct zone *zone)
{
	int cpu;

	/* Size may be 0 on !SMP && !NUMA */
	if (sizeof(struct per_cpu_zonestat) > 0)
		zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);

	zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
	for_each_possible_cpu(cpu) {
		struct per_cpu_pages *pcp;
		struct per_cpu_zonestat *pzstats;

		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		per_cpu_pages_init(pcp, pzstats);
	}

	zone_set_pageset_high_and_batch(zone, 0);
}
/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
void __init setup_per_cpu_pageset(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int __maybe_unused cpu;

	for_each_populated_zone(zone)
		setup_zone_pageset(zone);

#ifdef CONFIG_NUMA
	/*
	 * Unpopulated zones continue using the boot pagesets.
	 * The numa stats for these pagesets need to be reset.
	 * Otherwise, they will end up skewing the stats of
	 * the nodes these zones are associated with.
	 */
	for_each_possible_cpu(cpu) {
		struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
		memset(pzstats->vm_numa_event, 0,
		       sizeof(pzstats->vm_numa_event));
	}
#endif

	for_each_online_pgdat(pgdat)
		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
}
static __meminit void zone_pcp_init(struct zone *zone)
{
	/*
	 * per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
	zone->per_cpu_pageset = &boot_pageset;
	zone->per_cpu_zonestats = &boot_zonestats;
	zone->pageset_high = BOOT_PAGESET_HIGH;
	zone->pageset_batch = BOOT_PAGESET_BATCH;

	if (populated_zone(zone))
		pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
			 zone->present_pages, zone_batchsize(zone));
}
void __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int zone_idx = zone_idx(zone) + 1;

	if (zone_idx > pgdat->nr_zones)
		pgdat->nr_zones = zone_idx;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	zone->initialized = 1;
}
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}
/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory addresses.
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}
/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	/* When hot-adding a new node from cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	/* Get the start and end of the zone */
	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				zone_start_pfn, zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return *zone_end_pfn - *zone_start_pfn;
}
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * Return: the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}
/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	unsigned long zone_start_pfn, zone_end_pfn;
	unsigned long nr_absent;

	/* When hot-adding a new node from cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_mem_region(r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long spanned, absent;
		unsigned long size, real_size;

		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
						     node_start_pfn,
						     node_end_pfn,
						     &zone_start_pfn,
						     &zone_end_pfn);
		absent = zone_absent_pages_in_node(pgdat->node_id, i,
						   node_start_pfn,
						   node_end_pfn);

		size = spanned;
		real_size = size - absent;

		if (size)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = size;
		zone->present_pages = real_size;
#if defined(CONFIG_MEMORY_HOTPLUG)
		zone->present_early_pages = real_size;
#endif

		totalpages += size;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
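
/*
 * Worked example (illustrative sizes): a 1GiB zone (0x40000 4K pages)
 * starting on a pageblock boundary, with pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4, has 0x40000 >> 9 = 512 pageblocks and needs
 * 512 * 4 = 2048 bits; that is already a multiple of 64, so
 * usemap_size() returns 2048 / 8 = 256 bytes.
 */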
static void __ref setup_usemap(struct zone *zone)
{
	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
					       zone->spanned_pages);
	zone->pageblock_flags = NULL;
	if (usemapsize) {
		zone->pageblock_flags =
			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
					    zone_to_nid(zone));
		if (!zone->pageblock_flags)
			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
			      usemapsize, zone->name, zone_to_nid(zone));
	}
}
#else
static inline void setup_usemap(struct zone *zone) {}
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
	unsigned int order = MAX_ORDER - 1;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	/* Don't let pageblocks exceed the maximum allocation granularity. */
	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
		order = HUGETLB_PAGE_ORDER;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64 and
	 * powerpc.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __init set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
						unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated regions may not be naturally aligned on page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;

	spin_lock_init(&ds_queue->split_queue_lock);
	INIT_LIST_HEAD(&ds_queue->split_queue);
	ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif

#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
	init_waitqueue_head(&pgdat->kcompactd_wait);
}
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif
static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
	int i;

	pgdat_resize_init(pgdat);

	pgdat_init_split_queue(pgdat);
	pgdat_init_kcompactd(pgdat);

	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);

	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
		init_waitqueue_head(&pgdat->reclaim_wait[i]);

	pgdat_page_ext_init(pgdat);
	lruvec_init(&pgdat->__lruvec);
}
static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
							unsigned long remaining_pages)
{
	atomic_long_set(&zone->managed_pages, remaining_pages);
	zone_set_nid(zone, nid);
	zone->name = zone_names[idx];
	zone->zone_pgdat = NODE_DATA(nid);
	spin_lock_init(&zone->lock);
	zone_seqlock_init(zone);
	zone_pcp_init(zone);
}
/*
 * Set up the zone data structures
 * - init pgdat internals
 * - init all zones belonging to this node
 *
 * NOTE: this function is only called during memory hotplug
 */
#ifdef CONFIG_MEMORY_HOTPLUG
void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
{
	int nid = pgdat->node_id;
	enum zone_type z;
	int cpu;

	pgdat_init_internals(pgdat);

	if (pgdat->per_cpu_nodestats == &boot_nodestats)
		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * Reset the nr_zones, order and highest_zoneidx before reuse.
	 * Note that kswapd will init kswapd_highest_zoneidx properly
	 * when it starts in the near future.
	 */
	pgdat->nr_zones = 0;
	pgdat->kswapd_order = 0;
	pgdat->kswapd_highest_zoneidx = 0;
	pgdat->node_start_pfn = 0;
	for_each_online_cpu(cpu) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
		memset(p, 0, sizeof(*p));
	}

	for (z = 0; z < MAX_NR_ZONES; z++)
		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
}
#endif
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 * NOTE: this function is only called during early init.
 */
static void __init free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;

	pgdat_init_internals(pgdat);
	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, freesize, memmap_pages;

		size = zone->spanned_pages;
		freesize = zone->present_pages;

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = calc_memmap_size(size, freesize);
		if (!is_highmem_idx(j)) {
			if (freesize >= memmap_pages) {
				freesize -= memmap_pages;
				if (memmap_pages)
					pr_debug("  %s zone: %lu pages used for memmap\n",
						 zone_names[j], memmap_pages);
			} else
				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
					zone_names[j], memmap_pages, freesize);
		}

		/* Account for reserved pages */
		if (j == 0 && freesize > dma_reserve) {
			freesize -= dma_reserve;
			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += freesize;
		/* Charge for highmem memmap if there are enough kernel pages */
		else if (nr_kernel_pages > memmap_pages * 2)
			nr_kernel_pages -= memmap_pages;
		nr_all_pages += freesize;

		/*
		 * Set an approximate value for lowmem here, it will be adjusted
		 * when the bootmem allocator frees pages into the buddy system.
		 * And all highmem pages will be managed by the buddy system.
		 */
		zone_init_internals(zone, j, nid, freesize);

		if (!size)
			continue;

		set_pageblock_order();
		setup_usemap(zone);
		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
	}
}
#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	unsigned long __maybe_unused start = 0;
	unsigned long __maybe_unused offset = 0;

	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	offset = pgdat->node_start_pfn - start;
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		end = pgdat_end_pfn(pgdat);
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				   pgdat->node_id, false);
		if (!map)
			panic("Failed to allocate %ld bytes for node %d memory map\n",
			      size, pgdat->node_id);
		pgdat->node_mem_map = map + offset;
	}
	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
		 __func__, pgdat->node_id, (unsigned long)pgdat,
		 (unsigned long)pgdat->node_mem_map);
#ifndef CONFIG_NUMA
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= offset;
	}
#endif
}
#else
static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
#endif /* CONFIG_FLATMEM */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
#endif
static void __init free_area_init_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = 0;
	unsigned long end_pfn = 0;

	/* pg_data_t should be reset to zero when it's allocated */
	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = start_pfn;
	pgdat->per_cpu_nodestats = NULL;

	if (start_pfn != end_pfn) {
		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
	} else {
		pr_info("Initmem setup node %d as memoryless\n", nid);
	}

	calculate_node_totalpages(pgdat, start_pfn, end_pfn);

	alloc_node_mem_map(pgdat);
	pgdat_set_deferred_range(pgdat);

	free_area_init_core(pgdat);
}

static void __init free_area_init_memoryless_node(int nid)
{
	free_area_init_node(nid);
}
#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
void __init setup_nr_node_ids(void)
{
	unsigned int highest;

	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
	nr_node_ids = highest + 1;
}
#endif
/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
 * shifted, 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Return: the determined alignment in pfn's. 0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = NUMA_NO_NODE;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}
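
/*
 * Worked example (illustrative layout): two 1GiB nodes at [0x0, 0x40000)
 * and [0x40000, 0x80000) in 4K pfns. When the walk reaches node 1,
 * __ffs(0x40000) == 18 gives mask = ~0x3ffff, and the while loop keeps
 * it there since last_end (0x40000) is not covered by the coarser mask,
 * so ~accl_mask + 1 == 0x40000 pfns - 1GiB alignment, matching the
 * example in the comment above.
 */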
/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * Return: the minimum PFN based on information provided via
 * memblock_set_node().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return PHYS_PFN(memblock_start_of_DRAM());
}
/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}
/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others.
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		unsigned long start_pfn, end_pfn;

		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (zone_movable_pfn[nid] >= end_pfn)
			zone_movable_pfn[nid] = 0;
	}

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}
/* Any regular or high memory on that node ? */
static void check_for_memory(pg_data_t *pgdat, int nid)
{
	enum zone_type zone_type;

	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (populated_zone(zone)) {
			if (IS_ENABLED(CONFIG_HIGHMEM))
				node_set_state(nid, N_HIGH_MEMORY);
			if (zone_type <= ZONE_NORMAL)
				node_set_state(nid, N_NORMAL_MEMORY);
			break;
		}
	}
}
/*
 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
 * such cases we allow max_zone_pfn sorted in the descending order
 */
bool __weak arch_has_descending_max_zone_pfns(void)
{
	return false;
}
/**
 * free_area_init - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid, zone;
	bool descending;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));

	start_pfn = find_min_pfn_with_active_regions();
	descending = arch_has_descending_max_zone_pfns();

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (descending)
			zone = MAX_NR_ZONES - i - 1;
		else
			zone = i;

		if (zone == ZONE_MOVABLE)
			continue;

		end_pfn = max(max_zone_pfn[zone], start_pfn);
		arch_zone_lowest_possible_pfn[zone] = start_pfn;
		arch_zone_highest_possible_pfn[zone] = end_pfn;

		start_pfn = end_pfn;
	}

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	pr_info("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		pr_info("  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			pr_cont("empty\n");
		else
			pr_cont("[mem %#018Lx-%#018Lx]\n",
				(u64)arch_zone_lowest_possible_pfn[i]
					<< PAGE_SHIFT,
				((u64)arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	pr_info("Movable zone start for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			pr_info("  Node %d: %#018Lx\n", i,
			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
	}

	/*
	 * Print out the early node map, and initialize the
	 * subsection-map relative to active online memory ranges to
	 * enable future "sub-section" extensions of the memory map.
	 */
	pr_info("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			((u64)end_pfn << PAGE_SHIFT) - 1);
		subsection_map_init(start_pfn, end_pfn - start_pfn);
	}

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	for_each_node(nid) {
		pg_data_t *pgdat;

		if (!node_online(nid)) {
			pr_info("Initializing node %d as memoryless\n", nid);

			/* Allocator not initialized yet */
			pgdat = arch_alloc_nodedata(nid);
			if (!pgdat) {
				pr_err("Cannot allocate %zuB for node %d.\n",
						sizeof(*pgdat), nid);
				continue;
			}
			arch_refresh_nodedata(nid, pgdat);
			free_area_init_memoryless_node(nid);

			/*
			 * We do not want to confuse userspace by sysfs
			 * files/directories for node without any memory
			 * attached to it, so this node is not marked as
			 * N_MEMORY and not marked online so that no sysfs
			 * hierarchy will be created via register_one_node for
			 * it. The pgdat will get fully initialized by
			 * hotadd_init_pgdat() when memory is hotplugged into
			 * this node.
			 */
			continue;
		}

		pgdat = NODE_DATA(nid);
		free_area_init_node(nid);

		/* Any memory on that node */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_MEMORY);
		check_for_memory(pgdat, nid);
	}

	memmap_init();
}
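
/*
 * Illustrative caller sketch (an assumption modeled on x86's
 * zone_sizes_init(), not part of this file): an architecture fills
 * max_zone_pfn and hands it over, e.g.
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *
 *	max_zone_pfns[ZONE_DMA]    = min(MAX_DMA_PFN, max_low_pfn);
 *	max_zone_pfns[ZONE_DMA32]  = min(MAX_DMA32_PFN, max_low_pfn);
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *	free_area_init(max_zone_pfns);
 *
 * so each zone ends at its array entry and starts where the previous zone
 * ended, as described in the kernel-doc above.
 */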
static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}
/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
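
/*
 * Illustrative boot-line examples (not code): "kernelcore=1G" ends up as
 * required_kernelcore = 1G >> PAGE_SHIFT pages, "kernelcore=50%" sets
 * required_kernelcore_percent instead, and "kernelcore=mirror" only flips
 * mirrored_kernelcore without touching either value.
 */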
void adjust_managed_page_count(struct page *page, long count)
{
	atomic_long_add(count, &page_zone(page)->managed_pages);
	totalram_pages_add(count);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(count);
#endif
}
EXPORT_SYMBOL(adjust_managed_page_count);
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		struct page *page = virt_to_page(pos);
		void *direct_map_addr;

		/*
		 * 'direct_map_addr' might be different from 'pos'
		 * because some architectures' virt_to_page()
		 * work with aliases. Getting the direct map
		 * address ensures that we get a _writeable_
		 * alias for the memset().
		 */
		direct_map_addr = page_address(page);
		/*
		 * Perform a kasan-unchecked memset() since this memory
		 * has not been initialized.
		 */
		direct_map_addr = kasan_reset_tag(direct_map_addr);
		if ((unsigned int)poison <= 0xFF)
			memset(direct_map_addr, poison, PAGE_SIZE);

		free_reserved_page(page);
	}

	if (pages && s)
		pr_info("Freeing %s memory: %ldK\n", s, K(pages));

	return pages;
}
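
/*
 * Usage sketch (hedged; this mirrors free_initmem_default() in
 * include/linux/mm.h, which frees the .init sections at boot):
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 */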
void __init mem_init_print_info(void)
{
	unsigned long physpages, codesize, datasize, rosize, bss_size;
	unsigned long init_code_size, init_data_size;

	physpages = get_num_physpages();
	codesize = _etext - _stext;
	datasize = _edata - _sdata;
	rosize = __end_rodata - __start_rodata;
	bss_size = __bss_stop - __bss_start;
	init_data_size = __init_end - __init_begin;
	init_code_size = _einittext - _sinittext;

	/*
	 * Detect special cases and adjust section sizes accordingly:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
	 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		     _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef	adj_init_size

	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef	CONFIG_HIGHMEM
		", %luK highmem"
#endif
		")\n",
		K(nr_free_pages()), K(physpages),
		codesize >> 10, datasize >> 10, rosize >> 10,
		(init_data_size + init_code_size) >> 10, bss_size >> 10,
		K(physpages - totalram_pages() - totalcma_pages),
		K(totalcma_pages)
#ifdef	CONFIG_HIGHMEM
		, K(totalhigh_pages())
#endif
		);
}
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}
static int page_alloc_cpu_dead(unsigned int cpu)
{
	struct zone *zone;

	lru_add_drain_cpu(cpu);
	mlock_page_drain_remote(cpu);
	drain_pages(cpu);

	/*
	 * Spill the event counters of the dead processor
	 * into the current processors event counters.
	 * This artificially elevates the count of the current
	 * processor.
	 */
	vm_events_fold_cpu(cpu);

	/*
	 * Zero the differential counters of the dead processor
	 * so that the vm statistics are consistent.
	 *
	 * This is only okay since the processor is dead and cannot
	 * race with what we are doing.
	 */
	cpu_vm_stats_fold(cpu);

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 0);

	return 0;
}

static int page_alloc_cpu_online(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 1);
	return 0;
}
#ifdef CONFIG_NUMA
int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif
void __init page_alloc_init(void)
{
	int ret;

#ifdef CONFIG_NUMA
	if (num_node_state(N_MEMORY) == 1)
		hashdist = 0;
#endif

	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
					"mm/page_alloc:pcp",
					page_alloc_cpu_online,
					page_alloc_cpu_dead);
	WARN_ON(ret < 0);
}
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 * or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {

		pgdat->totalreserve_pages = 0;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;
			unsigned long managed_pages = zone_managed_pages(zone);

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > managed_pages)
				max = managed_pages;

			pgdat->totalreserve_pages += max;

			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}
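
/*
 * Worked example (illustrative numbers): if a zone's largest
 * lowmem_reserve[] entry is 32768 pages, its high watermark is 12288 pages
 * and it has plenty of managed pages, it contributes
 * min(32768 + 12288, managed_pages) = 45056 pages to totalreserve_pages.
 */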
/*
 * setup_per_zone_lowmem_reserve - called whenever
 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 * has a correct pages reserved value, so an adequate number of
 * pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
			struct zone *zone = &pgdat->node_zones[i];
			int ratio = sysctl_lowmem_reserve_ratio[i];
			bool clear = !ratio || !zone_managed_pages(zone);
			unsigned long managed_pages = 0;

			for (j = i + 1; j < MAX_NR_ZONES; j++) {
				struct zone *upper_zone = &pgdat->node_zones[j];

				managed_pages += zone_managed_pages(upper_zone);

				if (clear)
					zone->lowmem_reserve[j] = 0;
				else
					zone->lowmem_reserve[j] = managed_pages / ratio;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
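
/*
 * Worked example (illustrative): with the default
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] = 256 and 1048576 managed 4K
 * pages (4GiB) in the zones above it, ZONE_DMA32 keeps
 * lowmem_reserve = 1048576 / 256 = 4096 pages out of reach of allocations
 * that could equally well have been satisfied from the higher zones.
 */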
static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone_managed_pages(zone);
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone_managed_pages(zone);
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone_managed_pages(zone) / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->_watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->_watermark[WMARK_MIN] = tmp;
		}

		/*
		 * Set the kswapd watermarks distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
		tmp = max_t(u64, tmp >> 2,
			    mult_frac(zone_managed_pages(zone),
				      watermark_scale_factor, 10000));

		zone->watermark_boost = 0;
		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
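
/*
 * Worked example (illustrative): with min_free_kbytes = 65536 and 4K pages,
 * pages_min = 65536 >> (12 - 10) = 16384 pages, spread over the lowmem
 * zones in proportion to their managed size; a zone holding half of lowmem
 * gets WMARK_MIN = 8192 pages. With the default watermark_scale_factor of
 * 10, the LOW and HIGH marks then sit at least
 * zone_managed_pages() * 10 / 10000 (0.1% of the zone) above MIN and LOW
 * respectively.
 */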
/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	struct zone *zone;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);

	/*
	 * The watermark sizes have changed, so update the pcpu batch
	 * and high limits or the limits may be inappropriate.
	 */
	for_each_zone(zone)
		zone_pcp_update(zone, 0);
}
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (256MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
void calculate_min_free_kbytes(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes)
		min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
	else
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
			new_min_free_kbytes, user_min_free_kbytes);
}
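
/*
 * Worked example (illustrative): 16GiB of lowmem gives lowmem_kbytes =
 * 16777216, so new_min_free_kbytes = sqrt(16777216 * 16) = 16384 (16MiB),
 * matching the table above. The 262144 (256MiB) upper clamp is only
 * reached on machines with roughly 4TiB of lowmem or more.
 */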
int __meminit init_per_zone_wmark_min(void)
{
	calculate_min_free_kbytes();
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
	setup_min_unmapped_ratio();
	setup_min_slab_ratio();
#endif

	khugepaged_min_free_kbytes_update();

	return 0;
}
postcore_initcall(init_per_zone_wmark_min)
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 * that we can call two helper functions whenever min_free_kbytes
 * changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}
int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_per_zone_wmarks();

	return 0;
}
#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_unmapped_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
				sysctl_min_unmapped_ratio) / 100;
}

int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_min_unmapped_ratio();

	return 0;
}
static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
				sysctl_min_slab_ratio) / 100;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_min_slab_ratio();

	return 0;
}
#endif
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 * whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio only makes sense in
 * relation to the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int i;

	proc_dointvec_minmax(table, write, buffer, length, ppos);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (sysctl_lowmem_reserve_ratio[i] < 1)
			sysctl_lowmem_reserve_ratio[i] = 0;
	}

	setup_per_zone_lowmem_reserve();
	return 0;
}
/*
 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to buddy allocator.
 */
int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_high_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_high_fraction &&
	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
		goto out;

	for_each_populated_zone(zone)
		zone_set_pageset_high_and_batch(zone, 0);
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}
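
/*
 * Illustrative example: writing 8 (the smallest accepted non-zero value,
 * MIN_PERCPU_PAGELIST_HIGH_FRACTION) to
 * /proc/sys/vm/percpu_pagelist_high_fraction lets each zone's pcplists
 * cache up to about zone_managed_pages(zone) / 8 pages, split across the
 * local online CPUs, before pages get flushed back to the buddy lists.
 */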
#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
 * Returns the number of pages that arch has reserved but
 * is not known to alloc_large_system_hash().
 */
static unsigned long __init arch_reserved_kernel_pages(void)
{
	return 0;
}
#endif

/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased but at
 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
 * quadruples the scale is increased by one, which means the size of hash table
 * only doubles, instead of quadrupling as well.
 * Because 32-bit systems cannot have large physical memory, where this scaling
 * makes sense, it is disabled on such platforms.
 */
#if __BITS_PER_LONG > 32
#define ADAPT_SCALE_BASE	(64ul << 30)
#define ADAPT_SCALE_SHIFT	2
#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
#endif
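
/*
 * Worked example (illustrative): on a 1TiB machine the adaptive-scale loop
 * in alloc_large_system_hash() below bumps scale twice (64G -> 256G -> 1T),
 * so a table sized from total memory comes out a quarter of what purely
 * linear scaling would produce.
 */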
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table;
	gfp_t gfp_flags;
	bool virt;
	bool huge = false;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries -= arch_reserved_kernel_pages();

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

#if __BITS_PER_LONG > 32
		if (!high_limit) {
			unsigned long adapt;

			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
			     adapt <<= ADAPT_SCALE_SHIFT)
				scale++;
		}
#endif

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
	do {
		virt = false;
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY) {
			if (flags & HASH_ZERO)
				table = memblock_alloc(size, SMP_CACHE_BYTES);
			else
				table = memblock_alloc_raw(size,
							   SMP_CACHE_BYTES);
		} else if (get_order(size) >= MAX_ORDER || hashdist) {
			table = vmalloc_huge(size, gfp_flags);
			virt = true;
			if (table)
				huge = is_vm_area_hugepages(table);
		} else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
			table = alloc_pages_exact(size, gfp_flags);
			kmemleak_alloc(table, size, 1, gfp_flags);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
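
/*
 * Usage sketch (hedged; modeled on the inode cache setup in fs/inode.c,
 * shown only to illustrate the calling convention):
 *
 *	inode_hashtable =
 *		alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries,
 *					14,
 *					HASH_EARLY | HASH_ZERO,
 *					&i_hash_shift,
 *					&i_hash_mask,
 *					0,
 *					0);
 */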
#ifdef CONFIG_CONTIG_ALLOC
#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* Usage: See admin-guide/dynamic-debug-howto.rst */
static void alloc_contig_dump_pages(struct list_head *page_list)
{
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");

	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
		struct page *page;

		dump_stack();
		list_for_each_entry(page, page_list, lru)
			dump_page(page, "migration failure");
	}
}
#else
static inline void alloc_contig_dump_pages(struct list_head *page_list)
{
}
#endif
/* [start, end) must belong to a single zone. */
int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned int nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;
	struct migration_target_control mtc = {
		.nid = zone_to_nid(cc->zone),
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	lru_cache_disable();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			ret = isolate_migratepages_range(cc, pfn, end);
			if (ret && ret != -EAGAIN)
				break;
			pfn = cc->migrate_pfn;
			tries = 0;
		} else if (++tries == 5) {
			ret = -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);

		/*
		 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
		 * to retry again over this error, so do the same here.
		 */
		if (ret == -ENOMEM)
			break;
	}

	lru_cache_enable();
	if (ret < 0) {
		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
			alloc_contig_dump_pages(&cc->migratepages);
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}
/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock aligned. The PFN range must
 * belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range. Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code. On success all
 * pages which PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE. Because pageblock and max order pages may
	 * have different sizes, and due to the way page allocator
	 * work, start_isolate_page_range() has special handlings for this.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (ie. pages that
	 * we are interested in). This will put all the pages in
	 * range back to page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from page
	 * allocator removing them from the buddy system. This way
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
	if (ret)
		goto done;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes problem.
	 * So, just fall through. test_pages_isolated() has a tracepoint
	 * which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated. So, if we fall through be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within a pageblock_nr_pages
	 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
	 * more, all pages in [start, end) are free in page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is remove them from page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of interesting range may be not aligned with pages that
	 * page allocator holds, ie. they can be part of higher order
	 * pages. Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = buddy_order(pfn_to_page(outer_start));

		/*
		 * outer_start page could be small order buddy page and
		 * it doesn't include start page. Adjust outer_start
		 * in this case to report failed page properly
		 * on tracepoint in test_pages_isolated()
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(start, end, migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range);
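
/*
 * Usage sketch (hedged, error handling trimmed): a caller needing 1024
 * physically contiguous movable pages at a known, suitable PFN could do
 *
 *	if (!alloc_contig_range(pfn, pfn + 1024, MIGRATE_MOVABLE,
 *				GFP_KERNEL)) {
 *		... use pfn_to_page(pfn) ...
 *		free_contig_range(pfn, 1024);
 *	}
 */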
static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}
static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}
/**
 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask to limit search and used during compaction
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range(). It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range(). This routine is intended
 * for allocation requests which can not be fulfilled with the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary. If nr_pages is a
 * power of two, then allocated range is also guaranteed to be aligned to same
 * nr_pages (e.g. 1GB request would be aligned to 1GB).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */
void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%lu pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);
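
/*
 * Usage sketch (hedged): allocating and releasing a 4MiB physically
 * contiguous buffer (1024 pages of 4K) near the current node:
 *
 *	struct page *page = alloc_contig_pages(1024, GFP_KERNEL,
 *					       numa_node_id(), NULL);
 *	if (page)
 *		free_contig_range(page_to_pfn(page), 1024);
 */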
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void zone_pcp_update(struct zone *zone, int cpu_online)
{
	mutex_lock(&pcp_batch_high_lock);
	zone_set_pageset_high_and_batch(zone, cpu_online);
	mutex_unlock(&pcp_batch_high_lock);
}
/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all cpus. A concurrent page freeing on another CPU that's about
 * to put the page on pcplist will either finish before the drain and the page
 * will be drained, or observe the new high limit and skip the pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}
void zone_pcp_reset(struct zone *zone)
{
	int cpu;
	struct per_cpu_zonestat *pzstats;

	if (zone->per_cpu_pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
			drain_zonestat(zone, pzstats);
		}
		free_percpu(zone->per_cpu_pageset);
		free_percpu(zone->per_cpu_zonestats);
		zone->per_cpu_pageset = &boot_pageset;
		zone->per_cpu_zonestats = &boot_zonestats;
	}
}
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone, must not contain holes,
 * must span full sections, and must be isolated before calling this function.
 */
void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;
	unsigned long flags;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may be not in buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
/*
 * This function returns a stable result only if called under zone lock.
 */
bool is_free_buddy_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned int order;

	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) &&
		    buddy_order_unsafe(page_head) >= order)
			break;
	}

	return order < MAX_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Break down a higher-order page in sub-pages, and keep our target out of
 * buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy, *next_page;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			next_page = page + size;
			current_buddy = page;
		} else {
			next_page = page;
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high, migratetype))
			continue;

		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high, migratetype);
			set_buddy_order(current_buddy, high);
			page = next_page;
		}
	}
}
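
/*
 * Illustrative walk-through: to pull one poisoned page out of an order-3
 * buddy block, the loop above halves the block three times (8 -> 4 -> 2 ->
 * 1 pages), each time freeing the half that does not contain the target
 * and descending into the half that does.
 */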
/*
 * Take a page that will be marked as poisoned off the buddy allocator.
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
						page_order, migratetype);
			SetPageHWPoisonTakenOff(page);
			if (!is_migrate_isolate(migratetype))
				__mod_zone_freepage_state(zone, -1, migratetype);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
/*
 * Cancel takeoff done by take_page_off_buddy().
 */
bool put_page_back_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int migratetype = get_pfnblock_migratetype(page, pfn);
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	if (put_page_testzero(page)) {
		ClearPageHWPoisonTakenOff(page);
		__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
		if (TestClearPageHWPoison(page))
			ret = true;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return ret;
}
#endif
#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat) {
		struct zone *zone = &pgdat->node_zones[ZONE_DMA];

		if (managed_zone(zone))
			return true;
	}
	return false;
}
#endif /* CONFIG_ZONE_DMA */