// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */
18 #include <linux/stddef.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/interrupt.h>
23 #include <linux/pagemap.h>
24 #include <linux/jiffies.h>
25 #include <linux/memblock.h>
26 #include <linux/compiler.h>
27 #include <linux/kernel.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/memremap.h>
46 #include <linux/stop_machine.h>
47 #include <linux/random.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/debugobjects.h>
54 #include <linux/kmemleak.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <trace/events/oom.h>
58 #include <linux/prefetch.h>
59 #include <linux/mm_inline.h>
60 #include <linux/mmu_notifier.h>
61 #include <linux/migrate.h>
62 #include <linux/hugetlb.h>
63 #include <linux/sched/rt.h>
64 #include <linux/sched/mm.h>
65 #include <linux/page_owner.h>
66 #include <linux/kthread.h>
67 #include <linux/memcontrol.h>
68 #include <linux/ftrace.h>
69 #include <linux/lockdep.h>
70 #include <linux/nmi.h>
71 #include <linux/psi.h>
72 #include <linux/padata.h>
73 #include <linux/khugepaged.h>
74 #include <linux/buffer_head.h>
75 #include <asm/sections.h>
76 #include <asm/tlbflush.h>
77 #include <asm/div64.h>
80 #include "page_reporting.h"
/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/*
 * Don't poison memory with KASAN (only for the tag-based modes).
 * During boot, all non-reserved memblock memory is exposed to page_alloc.
 * Poisoning all that memory lengthens boot time, especially on systems with
 * large amount of RAM. This flag is used to skip that poisoning.
 * This is only done for the tag-based KASAN modes, as those are able to
 * detect memory corruptions with the memory tags assigned by default.
 * All memory allocated normally after boot gets poisoned as usual.
 */
#define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))
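/*
 * Usage sketch: the fpi_t flags above are OR-ed together by the internal
 * free paths. For instance, __free_pages_core(), later in this file, hands
 * freshly initialized pages back with
 *
 *	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
 *
 * so that the pages land at the tail of the freelist and skip the KASAN
 * poisoning that boot-time memory does not need.
 */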
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

struct pagesets {
	local_lock_t lock;
};
static DEFINE_PER_CPU(struct pagesets, pagesets) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif
/* work_structs for global per-cpu drains */
struct pcpu_drain {
	struct zone *zone;
	struct work_struct work;
};
static DEFINE_MUTEX(pcpu_drain_mutex);
static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif
/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);
atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_high_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);
static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{
	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);
/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
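/*
 * Sketch of the cached-migratetype round trip (illustrative, using the two
 * helpers above): when a page is queued on a pcplist the block's migratetype
 * is stashed in page->index and read back cheaply on the free path:
 *
 *	set_pcppage_migratetype(page, get_pageblock_migratetype(page));
 *	...
 *	int mt = get_pcppage_migratetype(page);	// no pageblock bitmap lookup
 *
 * The value can go stale if the pageblock's migratetype changes while the
 * page sits on the pcplist, which is the trade-off described above.
 */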
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */
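/*
 * Usage sketch (the real call sites live in the suspend/hibernate core; this
 * only illustrates the contract documented above):
 *
 *	mutex_lock(&system_transition_mutex);
 *	pm_restrict_gfp_mask();		// drop __GFP_IO/__GFP_FS while suspended
 *	... devices suspended, allocations must not touch storage ...
 *	pm_restore_gfp_mask();		// put the saved mask back on resume
 *	mutex_unlock(&system_transition_mutex);
 */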
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
284 int sysctl_lowmem_reserve_ratio
[MAX_NR_ZONES
] = {
285 #ifdef CONFIG_ZONE_DMA
288 #ifdef CONFIG_ZONE_DMA32
292 #ifdef CONFIG_HIGHMEM
298 static char * const zone_names
[MAX_NR_ZONES
] = {
299 #ifdef CONFIG_ZONE_DMA
302 #ifdef CONFIG_ZONE_DMA32
306 #ifdef CONFIG_HIGHMEM
310 #ifdef CONFIG_ZONE_DEVICE
315 const char * const migratetype_names
[MIGRATE_TYPES
] = {
323 #ifdef CONFIG_MEMORY_ISOLATION
compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
static bool mirrored_kernelcore __meminitdata;
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
EXPORT_SYMBOL(movable_zone);

unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);

int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * Calling kasan_poison_pages() only after deferred memory initialization
 * has completed. Poisoning pages during deferred memory init will greatly
 * lengthen the process and cause problem in large memory systems as the
 * deferred pages initialization is done with interrupt disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	return static_branch_unlikely(&deferred_pages) ||
	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
	       PageSkipKASanPoison(page);
}
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}
/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * prev_end_pfn static that contains the end of previous zone
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
447 static inline bool should_skip_kasan_poison(struct page
*page
, fpi_t fpi_flags
)
449 return (!IS_ENABLED(CONFIG_KASAN_GENERIC
) &&
450 (fpi_flags
& FPI_SKIP_KASAN_POISON
)) ||
451 PageSkipKASanPoison(page
);
454 static inline bool early_page_uninitialised(unsigned long pfn
)
459 static inline bool defer_init(int nid
, unsigned long pfn
, unsigned long end_pfn
)
465 /* Return a pointer to the bitmap storing bits affecting a block of pages */
466 static inline unsigned long *get_pageblock_bitmap(const struct page
*page
,
469 #ifdef CONFIG_SPARSEMEM
470 return section_to_usemap(__pfn_to_section(pfn
));
472 return page_zone(page
)->pageblock_flags
;
473 #endif /* CONFIG_SPARSEMEM */
476 static inline int pfn_to_bitidx(const struct page
*page
, unsigned long pfn
)
478 #ifdef CONFIG_SPARSEMEM
479 pfn
&= (PAGES_PER_SECTION
-1);
481 pfn
= pfn
- round_down(page_zone(page
)->zone_start_pfn
, pageblock_nr_pages
);
482 #endif /* CONFIG_SPARSEMEM */
483 return (pfn
>> pageblock_order
) * NR_PAGEBLOCK_BITS
;
486 static __always_inline
487 unsigned long __get_pfnblock_flags_mask(const struct page
*page
,
491 unsigned long *bitmap
;
492 unsigned long bitidx
, word_bitidx
;
495 bitmap
= get_pageblock_bitmap(page
, pfn
);
496 bitidx
= pfn_to_bitidx(page
, pfn
);
497 word_bitidx
= bitidx
/ BITS_PER_LONG
;
498 bitidx
&= (BITS_PER_LONG
-1);
500 word
= bitmap
[word_bitidx
];
501 return (word
>> bitidx
) & mask
;
/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, mask);
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}
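/*
 * Usage sketch: most callers only want the migratetype bits of a pageblock,
 * which is just a masked read of the pageblock bitmap, e.g.
 *
 *	int mt = get_pfnblock_migratetype(page, page_to_pfn(page));
 *
 * get_pageblock_migratetype() used elsewhere in this file is the same lookup
 * with the pfn derived from the page.
 */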
/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
531 void set_pfnblock_flags_mask(struct page
*page
, unsigned long flags
,
535 unsigned long *bitmap
;
536 unsigned long bitidx
, word_bitidx
;
537 unsigned long old_word
, word
;
539 BUILD_BUG_ON(NR_PAGEBLOCK_BITS
!= 4);
540 BUILD_BUG_ON(MIGRATE_TYPES
> (1 << PB_migratetype_bits
));
542 bitmap
= get_pageblock_bitmap(page
, pfn
);
543 bitidx
= pfn_to_bitidx(page
, pfn
);
544 word_bitidx
= bitidx
/ BITS_PER_LONG
;
545 bitidx
&= (BITS_PER_LONG
-1);
547 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page
), pfn
), page
);
552 word
= READ_ONCE(bitmap
[word_bitidx
]);
554 old_word
= cmpxchg(&bitmap
[word_bitidx
], word
, (word
& ~mask
) | flags
);
555 if (word
== old_word
)
561 void set_pageblock_migratetype(struct page
*page
, int migratetype
)
563 if (unlikely(page_group_by_mobility_disabled
&&
564 migratetype
< MIGRATE_PCPTYPES
))
565 migratetype
= MIGRATE_UNMOVABLE
;
567 set_pfnblock_flags_mask(page
, (unsigned long)migratetype
,
568 page_to_pfn(page
), MIGRATETYPE_MASK
);
571 #ifdef CONFIG_DEBUG_VM
572 static int page_outside_zone_boundaries(struct zone
*zone
, struct page
*page
)
576 unsigned long pfn
= page_to_pfn(page
);
577 unsigned long sp
, start_pfn
;
580 seq
= zone_span_seqbegin(zone
);
581 start_pfn
= zone
->zone_start_pfn
;
582 sp
= zone
->spanned_pages
;
583 if (!zone_spans_pfn(zone
, pfn
))
585 } while (zone_span_seqretry(zone
, seq
));
588 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
589 pfn
, zone_to_nid(zone
), zone
->name
,
590 start_pfn
, start_pfn
+ sp
);
595 static int page_is_consistent(struct zone
*zone
, struct page
*page
)
597 if (zone
!= page_zone(page
))
603 * Temporary debugging check for pages not lying within a given zone.
605 static int __maybe_unused
bad_range(struct zone
*zone
, struct page
*page
)
607 if (page_outside_zone_boundaries(zone
, page
))
609 if (!page_is_consistent(zone
, page
))
615 static inline int __maybe_unused
bad_range(struct zone
*zone
, struct page
*page
)
621 static void bad_page(struct page
*page
, const char *reason
)
623 static unsigned long resume
;
624 static unsigned long nr_shown
;
625 static unsigned long nr_unshown
;
628 * Allow a burst of 60 reports, then keep quiet for that minute;
629 * or allow a steady drip of one report per second.
631 if (nr_shown
== 60) {
632 if (time_before(jiffies
, resume
)) {
638 "BUG: Bad page state: %lu messages suppressed\n",
645 resume
= jiffies
+ 60 * HZ
;
647 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
648 current
->comm
, page_to_pfn(page
));
649 dump_page(page
, reason
);
654 /* Leave bad fields for debug, except PageBuddy could make trouble */
655 page_mapcount_reset(page
); /* remove PageBuddy */
656 add_taint(TAINT_BAD_PAGE
, LOCKDEP_NOW_UNRELIABLE
);
659 static inline unsigned int order_to_pindex(int migratetype
, int order
)
663 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
664 if (order
> PAGE_ALLOC_COSTLY_ORDER
) {
665 VM_BUG_ON(order
!= pageblock_order
);
666 base
= PAGE_ALLOC_COSTLY_ORDER
+ 1;
669 VM_BUG_ON(order
> PAGE_ALLOC_COSTLY_ORDER
);
672 return (MIGRATE_PCPTYPES
* base
) + migratetype
;
675 static inline int pindex_to_order(unsigned int pindex
)
677 int order
= pindex
/ MIGRATE_PCPTYPES
;
679 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
680 if (order
> PAGE_ALLOC_COSTLY_ORDER
) {
681 order
= pageblock_order
;
682 VM_BUG_ON(order
!= pageblock_order
);
685 VM_BUG_ON(order
> PAGE_ALLOC_COSTLY_ORDER
);
691 static inline bool pcp_allowed_order(unsigned int order
)
693 if (order
<= PAGE_ALLOC_COSTLY_ORDER
)
695 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
696 if (order
== pageblock_order
)
702 static inline void free_the_page(struct page
*page
, unsigned int order
)
704 if (pcp_allowed_order(order
)) /* Via pcp? */
705 free_unref_page(page
, order
);
707 __free_pages_ok(page
, order
, FPI_NONE
);
/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and have PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
 *
 * The first tail page's ->compound_dtor holds the offset in array of compound
 * page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
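/*
 * Layout sketch for an order-2 compound page (pages p[0..3], illustrative):
 *
 *	p[0]: head page, PG_head set, compound_order(p[0]) == 2
 *	p[1]: first tail page, compound_head == (unsigned long)p[0] | 1,
 *	      ->compound_dtor and ->compound_order live here
 *	p[2], p[3]: tail pages whose compound_head also points back at p[0]
 *
 * prep_compound_page() below establishes exactly this structure, and
 * free_compound_page() is the default destructor that hands it back.
 */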
725 void free_compound_page(struct page
*page
)
727 mem_cgroup_uncharge(page
);
728 free_the_page(page
, compound_order(page
));
731 void prep_compound_page(struct page
*page
, unsigned int order
)
734 int nr_pages
= 1 << order
;
737 for (i
= 1; i
< nr_pages
; i
++) {
738 struct page
*p
= page
+ i
;
739 p
->mapping
= TAIL_MAPPING
;
740 set_compound_head(p
, page
);
743 set_compound_page_dtor(page
, COMPOUND_PAGE_DTOR
);
744 set_compound_order(page
, order
);
745 atomic_set(compound_mapcount_ptr(page
), -1);
746 if (hpage_pincount_available(page
))
747 atomic_set(compound_pincount_ptr(page
), 0);
750 #ifdef CONFIG_DEBUG_PAGEALLOC
751 unsigned int _debug_guardpage_minorder
;
753 bool _debug_pagealloc_enabled_early __read_mostly
754 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
);
755 EXPORT_SYMBOL(_debug_pagealloc_enabled_early
);
756 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled
);
757 EXPORT_SYMBOL(_debug_pagealloc_enabled
);
759 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled
);
761 static int __init
early_debug_pagealloc(char *buf
)
763 return kstrtobool(buf
, &_debug_pagealloc_enabled_early
);
765 early_param("debug_pagealloc", early_debug_pagealloc
);
767 static int __init
debug_guardpage_minorder_setup(char *buf
)
771 if (kstrtoul(buf
, 10, &res
) < 0 || res
> MAX_ORDER
/ 2) {
772 pr_err("Bad debug_guardpage_minorder value\n");
775 _debug_guardpage_minorder
= res
;
776 pr_info("Setting debug_guardpage_minorder to %lu\n", res
);
779 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup
);
781 static inline bool set_page_guard(struct zone
*zone
, struct page
*page
,
782 unsigned int order
, int migratetype
)
784 if (!debug_guardpage_enabled())
787 if (order
>= debug_guardpage_minorder())
790 __SetPageGuard(page
);
791 INIT_LIST_HEAD(&page
->lru
);
792 set_page_private(page
, order
);
793 /* Guard pages are not available for any usage */
794 __mod_zone_freepage_state(zone
, -(1 << order
), migratetype
);
799 static inline void clear_page_guard(struct zone
*zone
, struct page
*page
,
800 unsigned int order
, int migratetype
)
802 if (!debug_guardpage_enabled())
805 __ClearPageGuard(page
);
807 set_page_private(page
, 0);
808 if (!is_migrate_isolate(migratetype
))
809 __mod_zone_freepage_state(zone
, (1 << order
), migratetype
);
812 static inline bool set_page_guard(struct zone
*zone
, struct page
*page
,
813 unsigned int order
, int migratetype
) { return false; }
814 static inline void clear_page_guard(struct zone
*zone
, struct page
*page
,
815 unsigned int order
, int migratetype
) {}
/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
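/*
 * Example of the interplay described above (a sketch of boot parameters, not
 * new behaviour): booting with "init_on_alloc=1 page_poisoning=on" leaves
 * only page poisoning enabled - the function below detects the conflict,
 * prints the "CONFIG_PAGE_POISONING ... will take precedence" message and
 * clears _init_on_alloc_enabled_early/_init_on_free_enabled_early before the
 * static keys are flipped.
 */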
824 void init_mem_debugging_and_hardening(void)
826 bool page_poisoning_requested
= false;
828 #ifdef CONFIG_PAGE_POISONING
830 * Page poisoning is debug page alloc for some arches. If
831 * either of those options are enabled, enable poisoning.
833 if (page_poisoning_enabled() ||
834 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
) &&
835 debug_pagealloc_enabled())) {
836 static_branch_enable(&_page_poisoning_enabled
);
837 page_poisoning_requested
= true;
841 if ((_init_on_alloc_enabled_early
|| _init_on_free_enabled_early
) &&
842 page_poisoning_requested
) {
843 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
844 "will take precedence over init_on_alloc and init_on_free\n");
845 _init_on_alloc_enabled_early
= false;
846 _init_on_free_enabled_early
= false;
849 if (_init_on_alloc_enabled_early
)
850 static_branch_enable(&init_on_alloc
);
852 static_branch_disable(&init_on_alloc
);
854 if (_init_on_free_enabled_early
)
855 static_branch_enable(&init_on_free
);
857 static_branch_disable(&init_on_free
);
859 #ifdef CONFIG_DEBUG_PAGEALLOC
860 if (!debug_pagealloc_enabled())
863 static_branch_enable(&_debug_pagealloc_enabled
);
865 if (!debug_guardpage_minorder())
868 static_branch_enable(&_debug_guardpage_enabled
);
872 static inline void set_buddy_order(struct page
*page
, unsigned int order
)
874 set_page_private(page
, order
);
875 __SetPageBuddy(page
);
/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
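/*
 * Worked example (sketch): for an order-2 block starting at pfn 0x1000,
 * __find_buddy_pfn(0x1000, 2) == 0x1000 ^ (1 << 2) == 0x1004, and the merged
 * block starts at combined_pfn == buddy_pfn & pfn == 0x1000. page_is_buddy()
 * below is what decides whether the candidate at 0x1004 may actually be
 * merged (same order, same zone, PageBuddy set).
 */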
891 static inline bool page_is_buddy(struct page
*page
, struct page
*buddy
,
894 if (!page_is_guard(buddy
) && !PageBuddy(buddy
))
897 if (buddy_order(buddy
) != order
)
901 * zone check is done late to avoid uselessly calculating
902 * zone/node ids for pages that could never merge.
904 if (page_zone_id(page
) != page_zone_id(buddy
))
907 VM_BUG_ON_PAGE(page_count(buddy
) != 0, buddy
);
912 #ifdef CONFIG_COMPACTION
913 static inline struct capture_control
*task_capc(struct zone
*zone
)
915 struct capture_control
*capc
= current
->capture_control
;
917 return unlikely(capc
) &&
918 !(current
->flags
& PF_KTHREAD
) &&
920 capc
->cc
->zone
== zone
? capc
: NULL
;
924 compaction_capture(struct capture_control
*capc
, struct page
*page
,
925 int order
, int migratetype
)
927 if (!capc
|| order
!= capc
->cc
->order
)
930 /* Do not accidentally pollute CMA or isolated regions*/
931 if (is_migrate_cma(migratetype
) ||
932 is_migrate_isolate(migratetype
))
936 * Do not let lower order allocations pollute a movable pageblock.
937 * This might let an unmovable request use a reclaimable pageblock
938 * and vice-versa but no more than normal fallback logic which can
939 * have trouble finding a high-order free page.
941 if (order
< pageblock_order
&& migratetype
== MIGRATE_MOVABLE
)
949 static inline struct capture_control
*task_capc(struct zone
*zone
)
955 compaction_capture(struct capture_control
*capc
, struct page
*page
,
956 int order
, int migratetype
)
960 #endif /* CONFIG_COMPACTION */
962 /* Used for pages not on another list */
963 static inline void add_to_free_list(struct page
*page
, struct zone
*zone
,
964 unsigned int order
, int migratetype
)
966 struct free_area
*area
= &zone
->free_area
[order
];
968 list_add(&page
->lru
, &area
->free_list
[migratetype
]);
972 /* Used for pages not on another list */
973 static inline void add_to_free_list_tail(struct page
*page
, struct zone
*zone
,
974 unsigned int order
, int migratetype
)
976 struct free_area
*area
= &zone
->free_area
[order
];
978 list_add_tail(&page
->lru
, &area
->free_list
[migratetype
]);
983 * Used for pages which are on another list. Move the pages to the tail
984 * of the list - so the moved pages won't immediately be considered for
985 * allocation again (e.g., optimization for memory onlining).
987 static inline void move_to_free_list(struct page
*page
, struct zone
*zone
,
988 unsigned int order
, int migratetype
)
990 struct free_area
*area
= &zone
->free_area
[order
];
992 list_move_tail(&page
->lru
, &area
->free_list
[migratetype
]);
995 static inline void del_page_from_free_list(struct page
*page
, struct zone
*zone
,
998 /* clear reported state and update reported page count */
999 if (page_reported(page
))
1000 __ClearPageReported(page
);
1002 list_del(&page
->lru
);
1003 __ClearPageBuddy(page
);
1004 set_page_private(page
, 0);
1005 zone
->free_area
[order
].nr_free
--;
1009 * If this is not the largest possible page, check if the buddy
1010 * of the next-highest order is free. If it is, it's possible
1011 * that pages are being freed that will coalesce soon. In case,
1012 * that is happening, add the free page to the tail of the list
1013 * so it's less likely to be used soon and more likely to be merged
1014 * as a higher order page
1017 buddy_merge_likely(unsigned long pfn
, unsigned long buddy_pfn
,
1018 struct page
*page
, unsigned int order
)
1020 struct page
*higher_page
, *higher_buddy
;
1021 unsigned long combined_pfn
;
1023 if (order
>= MAX_ORDER
- 2)
1026 combined_pfn
= buddy_pfn
& pfn
;
1027 higher_page
= page
+ (combined_pfn
- pfn
);
1028 buddy_pfn
= __find_buddy_pfn(combined_pfn
, order
+ 1);
1029 higher_buddy
= higher_page
+ (buddy_pfn
- combined_pfn
);
1031 return page_is_buddy(higher_page
, higher_buddy
, order
+ 1);
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 */
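/*
 * Coalescing sketch: freeing page P of order 0 whose order-0 buddy is already
 * free removes that buddy from free_area[0], merges the pair into an order-1
 * block, and repeats at order 1, 2, ... until the buddy is not free (or the
 * merge limit is hit); only then is the merged block linked into
 * free_area[order].free_list[migratetype] by __free_one_page() below.
 */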
1058 static inline void __free_one_page(struct page
*page
,
1060 struct zone
*zone
, unsigned int order
,
1061 int migratetype
, fpi_t fpi_flags
)
1063 struct capture_control
*capc
= task_capc(zone
);
1064 unsigned long buddy_pfn
;
1065 unsigned long combined_pfn
;
1066 unsigned int max_order
;
1070 max_order
= min_t(unsigned int, MAX_ORDER
- 1, pageblock_order
);
1072 VM_BUG_ON(!zone_is_initialized(zone
));
1073 VM_BUG_ON_PAGE(page
->flags
& PAGE_FLAGS_CHECK_AT_PREP
, page
);
1075 VM_BUG_ON(migratetype
== -1);
1076 if (likely(!is_migrate_isolate(migratetype
)))
1077 __mod_zone_freepage_state(zone
, 1 << order
, migratetype
);
1079 VM_BUG_ON_PAGE(pfn
& ((1 << order
) - 1), page
);
1080 VM_BUG_ON_PAGE(bad_range(zone
, page
), page
);
1083 while (order
< max_order
) {
1084 if (compaction_capture(capc
, page
, order
, migratetype
)) {
1085 __mod_zone_freepage_state(zone
, -(1 << order
),
1089 buddy_pfn
= __find_buddy_pfn(pfn
, order
);
1090 buddy
= page
+ (buddy_pfn
- pfn
);
1092 if (!page_is_buddy(page
, buddy
, order
))
1095 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1096 * merge with it and move up one order.
1098 if (page_is_guard(buddy
))
1099 clear_page_guard(zone
, buddy
, order
, migratetype
);
1101 del_page_from_free_list(buddy
, zone
, order
);
1102 combined_pfn
= buddy_pfn
& pfn
;
1103 page
= page
+ (combined_pfn
- pfn
);
1107 if (order
< MAX_ORDER
- 1) {
1108 /* If we are here, it means order is >= pageblock_order.
1109 * We want to prevent merge between freepages on isolate
1110 * pageblock and normal pageblock. Without this, pageblock
1111 * isolation could cause incorrect freepage or CMA accounting.
1113 * We don't want to hit this code for the more frequent
1114 * low-order merging.
1116 if (unlikely(has_isolate_pageblock(zone
))) {
1119 buddy_pfn
= __find_buddy_pfn(pfn
, order
);
1120 buddy
= page
+ (buddy_pfn
- pfn
);
1121 buddy_mt
= get_pageblock_migratetype(buddy
);
1123 if (migratetype
!= buddy_mt
1124 && (is_migrate_isolate(migratetype
) ||
1125 is_migrate_isolate(buddy_mt
)))
1128 max_order
= order
+ 1;
1129 goto continue_merging
;
1133 set_buddy_order(page
, order
);
1135 if (fpi_flags
& FPI_TO_TAIL
)
1137 else if (is_shuffle_order(order
))
1138 to_tail
= shuffle_pick_tail();
1140 to_tail
= buddy_merge_likely(pfn
, buddy_pfn
, page
, order
);
1143 add_to_free_list_tail(page
, zone
, order
, migratetype
);
1145 add_to_free_list(page
, zone
, order
, migratetype
);
1147 /* Notify page reporting subsystem of freed page */
1148 if (!(fpi_flags
& FPI_SKIP_REPORT_NOTIFY
))
1149 page_reporting_notify_free(order
);
1153 * A bad page could be due to a number of fields. Instead of multiple branches,
1154 * try and check multiple fields with one check. The caller must do a detailed
1155 * check if necessary.
1157 static inline bool page_expected_state(struct page
*page
,
1158 unsigned long check_flags
)
1160 if (unlikely(atomic_read(&page
->_mapcount
) != -1))
1163 if (unlikely((unsigned long)page
->mapping
|
1164 page_ref_count(page
) |
1168 (page
->flags
& check_flags
)))
1174 static const char *page_bad_reason(struct page
*page
, unsigned long flags
)
1176 const char *bad_reason
= NULL
;
1178 if (unlikely(atomic_read(&page
->_mapcount
) != -1))
1179 bad_reason
= "nonzero mapcount";
1180 if (unlikely(page
->mapping
!= NULL
))
1181 bad_reason
= "non-NULL mapping";
1182 if (unlikely(page_ref_count(page
) != 0))
1183 bad_reason
= "nonzero _refcount";
1184 if (unlikely(page
->flags
& flags
)) {
1185 if (flags
== PAGE_FLAGS_CHECK_AT_PREP
)
1186 bad_reason
= "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1188 bad_reason
= "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1191 if (unlikely(page
->memcg_data
))
1192 bad_reason
= "page still charged to cgroup";
1197 static void check_free_page_bad(struct page
*page
)
1200 page_bad_reason(page
, PAGE_FLAGS_CHECK_AT_FREE
));
1203 static inline int check_free_page(struct page
*page
)
1205 if (likely(page_expected_state(page
, PAGE_FLAGS_CHECK_AT_FREE
)))
1208 /* Something has gone sideways, find it */
1209 check_free_page_bad(page
);
1213 static int free_tail_pages_check(struct page
*head_page
, struct page
*page
)
1218 * We rely page->lru.next never has bit 0 set, unless the page
1219 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1221 BUILD_BUG_ON((unsigned long)LIST_POISON1
& 1);
1223 if (!IS_ENABLED(CONFIG_DEBUG_VM
)) {
1227 switch (page
- head_page
) {
1229 /* the first tail page: ->mapping may be compound_mapcount() */
1230 if (unlikely(compound_mapcount(page
))) {
1231 bad_page(page
, "nonzero compound_mapcount");
1237 * the second tail page: ->mapping is
1238 * deferred_list.next -- ignore value.
1242 if (page
->mapping
!= TAIL_MAPPING
) {
1243 bad_page(page
, "corrupted mapping in tail page");
1248 if (unlikely(!PageTail(page
))) {
1249 bad_page(page
, "PageTail not set");
1252 if (unlikely(compound_head(page
) != head_page
)) {
1253 bad_page(page
, "compound_head not consistent");
1258 page
->mapping
= NULL
;
1259 clear_compound_head(page
);
1263 static void kernel_init_free_pages(struct page
*page
, int numpages
, bool zero_tags
)
1268 for (i
= 0; i
< numpages
; i
++)
1269 tag_clear_highpage(page
+ i
);
1273 /* s390's use of memset() could override KASAN redzones. */
1274 kasan_disable_current();
1275 for (i
= 0; i
< numpages
; i
++) {
1276 u8 tag
= page_kasan_tag(page
+ i
);
1277 page_kasan_tag_reset(page
+ i
);
1278 clear_highpage(page
+ i
);
1279 page_kasan_tag_set(page
+ i
, tag
);
1281 kasan_enable_current();
1284 static __always_inline
bool free_pages_prepare(struct page
*page
,
1285 unsigned int order
, bool check_free
, fpi_t fpi_flags
)
1288 bool skip_kasan_poison
= should_skip_kasan_poison(page
, fpi_flags
);
1290 VM_BUG_ON_PAGE(PageTail(page
), page
);
1292 trace_mm_page_free(page
, order
);
1294 if (unlikely(PageHWPoison(page
)) && !order
) {
1296 * Do not let hwpoison pages hit pcplists/buddy
1297 * Untie memcg state and reset page's owner
1299 if (memcg_kmem_enabled() && PageMemcgKmem(page
))
1300 __memcg_kmem_uncharge_page(page
, order
);
1301 reset_page_owner(page
, order
);
1306 * Check tail pages before head page information is cleared to
1307 * avoid checking PageCompound for order-0 pages.
1309 if (unlikely(order
)) {
1310 bool compound
= PageCompound(page
);
1313 VM_BUG_ON_PAGE(compound
&& compound_order(page
) != order
, page
);
1316 ClearPageDoubleMap(page
);
1317 ClearPageHasHWPoisoned(page
);
1319 for (i
= 1; i
< (1 << order
); i
++) {
1321 bad
+= free_tail_pages_check(page
, page
+ i
);
1322 if (unlikely(check_free_page(page
+ i
))) {
1326 (page
+ i
)->flags
&= ~PAGE_FLAGS_CHECK_AT_PREP
;
1329 if (PageMappingFlags(page
))
1330 page
->mapping
= NULL
;
1331 if (memcg_kmem_enabled() && PageMemcgKmem(page
))
1332 __memcg_kmem_uncharge_page(page
, order
);
1334 bad
+= check_free_page(page
);
1338 page_cpupid_reset_last(page
);
1339 page
->flags
&= ~PAGE_FLAGS_CHECK_AT_PREP
;
1340 reset_page_owner(page
, order
);
1342 if (!PageHighMem(page
)) {
1343 debug_check_no_locks_freed(page_address(page
),
1344 PAGE_SIZE
<< order
);
1345 debug_check_no_obj_freed(page_address(page
),
1346 PAGE_SIZE
<< order
);
1349 kernel_poison_pages(page
, 1 << order
);
1352 * As memory initialization might be integrated into KASAN,
1353 * kasan_free_pages and kernel_init_free_pages must be
1354 * kept together to avoid discrepancies in behavior.
1356 * With hardware tag-based KASAN, memory tags must be set before the
1357 * page becomes unavailable via debug_pagealloc or arch_free_page.
1359 if (kasan_has_integrated_init()) {
1360 if (!skip_kasan_poison
)
1361 kasan_free_pages(page
, order
);
1363 bool init
= want_init_on_free();
1366 kernel_init_free_pages(page
, 1 << order
, false);
1367 if (!skip_kasan_poison
)
1368 kasan_poison_pages(page
, order
, init
);
1372 * arch_free_page() can make the page's contents inaccessible. s390
1373 * does this. So nothing which can access the page's contents should
1374 * happen after this.
1376 arch_free_page(page
, order
);
1378 debug_pagealloc_unmap_pages(page
, 1 << order
);
1383 #ifdef CONFIG_DEBUG_VM
1385 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1386 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1387 * moved from pcp lists to free lists.
1389 static bool free_pcp_prepare(struct page
*page
, unsigned int order
)
1391 return free_pages_prepare(page
, order
, true, FPI_NONE
);
1394 static bool bulkfree_pcp_prepare(struct page
*page
)
1396 if (debug_pagealloc_enabled_static())
1397 return check_free_page(page
);
1403 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1404 * moving from pcp lists to free list in order to reduce overhead. With
1405 * debug_pagealloc enabled, they are checked also immediately when being freed
1408 static bool free_pcp_prepare(struct page
*page
, unsigned int order
)
1410 if (debug_pagealloc_enabled_static())
1411 return free_pages_prepare(page
, order
, true, FPI_NONE
);
1413 return free_pages_prepare(page
, order
, false, FPI_NONE
);
1416 static bool bulkfree_pcp_prepare(struct page
*page
)
1418 return check_free_page(page
);
1420 #endif /* CONFIG_DEBUG_VM */
1422 static inline void prefetch_buddy(struct page
*page
)
1424 unsigned long pfn
= page_to_pfn(page
);
1425 unsigned long buddy_pfn
= __find_buddy_pfn(pfn
, 0);
1426 struct page
*buddy
= page
+ (buddy_pfn
- pfn
);
/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
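/*
 * Sketch of the transient encoding used while draining (see the loops below):
 * before a page is moved onto the local "head" list, its cached migratetype
 * in page->index is shifted up and the order is stored in the low bits,
 *
 *	page->index <<= NR_PCP_ORDER_WIDTH;
 *	page->index |= order;
 *
 * and the second loop decodes it again with
 *
 *	order = mt & NR_PCP_ORDER_MASK;  mt >>= NR_PCP_ORDER_WIDTH;
 *
 * so a single list walk can free mixed-order pages under zone->lock.
 */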
1442 static void free_pcppages_bulk(struct zone
*zone
, int count
,
1443 struct per_cpu_pages
*pcp
)
1449 int prefetch_nr
= READ_ONCE(pcp
->batch
);
1450 bool isolated_pageblocks
;
1451 struct page
*page
, *tmp
;
1455 * Ensure proper count is passed which otherwise would stuck in the
1456 * below while (list_empty(list)) loop.
1458 count
= min(pcp
->count
, count
);
1460 struct list_head
*list
;
1463 * Remove pages from lists in a round-robin fashion. A
1464 * batch_free count is maintained that is incremented when an
1465 * empty list is encountered. This is so more pages are freed
1466 * off fuller lists instead of spinning excessively around empty
1471 if (++pindex
== NR_PCP_LISTS
)
1473 list
= &pcp
->lists
[pindex
];
1474 } while (list_empty(list
));
1476 /* This is the only non-empty list. Free them all. */
1477 if (batch_free
== NR_PCP_LISTS
)
1480 order
= pindex_to_order(pindex
);
1481 BUILD_BUG_ON(MAX_ORDER
>= (1<<NR_PCP_ORDER_WIDTH
));
1483 page
= list_last_entry(list
, struct page
, lru
);
1484 /* must delete to avoid corrupting pcp list */
1485 list_del(&page
->lru
);
1486 nr_freed
+= 1 << order
;
1487 count
-= 1 << order
;
1489 if (bulkfree_pcp_prepare(page
))
1492 /* Encode order with the migratetype */
1493 page
->index
<<= NR_PCP_ORDER_WIDTH
;
1494 page
->index
|= order
;
1496 list_add_tail(&page
->lru
, &head
);
1499 * We are going to put the page back to the global
1500 * pool, prefetch its buddy to speed up later access
1501 * under zone->lock. It is believed the overhead of
1502 * an additional test and calculating buddy_pfn here
1503 * can be offset by reduced memory latency later. To
1504 * avoid excessive prefetching due to large count, only
1505 * prefetch buddy for the first pcp->batch nr of pages.
1508 prefetch_buddy(page
);
1511 } while (count
> 0 && --batch_free
&& !list_empty(list
));
1513 pcp
->count
-= nr_freed
;
1516 * local_lock_irq held so equivalent to spin_lock_irqsave for
1517 * both PREEMPT_RT and non-PREEMPT_RT configurations.
1519 spin_lock(&zone
->lock
);
1520 isolated_pageblocks
= has_isolate_pageblock(zone
);
1523 * Use safe version since after __free_one_page(),
1524 * page->lru.next will not point to original list.
1526 list_for_each_entry_safe(page
, tmp
, &head
, lru
) {
1527 int mt
= get_pcppage_migratetype(page
);
1529 /* mt has been encoded with the order (see above) */
1530 order
= mt
& NR_PCP_ORDER_MASK
;
1531 mt
>>= NR_PCP_ORDER_WIDTH
;
1533 /* MIGRATE_ISOLATE page should not go to pcplists */
1534 VM_BUG_ON_PAGE(is_migrate_isolate(mt
), page
);
1535 /* Pageblock could have been isolated meanwhile */
1536 if (unlikely(isolated_pageblocks
))
1537 mt
= get_pageblock_migratetype(page
);
1539 __free_one_page(page
, page_to_pfn(page
), zone
, order
, mt
, FPI_NONE
);
1540 trace_mm_page_pcpu_drain(page
, order
, mt
);
1542 spin_unlock(&zone
->lock
);
1545 static void free_one_page(struct zone
*zone
,
1546 struct page
*page
, unsigned long pfn
,
1548 int migratetype
, fpi_t fpi_flags
)
1550 unsigned long flags
;
1552 spin_lock_irqsave(&zone
->lock
, flags
);
1553 if (unlikely(has_isolate_pageblock(zone
) ||
1554 is_migrate_isolate(migratetype
))) {
1555 migratetype
= get_pfnblock_migratetype(page
, pfn
);
1557 __free_one_page(page
, pfn
, zone
, order
, migratetype
, fpi_flags
);
1558 spin_unlock_irqrestore(&zone
->lock
, flags
);
1561 static void __meminit
__init_single_page(struct page
*page
, unsigned long pfn
,
1562 unsigned long zone
, int nid
)
1564 mm_zero_struct_page(page
);
1565 set_page_links(page
, zone
, nid
, pfn
);
1566 init_page_count(page
);
1567 page_mapcount_reset(page
);
1568 page_cpupid_reset_last(page
);
1569 page_kasan_tag_reset(page
);
1571 INIT_LIST_HEAD(&page
->lru
);
1572 #ifdef WANT_PAGE_VIRTUAL
1573 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1574 if (!is_highmem_idx(zone
))
1575 set_page_address(page
, __va(pfn
<< PAGE_SHIFT
));
1579 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1580 static void __meminit
init_reserved_page(unsigned long pfn
)
1585 if (!early_page_uninitialised(pfn
))
1588 nid
= early_pfn_to_nid(pfn
);
1589 pgdat
= NODE_DATA(nid
);
1591 for (zid
= 0; zid
< MAX_NR_ZONES
; zid
++) {
1592 struct zone
*zone
= &pgdat
->node_zones
[zid
];
1594 if (pfn
>= zone
->zone_start_pfn
&& pfn
< zone_end_pfn(zone
))
1597 __init_single_page(pfn_to_page(pfn
), pfn
, zid
, nid
);
1600 static inline void init_reserved_page(unsigned long pfn
)
1603 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
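/*
 * Rounding sketch (assuming 4K pages, PAGE_SHIFT == 12): for a reservation of
 * [0x1000200, 0x1003000), PFN_DOWN(0x1000200) == 0x1000 and
 * PFN_UP(0x1003000) == 0x1003, so reserve_bootmem_region() below marks pfns
 * 0x1000..0x1002 reserved - the partially covered page at the start is still
 * included.
 */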
1611 void __meminit
reserve_bootmem_region(phys_addr_t start
, phys_addr_t end
)
1613 unsigned long start_pfn
= PFN_DOWN(start
);
1614 unsigned long end_pfn
= PFN_UP(end
);
1616 for (; start_pfn
< end_pfn
; start_pfn
++) {
1617 if (pfn_valid(start_pfn
)) {
1618 struct page
*page
= pfn_to_page(start_pfn
);
1620 init_reserved_page(start_pfn
);
1622 /* Avoid false-positive PageTail() */
1623 INIT_LIST_HEAD(&page
->lru
);
1626 * no need for atomic set_bit because the struct
1627 * page is not visible yet so nobody should
1630 __SetPageReserved(page
);
1635 static void __free_pages_ok(struct page
*page
, unsigned int order
,
1638 unsigned long flags
;
1640 unsigned long pfn
= page_to_pfn(page
);
1641 struct zone
*zone
= page_zone(page
);
1643 if (!free_pages_prepare(page
, order
, true, fpi_flags
))
1646 migratetype
= get_pfnblock_migratetype(page
, pfn
);
1648 spin_lock_irqsave(&zone
->lock
, flags
);
1649 if (unlikely(has_isolate_pageblock(zone
) ||
1650 is_migrate_isolate(migratetype
))) {
1651 migratetype
= get_pfnblock_migratetype(page
, pfn
);
1653 __free_one_page(page
, pfn
, zone
, order
, migratetype
, fpi_flags
);
1654 spin_unlock_irqrestore(&zone
->lock
, flags
);
1656 __count_vm_events(PGFREE
, 1 << order
);
1659 void __free_pages_core(struct page
*page
, unsigned int order
)
1661 unsigned int nr_pages
= 1 << order
;
1662 struct page
*p
= page
;
1666 * When initializing the memmap, __init_single_page() sets the refcount
1667 * of all pages to 1 ("allocated"/"not free"). We have to set the
1668 * refcount of all involved pages to 0.
1671 for (loop
= 0; loop
< (nr_pages
- 1); loop
++, p
++) {
1673 __ClearPageReserved(p
);
1674 set_page_count(p
, 0);
1676 __ClearPageReserved(p
);
1677 set_page_count(p
, 0);
1679 atomic_long_add(nr_pages
, &page_zone(page
)->managed_pages
);
1682 * Bypass PCP and place fresh pages right to the tail, primarily
1683 * relevant for memory onlining.
1685 __free_pages_ok(page
, order
, FPI_TO_TAIL
| FPI_SKIP_KASAN_POISON
);
1691 * During memory init memblocks map pfns to nids. The search is expensive and
1692 * this caches recent lookups. The implementation of __early_pfn_to_nid
1693 * treats start/end as pfns.
1695 struct mminit_pfnnid_cache
{
1696 unsigned long last_start
;
1697 unsigned long last_end
;
1701 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata
;
1704 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1706 static int __meminit
__early_pfn_to_nid(unsigned long pfn
,
1707 struct mminit_pfnnid_cache
*state
)
1709 unsigned long start_pfn
, end_pfn
;
1712 if (state
->last_start
<= pfn
&& pfn
< state
->last_end
)
1713 return state
->last_nid
;
1715 nid
= memblock_search_pfn_nid(pfn
, &start_pfn
, &end_pfn
);
1716 if (nid
!= NUMA_NO_NODE
) {
1717 state
->last_start
= start_pfn
;
1718 state
->last_end
= end_pfn
;
1719 state
->last_nid
= nid
;
1725 int __meminit
early_pfn_to_nid(unsigned long pfn
)
1727 static DEFINE_SPINLOCK(early_pfn_lock
);
1730 spin_lock(&early_pfn_lock
);
1731 nid
= __early_pfn_to_nid(pfn
, &early_pfnnid_cache
);
1733 nid
= first_online_node
;
1734 spin_unlock(&early_pfn_lock
);
1738 #endif /* CONFIG_NUMA */
1740 void __init
memblock_free_pages(struct page
*page
, unsigned long pfn
,
1743 if (early_page_uninitialised(pfn
))
1745 __free_pages_core(page
, order
);
1749 * Check that the whole (or subset of) a pageblock given by the interval of
1750 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1751 * with the migration of free compaction scanner.
1753 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1755 * It's possible on some configurations to have a setup like node0 node1 node0
1756 * i.e. it's possible that all pages within a zones range of pages do not
1757 * belong to a single zone. We assume that a border between node0 and node1
1758 * can occur within a single pageblock, but not a node0 node1 node0
1759 * interleaving within a single pageblock. It is therefore sufficient to check
1760 * the first and last page of a pageblock and avoid checking each individual
1761 * page in a pageblock.
1763 struct page
*__pageblock_pfn_to_page(unsigned long start_pfn
,
1764 unsigned long end_pfn
, struct zone
*zone
)
1766 struct page
*start_page
;
1767 struct page
*end_page
;
1769 /* end_pfn is one past the range we are checking */
1772 if (!pfn_valid(start_pfn
) || !pfn_valid(end_pfn
))
1775 start_page
= pfn_to_online_page(start_pfn
);
1779 if (page_zone(start_page
) != zone
)
1782 end_page
= pfn_to_page(end_pfn
);
1784 /* This gives a shorter code than deriving page_zone(end_page) */
1785 if (page_zone_id(start_page
) != page_zone_id(end_page
))
1791 void set_zone_contiguous(struct zone
*zone
)
1793 unsigned long block_start_pfn
= zone
->zone_start_pfn
;
1794 unsigned long block_end_pfn
;
1796 block_end_pfn
= ALIGN(block_start_pfn
+ 1, pageblock_nr_pages
);
1797 for (; block_start_pfn
< zone_end_pfn(zone
);
1798 block_start_pfn
= block_end_pfn
,
1799 block_end_pfn
+= pageblock_nr_pages
) {
1801 block_end_pfn
= min(block_end_pfn
, zone_end_pfn(zone
));
1803 if (!__pageblock_pfn_to_page(block_start_pfn
,
1804 block_end_pfn
, zone
))
1809 /* We confirm that there is no hole */
1810 zone
->contiguous
= true;
1813 void clear_zone_contiguous(struct zone
*zone
)
1815 zone
->contiguous
= false;
1818 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1819 static void __init
deferred_free_range(unsigned long pfn
,
1820 unsigned long nr_pages
)
1828 page
= pfn_to_page(pfn
);
1830 /* Free a large naturally-aligned chunk if possible */
1831 if (nr_pages
== pageblock_nr_pages
&&
1832 (pfn
& (pageblock_nr_pages
- 1)) == 0) {
1833 set_pageblock_migratetype(page
, MIGRATE_MOVABLE
);
1834 __free_pages_core(page
, pageblock_order
);
1838 for (i
= 0; i
< nr_pages
; i
++, page
++, pfn
++) {
1839 if ((pfn
& (pageblock_nr_pages
- 1)) == 0)
1840 set_pageblock_migratetype(page
, MIGRATE_MOVABLE
);
1841 __free_pages_core(page
, 0);
1845 /* Completion tracking for deferred_init_memmap() threads */
1846 static atomic_t pgdat_init_n_undone __initdata
;
1847 static __initdata
DECLARE_COMPLETION(pgdat_init_all_done_comp
);
1849 static inline void __init
pgdat_init_report_one_done(void)
1851 if (atomic_dec_and_test(&pgdat_init_n_undone
))
1852 complete(&pgdat_init_all_done_comp
);
/*
 * Returns true if page needs to be initialized or freed to buddy allocator.
 *
 * First we check if pfn is valid on architectures where it is possible to have
 * holes within pageblock_nr_pages. On systems where it is not possible, this
 * function is optimized out.
 *
 * Then, we check if a current large page is valid by only checking the validity
 * of the head pfn.
 */
1865 static inline bool __init
deferred_pfn_valid(unsigned long pfn
)
1867 if (!(pfn
& (pageblock_nr_pages
- 1)) && !pfn_valid(pfn
))
1873 * Free pages to buddy allocator. Try to free aligned pages in
1874 * pageblock_nr_pages sizes.
1876 static void __init
deferred_free_pages(unsigned long pfn
,
1877 unsigned long end_pfn
)
1879 unsigned long nr_pgmask
= pageblock_nr_pages
- 1;
1880 unsigned long nr_free
= 0;
1882 for (; pfn
< end_pfn
; pfn
++) {
1883 if (!deferred_pfn_valid(pfn
)) {
1884 deferred_free_range(pfn
- nr_free
, nr_free
);
1886 } else if (!(pfn
& nr_pgmask
)) {
1887 deferred_free_range(pfn
- nr_free
, nr_free
);
1893 /* Free the last block of pages to allocator */
1894 deferred_free_range(pfn
- nr_free
, nr_free
);
1898 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
1899 * by performing it only once every pageblock_nr_pages.
1900 * Return number of pages initialized.
1902 static unsigned long __init
deferred_init_pages(struct zone
*zone
,
1904 unsigned long end_pfn
)
1906 unsigned long nr_pgmask
= pageblock_nr_pages
- 1;
1907 int nid
= zone_to_nid(zone
);
1908 unsigned long nr_pages
= 0;
1909 int zid
= zone_idx(zone
);
1910 struct page
*page
= NULL
;
1912 for (; pfn
< end_pfn
; pfn
++) {
1913 if (!deferred_pfn_valid(pfn
)) {
1916 } else if (!page
|| !(pfn
& nr_pgmask
)) {
1917 page
= pfn_to_page(pfn
);
1921 __init_single_page(page
, pfn
, zid
, nid
);
1928 * This function is meant to pre-load the iterator for the zone init.
1929 * Specifically it walks through the ranges until we are caught up to the
1930 * first_init_pfn value and exits there. If we never encounter the value we
1931 * return false indicating there are no valid ranges left.
1934 deferred_init_mem_pfn_range_in_zone(u64
*i
, struct zone
*zone
,
1935 unsigned long *spfn
, unsigned long *epfn
,
1936 unsigned long first_init_pfn
)
1941 * Start out by walking through the ranges in this zone that have
1942 * already been initialized. We don't need to do anything with them
1943 * so we just need to flush them out of the system.
1945 for_each_free_mem_pfn_range_in_zone(j
, zone
, spfn
, epfn
) {
1946 if (*epfn
<= first_init_pfn
)
1948 if (*spfn
< first_init_pfn
)
1949 *spfn
= first_init_pfn
;
1958 * Initialize and free pages. We do it in two loops: first we initialize
1959 * struct page, then free to buddy allocator, because while we are
1960 * freeing pages we can access pages that are ahead (computing buddy
1961 * page in __free_one_page()).
1963 * In order to try and keep some memory in the cache we have the loop
1964 * broken along max page order boundaries. This way we will not cause
1965 * any issues with the buddy page computation.
1967 static unsigned long __init
1968 deferred_init_maxorder(u64
*i
, struct zone
*zone
, unsigned long *start_pfn
,
1969 unsigned long *end_pfn
)
1971 unsigned long mo_pfn
= ALIGN(*start_pfn
+ 1, MAX_ORDER_NR_PAGES
);
1972 unsigned long spfn
= *start_pfn
, epfn
= *end_pfn
;
1973 unsigned long nr_pages
= 0;
1976 /* First we loop through and initialize the page values */
1977 for_each_free_mem_pfn_range_in_zone_from(j
, zone
, start_pfn
, end_pfn
) {
1980 if (mo_pfn
<= *start_pfn
)
1983 t
= min(mo_pfn
, *end_pfn
);
1984 nr_pages
+= deferred_init_pages(zone
, *start_pfn
, t
);
1986 if (mo_pfn
< *end_pfn
) {
1987 *start_pfn
= mo_pfn
;
1992 /* Reset values and now loop through freeing pages as needed */
1995 for_each_free_mem_pfn_range_in_zone_from(j
, zone
, &spfn
, &epfn
) {
2001 t
= min(mo_pfn
, epfn
);
2002 deferred_free_pages(spfn
, t
);
2012 deferred_init_memmap_chunk(unsigned long start_pfn
, unsigned long end_pfn
,
2015 unsigned long spfn
, epfn
;
2016 struct zone
*zone
= arg
;
2019 deferred_init_mem_pfn_range_in_zone(&i
, zone
, &spfn
, &epfn
, start_pfn
);
2022 * Initialize and free pages in MAX_ORDER sized increments so that we
2023 * can avoid introducing any issues with the buddy allocator.
2025 while (spfn
< end_pfn
) {
2026 deferred_init_maxorder(&i
, zone
, &spfn
, &epfn
);
2031 /* An arch may override for more concurrency. */
2033 deferred_page_init_max_threads(const struct cpumask
*node_cpumask
)
/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
	unsigned long spfn = 0, epfn = 0;
	unsigned long first_init_pfn, flags;
	unsigned long start = jiffies;
	struct zone *zone;
	int zid, max_threads;
	u64 i;

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	pgdat_resize_lock(pgdat, &flags);
	first_init_pfn = pgdat->first_deferred_pfn;
	if (first_init_pfn == ULONG_MAX) {
		pgdat_resize_unlock(pgdat, &flags);
		pgdat_init_report_one_done();
		return 0;
	}

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/*
	 * Once we unlock here, the zone cannot be grown anymore, thus if an
	 * interrupt thread must allocate this early in boot, zone must be
	 * pre-grown prior to start of deferred page initialization.
	 */
	pgdat_resize_unlock(pgdat, &flags);

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_init_pfn))
		goto zone_empty;

	max_threads = deferred_page_init_max_threads(cpumask);

	while (spfn < epfn) {
		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
		struct padata_mt_job job = {
			.thread_fn   = deferred_init_memmap_chunk,
			.fn_arg      = zone,
			.start       = spfn,
			.size        = epfn_align - spfn,
			.align       = PAGES_PER_SECTION,
			.min_chunk   = PAGES_PER_SECTION,
			.max_threads = max_threads,
		};

		padata_do_multithreaded(&job);
		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						    epfn_align);
	}
zone_empty:
	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d deferred pages initialised in %ums\n",
		pgdat->node_id, jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}
/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when zone was grown, otherwise return false. We return true even
 * when we grow less than requested, to let the caller decide if there are
 * enough pages to satisfy the allocation.
 *
 * Note: We use noinline because this function is needed only during boot, and
 * it is called from a __ref function _deferred_grow_zone. This way we are
 * making sure that it is not inlined into permanent text section.
 */
static noinline bool __init
deferred_grow_zone(struct zone *zone, unsigned int order)
{
	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
	pg_data_t *pgdat = zone->zone_pgdat;
	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
	unsigned long spfn, epfn, flags;
	unsigned long nr_pages = 0;
	u64 i;

	/* Only the last zone may have deferred pages */
	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
		return false;

	pgdat_resize_lock(pgdat, &flags);

	/*
	 * If someone grew this zone while we were waiting for spinlock, return
	 * true, as there might be enough pages already.
	 */
	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_deferred_pfn)) {
		pgdat->first_deferred_pfn = ULONG_MAX;
		pgdat_resize_unlock(pgdat, &flags);
		/* Retry only once. */
		return first_deferred_pfn != ULONG_MAX;
	}

	/*
	 * Initialize and free pages in MAX_ORDER sized increments so
	 * that we can avoid introducing any issues with the buddy
	 * allocator.
	 */
	while (spfn < epfn) {
		/* update our first deferred PFN for this section */
		first_deferred_pfn = spfn;

		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
		touch_nmi_watchdog();

		/* We should only stop along section boundaries */
		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
			continue;

		/* If our quota has been met we can stop here */
		if (nr_pages >= nr_pages_needed)
			break;
	}

	pgdat->first_deferred_pfn = spfn;
	pgdat_resize_unlock(pgdat, &flags);

	return nr_pages > 0;
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have refdata wrapper to avoid warning,
 * and to ensure that the function body gets unloaded.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
void __init page_alloc_init_late(void)
{
	struct zone *zone;
	int nid;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/*
	 * We initialized the rest of the deferred pages. Permanently disable
	 * on-demand struct page initialization.
	 */
	static_branch_disable(&deferred_pages);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	/* Discard memblock private memory */
	memblock_discard();

	for_each_node_state(nid, N_MEMORY)
		shuffle_free_memory(NODE_DATA(nid));

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);
}
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
	page_zone(page)->cma_pages += pageblock_nr_pages;
}
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), that will allow to
		 * merge back to allocator when buddy will be freed.
		 * Corresponding page table entries will not be touched,
		 * pages will stay not present in virtual address space
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		add_to_free_list(&page[size], zone, high, migratetype);
		set_buddy_order(&page[size], high);
	}
}
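/*
 * Illustrative walk-through (hypothetical request, not from the source):
 * expand(zone, page, low = 2, high = 4) halves the block twice, placing
 * &page[8] on the order-3 free list and &page[4] on the order-2 free list,
 * leaving page[0..3] (order 2) for the caller.
 */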
static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
 * also checked when pcp lists are refilled from the free lists.
 */
static inline bool check_pcp_refill(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return check_new_page(page);
	else
		return false;
}

static inline bool check_new_pcp(struct page *page)
{
	return check_new_page(page);
}
#else
/*
 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
 * when pcp lists are being refilled from the free lists. With debug_pagealloc
 * enabled, they are also checked when being allocated from the pcp lists.
 */
static inline bool check_pcp_refill(struct page *page)
{
	return check_new_page(page);
}
static inline bool check_new_pcp(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return check_new_page(page);
	else
		return false;
}
#endif /* CONFIG_DEBUG_VM */

static bool check_new_pages(struct page *page, unsigned int order)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return true;
	}

	return false;
}
inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_alloc_pages and kernel_init_free_pages must be
	 * kept together to avoid discrepancies in behavior.
	 */
	if (kasan_has_integrated_init()) {
		kasan_alloc_pages(page, order, gfp_flags);
	} else {
		bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);

		kasan_unpoison_pages(page, order, init);
		if (init)
			kernel_init_free_pages(page, 1 << order,
					       gfp_flags & __GFP_ZEROTAGS);
	}

	set_page_owner(page, order, gfp_flags);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order);
		expand(zone, page, order, current_order, migratetype);
		set_pcppage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}
/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][3] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
#ifdef CONFIG_CMA
	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
#endif
};
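/*
 * Reading the table above: a MIGRATE_UNMOVABLE request whose own free lists
 * are empty tries MIGRATE_RECLAIMABLE first and MIGRATE_MOVABLE second; the
 * MIGRATE_TYPES sentinel terminates the walk in find_suitable_fallback().
 */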
#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif
/*
 * Move the free pages in a range to the freelist tail of the requested type.
 * Note that start_pfn and end_pfn are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  unsigned long start_pfn, unsigned long end_pfn,
			  int migratetype, int *num_movable)
{
	struct page *page;
	unsigned long pfn;
	unsigned int order;
	int pages_moved = 0;

	for (pfn = start_pfn; pfn <= end_pfn;) {
		page = pfn_to_page(pfn);
		if (!PageBuddy(page)) {
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
			 */
			if (num_movable &&
					(PageLRU(page) || __PageMovable(page)))
				(*num_movable)++;
			pfn++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = buddy_order(page);
		move_to_free_list(page, zone, order, migratetype);
		pfn += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable)
{
	unsigned long start_pfn, end_pfn, pfn;

	if (num_movable)
		*num_movable = 0;

	pfn = page_to_pfn(page);
	start_pfn = pfn & ~(pageblock_nr_pages - 1);
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_pfn = pfn;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_pfn, end_pfn, migratetype,
								num_movable);
}
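/*
 * Example of the alignment math above (pageblock_nr_pages of 512 assumed):
 * a page at pfn 1000 gives start_pfn = 1000 & ~511 = 512 and
 * end_pfn = 512 + 512 - 1 = 1023, i.e. the whole surrounding pageblock.
 */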
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}
/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * Leaving this order check is intended, although there is
	 * relaxed order check in next check. The reason is that
	 * we can actually steal whole pageblock if this condition met,
	 * but, below check doesn't guarantee it and that is just heuristic
	 * so could be changed anytime.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}
static inline bool boost_watermark(struct zone *zone)
{
	unsigned long max_boost;

	if (!watermark_boost_factor)
		return false;
	/*
	 * Don't bother in zones that are unlikely to produce results.
	 * On small machines, including kdump capture kernels running
	 * in a small area, boosting the watermark can cause an out of
	 * memory situation immediately.
	 */
	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
		return false;

	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
			watermark_boost_factor, 10000);

	/*
	 * high watermark may be uninitialised if fragmentation occurs
	 * very early in boot so do not boost. We do not fall
	 * through and boost by pageblock_nr_pages as failing
	 * allocations that early means that reclaim is not going
	 * to help and it may even be impossible to reclaim the
	 * boosted watermark resulting in a hang.
	 */
	if (!max_boost)
		return false;

	max_boost = max(pageblock_nr_pages, max_boost);

	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
		max_boost);

	return true;
}
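/*
 * Worked example of the boost arithmetic (illustrative numbers only):
 * with a high watermark of 10000 pages and watermark_boost_factor == 15000,
 * max_boost = mult_frac(10000, 15000, 10000) = 15000 pages; each fallback
 * then raises watermark_boost by one pageblock, capped at that value.
 */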
/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal whole pageblock. If not, we first move freepages in this
 * pageblock to our migratetype and determine how many already-allocated pages
 * are there in the pageblock with a compatible migratetype. If at least half
 * of pages are free or compatible, we can change migratetype of the pageblock
 * itself, so pages freed in the future will be put on the correct free list.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
		unsigned int alloc_flags, int start_type, bool whole_block)
{
	unsigned int current_order = buddy_order(page);
	int free_pages, movable_pages, alike_pages;
	int old_block_type;

	old_block_type = get_pageblock_migratetype(page);

	/*
	 * This can happen due to races and we want to prevent broken
	 * highatomic accounting.
	 */
	if (is_migrate_highatomic(old_block_type))
		goto single_page;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		goto single_page;
	}

	/*
	 * Boost watermarks to increase reclaim pressure to reduce the
	 * likelihood of future fallbacks. Wake kswapd now as the node
	 * may be balanced overall and kswapd will not wake naturally.
	 */
	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);

	/* We are not allowed to try stealing from the whole block */
	if (!whole_block)
		goto single_page;

	free_pages = move_freepages_block(zone, page, start_type,
						&movable_pages);
	/*
	 * Determine how many pages are compatible with our allocation.
	 * For movable allocation, it's the number of movable pages which
	 * we just obtained. For other types it's a bit more tricky.
	 */
	if (start_type == MIGRATE_MOVABLE) {
		alike_pages = movable_pages;
	} else {
		/*
		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
		 * to MOVABLE pageblock, consider all non-movable pages as
		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
		 * vice versa, be conservative since we can't distinguish the
		 * exact migratetype of non-movable pages.
		 */
		if (old_block_type == MIGRATE_MOVABLE)
			alike_pages = pageblock_nr_pages
						- (free_pages + movable_pages);
		else
			alike_pages = 0;
	}

	/* moving whole block can fail due to zone boundary conditions */
	if (!free_pages)
		goto single_page;

	/*
	 * If a sufficient number of pages in the block are either free or of
	 * comparable migratability as our allocation, claim the whole block.
	 */
	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
			page_group_by_mobility_disabled)
		set_pageblock_migratetype(page, start_type);

	return;

single_page:
	move_to_free_list(page, zone, current_order, start_type);
}
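/*
 * Example of the claim threshold (assuming pageblock_order == 9, i.e. a
 * 512-page block): the pageblock's migratetype is rewritten once at least
 * 1 << (9 - 1) == 256 pages are free or of a compatible type.
 */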
/*
 * Check whether there is a suitable fallback freepage with requested order.
 * If only_stealable is true, this function returns fallback_mt only if
 * we can steal other freepages all together. This would help to reduce
 * fragmentation due to mixed migratetype pages in one pageblock.
 */
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal)
{
	int i;
	int fallback_mt;

	if (area->nr_free == 0)
		return -1;

	*can_steal = false;
	for (i = 0;; i++) {
		fallback_mt = fallbacks[migratetype][i];
		if (fallback_mt == MIGRATE_TYPES)
			break;

		if (free_area_empty(area, fallback_mt))
			continue;

		if (can_steal_fallback(order, migratetype))
			*can_steal = true;

		if (!only_stealable)
			return fallback_mt;

		if (*can_steal)
			return fallback_mt;
	}

	return -1;
}
/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
				unsigned int alloc_order)
{
	int mt;
	unsigned long max_managed, flags;

	/*
	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
	 * Check is race-prone but harmless.
	 */
	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
	if (zone->nr_reserved_highatomic >= max_managed)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	/* Recheck the nr_reserved_highatomic limit under the lock */
	if (zone->nr_reserved_highatomic >= max_managed)
		goto out_unlock;

	mt = get_pageblock_migratetype(page);
	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
	    && !is_migrate_cma(mt)) {
		zone->nr_reserved_highatomic += pageblock_nr_pages;
		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
	}

out_unlock:
	spin_unlock_irqrestore(&zone->lock, flags);
}
/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 *
 * If @force is true, try to unreserve a pageblock even though highatomic
 * pageblock is exhausted.
 */
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
						bool force)
{
	struct zonelist *zonelist = ac->zonelist;
	unsigned long flags;
	struct zoneref *z;
	struct zone *zone;
	struct page *page;
	int order;
	bool ret;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
								ac->nodemask) {
		/*
		 * Preserve at least one pageblock unless memory pressure
		 * is really high.
		 */
		if (!force && zone->nr_reserved_highatomic <=
					pageblock_nr_pages)
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &(zone->free_area[order]);

			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
			if (!page)
				continue;

			/*
			 * In page freeing path, migratetype change is racy so
			 * we can encounter several free pages in a pageblock
			 * in this loop although we changed the pageblock type
			 * from highatomic to ac->migratetype. So we should
			 * adjust the count once.
			 */
			if (is_migrate_highatomic_page(page)) {
				/*
				 * It should never happen but changes to
				 * locking could inadvertently allow a per-cpu
				 * drain to add pages to MIGRATE_HIGHATOMIC
				 * while unreserving so be safe and watch for
				 * underflows.
				 */
				zone->nr_reserved_highatomic -= min(
						pageblock_nr_pages,
						zone->nr_reserved_highatomic);
			}

			/*
			 * Convert to ac->migratetype and avoid the normal
			 * pageblock stealing heuristics. Minimally, the caller
			 * is doing the work and needs the pages. More
			 * importantly, if the block was always converted to
			 * MIGRATE_UNMOVABLE or another type then the number
			 * of pageblocks that cannot be completely freed
			 * may increase.
			 */
			set_pageblock_migratetype(page, ac->migratetype);
			ret = move_freepages_block(zone, page, ac->migratetype,
									NULL);
			if (ret) {
				spin_unlock_irqrestore(&zone->lock, flags);
				return ret;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return false;
}
/*
 * Try finding a free buddy page on the fallback list and put it on the free
 * list of requested migratetype, possibly along with other pages from the same
 * block, depending on fragmentation avoidance heuristics. Returns true if
 * fallback was found so that __rmqueue_smallest() can grab it.
 *
 * The use of signed ints for order and current_order is a deliberate
 * deviation from the rest of this file, to make the for loop
 * condition simpler.
 */
static __always_inline bool
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
						unsigned int alloc_flags)
{
	struct free_area *area;
	int current_order;
	int min_order = order;
	struct page *page;
	int fallback_mt;
	bool can_steal;

	/*
	 * Do not steal pages from freelists belonging to other pageblocks
	 * i.e. orders < pageblock_order. If there are no local zones free,
	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
	 */
	if (alloc_flags & ALLOC_NOFRAGMENT)
		min_order = pageblock_order;

	/*
	 * Find the largest available free page in the other list. This roughly
	 * approximates finding the pageblock with the most free pages, which
	 * would be too costly to do exactly.
	 */
	for (current_order = MAX_ORDER - 1; current_order >= min_order;
				--current_order) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt == -1)
			continue;

		/*
		 * We cannot steal all free pages from the pageblock and the
		 * requested migratetype is movable. In that case it's better to
		 * steal and split the smallest available page instead of the
		 * largest available page, because even if the next movable
		 * allocation falls back into a different pageblock than this
		 * one, it won't cause permanent fragmentation.
		 */
		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
					&& current_order > order)
			goto find_smallest;

		goto do_steal;
	}

	return false;

find_smallest:
	for (current_order = order; current_order < MAX_ORDER;
							current_order++) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt != -1)
			break;
	}

	/*
	 * This should not happen - we already found a suitable fallback
	 * when looking for the largest page.
	 */
	VM_BUG_ON(current_order == MAX_ORDER);

do_steal:
	page = get_page_from_free_area(area, fallback_mt);

	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
								can_steal);

	trace_mm_page_alloc_extfrag(page, order, current_order,
		start_migratetype, fallback_mt);

	return true;
}
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
						unsigned int alloc_flags)
{
	struct page *page;

	if (IS_ENABLED(CONFIG_CMA)) {
		/*
		 * Balance movable allocations between regular and CMA areas by
		 * allocating from CMA when over half of the zone's free memory
		 * is in the CMA area.
		 */
		if (alloc_flags & ALLOC_CMA &&
		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
			page = __rmqueue_cma_fallback(zone, order);
			if (page)
				goto out;
		}
	}
retry:
	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (alloc_flags & ALLOC_CMA)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page && __rmqueue_fallback(zone, order, migratetype,
								alloc_flags))
			goto retry;
	}
out:
	if (page)
		trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, unsigned int alloc_flags)
{
	int i, allocated = 0;

	/*
	 * local_lock_irq held so equivalent to spin_lock_irqsave for
	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
	 */
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype,
								alloc_flags);
		if (unlikely(page == NULL))
			break;

		if (unlikely(check_pcp_refill(page)))
			continue;

		/*
		 * Split buddy pages returned by expand() are received here in
		 * physical page order. The page is added to the tail of
		 * caller's list. From the caller's perspective, the linked list
		 * is ordered by page number under some conditions. This is
		 * useful for IO devices that can forward direction from the
		 * head, thus also in the physical page order. This is useful
		 * for IO devices that can merge IO requests if the physical
		 * pages are ordered properly.
		 */
		list_add_tail(&page->lru, list);
		allocated++;
		if (is_migrate_cma(get_pcppage_migratetype(page)))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}

	/*
	 * i pages were removed from the buddy list even if some leak due
	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
	 * on i. Do not confuse with 'allocated' which is the number of
	 * pages added to the pcp list.
	 */
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return allocated;
}
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain, batch;

	local_lock_irqsave(&pagesets.lock, flags);
	batch = READ_ONCE(pcp->batch);
	to_drain = min(pcp->count, batch);
	if (to_drain > 0)
		free_pcppages_bulk(zone, to_drain, pcp);
	local_unlock_irqrestore(&pagesets.lock, flags);
}
/*
 * Drain pcplists of the indicated processor and zone.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
	unsigned long flags;
	struct per_cpu_pages *pcp;

	local_lock_irqsave(&pagesets.lock, flags);

	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
	if (pcp->count)
		free_pcppages_bulk(zone, pcp->count, pcp);

	local_unlock_irqrestore(&pagesets.lock, flags);
}

/*
 * Drain pcplists of all zones on the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		drain_pages_zone(cpu, zone);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 *
 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
 * the single zone's pages.
 */
void drain_local_pages(struct zone *zone)
{
	int cpu = smp_processor_id();

	if (zone)
		drain_pages_zone(cpu, zone);
	else
		drain_pages(cpu);
}
static void drain_local_pages_wq(struct work_struct *work)
{
	struct pcpu_drain *drain;

	drain = container_of(work, struct pcpu_drain, work);

	/*
	 * drain_all_pages doesn't use proper cpu hotplug protection so
	 * we can race with cpu offline when the WQ can move this from
	 * a cpu pinned worker to an unbound one. We can operate on a different
	 * cpu which is alright but we also have to make sure to not move to
	 * a different one.
	 */
	migrate_disable();
	drain_local_pages(drain->zone);
	migrate_enable();
}
/*
 * The implementation of drain_all_pages(), exposing an extra parameter to
 * drain on all cpus.
 *
 * drain_all_pages() is optimized to only execute on cpus where pcplists are
 * not empty. The check for non-emptiness can however race with a free to
 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
 * that need the guarantee that every CPU has drained can disable the
 * optimizing racy check.
 */
static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
{
	int cpu;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON_ONCE(!mm_percpu_wq))
		return;

	/*
	 * Do not drain if one is already in progress unless it's specific to
	 * a zone. Such callers are primarily CMA and memory hotplug and need
	 * the drain to be complete when the call returns.
	 */
	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
		if (!zone)
			return;
		mutex_lock(&pcpu_drain_mutex);
	}

	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpu to drain that CPU pcps and on_each_cpu_mask
	 * disables preemption as part of its processing
	 */
	for_each_online_cpu(cpu) {
		struct per_cpu_pages *pcp;
		struct zone *z;
		bool has_pcps = false;

		if (force_all_cpus) {
			/*
			 * The pcp.count check is racy, some callers need a
			 * guarantee that no cpu is missed.
			 */
			has_pcps = true;
		} else if (zone) {
			pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
			if (pcp->count)
				has_pcps = true;
		} else {
			for_each_populated_zone(z) {
				pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
				if (pcp->count) {
					has_pcps = true;
					break;
				}
			}
		}

		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}

	for_each_cpu(cpu, &cpus_with_pcps) {
		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);

		drain->zone = zone;
		INIT_WORK(&drain->work, drain_local_pages_wq);
		queue_work_on(cpu, mm_percpu_wq, &drain->work);
	}
	for_each_cpu(cpu, &cpus_with_pcps)
		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);

	mutex_unlock(&pcpu_drain_mutex);
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * When zone parameter is non-NULL, spill just the single zone's pages.
 *
 * Note that this can be extremely slow as the draining happens in a workqueue.
 */
void drain_all_pages(struct zone *zone)
{
	__drain_all_pages(zone, false);
}
#ifdef CONFIG_HIBERNATION

/*
 * Touch the watchdog for every WD_PAGE_COUNT pages.
 */
#define WD_PAGE_COUNT	(128*1024)

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
	unsigned long flags;
	unsigned int order, t;
	struct page *page;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);

			if (!--page_count) {
				touch_nmi_watchdog();
				page_count = WD_PAGE_COUNT;
			}

			if (page_zone(page) != zone)
				continue;

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each_entry(page,
				&zone->free_area[order].free_list[t], lru) {
			unsigned long i;

			pfn = page_to_pfn(page);
			for (i = 0; i < (1UL << order); i++) {
				if (!--page_count) {
					touch_nmi_watchdog();
					page_count = WD_PAGE_COUNT;
				}
				swsusp_set_page_free(pfn_to_page(pfn + i));
			}
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_PM */
static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
							unsigned int order)
{
	int migratetype;

	if (!free_pcp_prepare(page, order))
		return false;

	migratetype = get_pfnblock_migratetype(page, pfn);
	set_pcppage_migratetype(page, migratetype);
	return true;
}
static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch)
{
	int min_nr_free, max_nr_free;

	/* Check for PCP disabled or boot pageset */
	if (unlikely(high < batch))
		return 1;

	/* Leave at least pcp->batch pages on the list */
	min_nr_free = batch;
	max_nr_free = high - batch;

	/*
	 * Double the number of pages freed each time there is subsequent
	 * freeing of pages without any allocation.
	 */
	batch <<= pcp->free_factor;
	if (batch < max_nr_free)
		pcp->free_factor++;
	batch = clamp(batch, min_nr_free, max_nr_free);

	return batch;
}
static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
{
	int high = READ_ONCE(pcp->high);

	if (unlikely(!high))
		return 0;

	if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
		return high;

	/*
	 * If reclaim is active, limit the number of pages that can be
	 * stored on pcp lists
	 */
	return min(READ_ONCE(pcp->batch) << 2, high);
}
static void free_unref_page_commit(struct page *page, unsigned long pfn,
				   int migratetype, unsigned int order)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	int high;
	int pindex;

	__count_vm_event(PGFREE);
	pcp = this_cpu_ptr(zone->per_cpu_pageset);
	pindex = order_to_pindex(migratetype, order);
	list_add(&page->lru, &pcp->lists[pindex]);
	pcp->count += 1 << order;
	high = nr_pcp_high(pcp, zone);
	if (pcp->count >= high) {
		int batch = READ_ONCE(pcp->batch);

		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp);
	}
}
void free_unref_page(struct page *page, unsigned int order)
{
	unsigned long flags;
	unsigned long pfn = page_to_pfn(page);
	int migratetype;

	if (!free_unref_page_prepare(page, pfn, order))
		return;

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Place ISOLATE pages on the isolated list because they are being
	 * offlined but treat HIGHATOMIC as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	migratetype = get_pcppage_migratetype(page);
	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
			return;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	local_lock_irqsave(&pagesets.lock, flags);
	free_unref_page_commit(page, pfn, migratetype, order);
	local_unlock_irqrestore(&pagesets.lock, flags);
}
/*
 * Free a list of 0-order pages
 */
void free_unref_page_list(struct list_head *list)
{
	struct page *page, *next;
	unsigned long flags, pfn;
	int batch_count = 0;
	int migratetype;

	/* Prepare pages for freeing */
	list_for_each_entry_safe(page, next, list, lru) {
		pfn = page_to_pfn(page);
		if (!free_unref_page_prepare(page, pfn, 0)) {
			list_del(&page->lru);
			continue;
		}

		/*
		 * Free isolated pages directly to the allocator, see
		 * comment in free_unref_page.
		 */
		migratetype = get_pcppage_migratetype(page);
		if (unlikely(is_migrate_isolate(migratetype))) {
			list_del(&page->lru);
			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
			continue;
		}

		set_page_private(page, pfn);
	}

	local_lock_irqsave(&pagesets.lock, flags);
	list_for_each_entry_safe(page, next, list, lru) {
		pfn = page_private(page);
		set_page_private(page, 0);

		/*
		 * Non-isolated types over MIGRATE_PCPTYPES get added
		 * to the MIGRATE_MOVABLE pcp list.
		 */
		migratetype = get_pcppage_migratetype(page);
		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
			migratetype = MIGRATE_MOVABLE;

		trace_mm_page_free_batched(page);
		free_unref_page_commit(page, pfn, migratetype, 0);

		/*
		 * Guard against excessive IRQ disabled times when we get
		 * a large list of pages to free.
		 */
		if (++batch_count == SWAP_CLUSTER_MAX) {
			local_unlock_irqrestore(&pagesets.lock, flags);
			batch_count = 0;
			local_lock_irqsave(&pagesets.lock, flags);
		}
	}
	local_unlock_irqrestore(&pagesets.lock, flags);
}
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
	split_page_owner(page, 1 << order);
	split_page_memcg(page, 1 << order);
}
EXPORT_SYMBOL_GPL(split_page);
int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/*
		 * Obey watermarks as if the page was being allocated. We can
		 * emulate a high-order watermark check with a raised order-0
		 * watermark, because we already know our high-order page
		 * exists.
		 */
		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	del_page_from_free_list(page, zone, order);

	/*
	 * Set the pageblock if the isolated page is at least half of a
	 * pageblock
	 */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
			    && !is_migrate_highatomic(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}
/**
 * __putback_isolated_page - Return a now-isolated page back where we got it
 * @page: Page that was isolated
 * @order: Order of the isolated page
 * @mt: The page's pageblock's migratetype
 *
 * This function is meant to return a page pulled from the free lists via
 * __isolate_free_page back to the free lists they were pulled from.
 */
void __putback_isolated_page(struct page *page, unsigned int order, int mt)
{
	struct zone *zone = page_zone(page);

	/* zone lock should be held when this function is called */
	lockdep_assert_held(&zone->lock);

	/* Return isolated page to tail of freelist. */
	__free_one_page(page, page_to_pfn(page), zone, order, mt,
			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
}
/*
 * Update NUMA hit/miss statistics
 *
 * Must be called with interrupts disabled.
 */
static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
				   long nr_account)
{
#ifdef CONFIG_NUMA
	enum numa_stat_item local_stat = NUMA_LOCAL;

	/* skip numa counters update if numa stats is disabled */
	if (!static_branch_likely(&vm_numa_stat_key))
		return;

	if (zone_to_nid(z) != numa_node_id())
		local_stat = NUMA_OTHER;

	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
		__count_numa_events(z, NUMA_HIT, nr_account);
	else {
		__count_numa_events(z, NUMA_MISS, nr_account);
		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
	}
	__count_numa_events(z, local_stat, nr_account);
#endif
}
/* Remove page from the per-cpu list, caller must protect the list */
static inline
struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
			int migratetype,
			unsigned int alloc_flags,
			struct per_cpu_pages *pcp,
			struct list_head *list)
{
	struct page *page;

	do {
		if (list_empty(list)) {
			int batch = READ_ONCE(pcp->batch);
			int alloced;

			/*
			 * Scale batch relative to order if batch implies
			 * free pages can be stored on the PCP. Batch can
			 * be 1 for small zones or for boot pagesets which
			 * should never store free pages as the pages may
			 * belong to arbitrary zones.
			 */
			if (batch > 1)
				batch = max(batch >> order, 2);
			alloced = rmqueue_bulk(zone, order,
					batch, list,
					migratetype, alloc_flags);

			pcp->count += alloced << order;
			if (unlikely(list_empty(list)))
				return NULL;
		}

		page = list_first_entry(list, struct page, lru);
		list_del(&page->lru);
		pcp->count -= 1 << order;
	} while (check_new_pcp(page));

	return page;
}
/* Lock and remove page from the per-cpu list */
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, int migratetype,
			unsigned int alloc_flags)
{
	struct per_cpu_pages *pcp;
	struct list_head *list;
	struct page *page;
	unsigned long flags;

	local_lock_irqsave(&pagesets.lock, flags);

	/*
	 * On allocation, reduce the number of pages that are batch freed.
	 * See nr_pcp_free() where free_factor is increased for subsequent
	 * frees.
	 */
	pcp = this_cpu_ptr(zone->per_cpu_pageset);
	pcp->free_factor >>= 1;
	list = &pcp->lists[order_to_pindex(migratetype, order)];
	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
	local_unlock_irqrestore(&pagesets.lock, flags);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
		zone_statistics(preferred_zone, zone, 1);
	}
	return page;
}
/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
static inline
struct page *rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, unsigned int alloc_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;

	if (likely(pcp_allowed_order(order))) {
		/*
		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
		 * we need to skip it when CMA area isn't allowed.
		 */
		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
				migratetype != MIGRATE_MOVABLE) {
			page = rmqueue_pcplist(preferred_zone, zone, order,
					gfp_flags, migratetype, alloc_flags);
			goto out;
		}
	}

	/*
	 * We most definitely don't want callers attempting to
	 * allocate greater than order-1 page units with __GFP_NOFAIL.
	 */
	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
	spin_lock_irqsave(&zone->lock, flags);

	do {
		page = NULL;
		/*
		 * order-0 request can reach here when the pcplist is skipped
		 * due to non-CMA allocation context. HIGHATOMIC area is
		 * reserved for high-order atomic allocation, so order-0
		 * request should skip it.
		 */
		if (order > 0 && alloc_flags & ALLOC_HARDER) {
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
			if (page)
				trace_mm_page_alloc_zone_locked(page, order, migratetype);
		}
		if (!page)
			page = __rmqueue(zone, order, migratetype, alloc_flags);
	} while (page && check_new_pages(page, order));
	if (!page)
		goto failed;

	__mod_zone_freepage_state(zone, -(1 << order),
				  get_pcppage_migratetype(page));
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
	zone_statistics(preferred_zone, zone, 1);

out:
	/* Separate test+clear to avoid unnecessary atomics */
	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
	}

	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
	return page;

failed:
	spin_unlock_irqrestore(&zone->lock, flags);
	return NULL;
}
3747 struct fault_attr attr
;
3749 bool ignore_gfp_highmem
;
3750 bool ignore_gfp_reclaim
;
3752 } fail_page_alloc
= {
3753 .attr
= FAULT_ATTR_INITIALIZER
,
3754 .ignore_gfp_reclaim
= true,
3755 .ignore_gfp_highmem
= true,
3759 static int __init
setup_fail_page_alloc(char *str
)
3761 return setup_fault_attr(&fail_page_alloc
.attr
, str
);
3763 __setup("fail_page_alloc=", setup_fail_page_alloc
);
3765 static bool __should_fail_alloc_page(gfp_t gfp_mask
, unsigned int order
)
3767 if (order
< fail_page_alloc
.min_order
)
3769 if (gfp_mask
& __GFP_NOFAIL
)
3771 if (fail_page_alloc
.ignore_gfp_highmem
&& (gfp_mask
& __GFP_HIGHMEM
))
3773 if (fail_page_alloc
.ignore_gfp_reclaim
&&
3774 (gfp_mask
& __GFP_DIRECT_RECLAIM
))
3777 return should_fail(&fail_page_alloc
.attr
, 1 << order
);
3780 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3782 static int __init
fail_page_alloc_debugfs(void)
3784 umode_t mode
= S_IFREG
| 0600;
3787 dir
= fault_create_debugfs_attr("fail_page_alloc", NULL
,
3788 &fail_page_alloc
.attr
);
3790 debugfs_create_bool("ignore-gfp-wait", mode
, dir
,
3791 &fail_page_alloc
.ignore_gfp_reclaim
);
3792 debugfs_create_bool("ignore-gfp-highmem", mode
, dir
,
3793 &fail_page_alloc
.ignore_gfp_highmem
);
3794 debugfs_create_u32("min-order", mode
, dir
, &fail_page_alloc
.min_order
);
3799 late_initcall(fail_page_alloc_debugfs
);
3801 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3803 #else /* CONFIG_FAIL_PAGE_ALLOC */
3805 static inline bool __should_fail_alloc_page(gfp_t gfp_mask
, unsigned int order
)
3810 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3812 noinline
bool should_fail_alloc_page(gfp_t gfp_mask
, unsigned int order
)
3814 return __should_fail_alloc_page(gfp_mask
, order
);
3816 ALLOW_ERROR_INJECTION(should_fail_alloc_page
, TRUE
);
static inline long __zone_watermark_unusable_free(struct zone *z,
				unsigned int order, unsigned int alloc_flags)
{
	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
	long unusable_free = (1 << order) - 1;

	/*
	 * If the caller does not have rights to ALLOC_HARDER then subtract
	 * the high-atomic reserves. This will over-estimate the size of the
	 * atomic reserve but it avoids a search.
	 */
	if (likely(!alloc_harder))
		unusable_free += z->nr_reserved_highatomic;

#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	return unusable_free;
}
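/*
 * Example (assumed values): an order-3 request discounts (1 << 3) - 1 == 7
 * pages up front; without ALLOC_HARDER the whole highatomic reserve is also
 * treated as unusable, and without ALLOC_CMA so are all free CMA pages.
 */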
/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages)
{
	long min = mark;
	int o;
	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));

	/* free_pages may go negative - that's OK */
	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;

	if (unlikely(alloc_harder)) {
		/*
		 * OOM victims can try even harder than normal ALLOC_HARDER
		 * users on the grounds that it's definitely going to be in
		 * the exit path shortly and free memory. Any allocation it
		 * makes during the free path will be small and short-lived.
		 */
		if (alloc_flags & ALLOC_OOM)
			min -= min / 2;
		else
			min -= min / 4;
	}

	/*
	 * Check watermarks for an order-0 allocation request. If these
	 * are not met, then a high-order request also cannot go ahead
	 * even if a suitable page happened to be free.
	 */
	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
		return false;

	/* If this is an order-0 request then the watermark is fine */
	if (!order)
		return true;

	/* For a high-order request, check at least one suitable page is free */
	for (o = order; o < MAX_ORDER; o++) {
		struct free_area *area = &z->free_area[o];
		int mt;

		if (!area->nr_free)
			continue;

		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
			if (!free_area_empty(area, mt))
				return true;
		}

#ifdef CONFIG_CMA
		if ((alloc_flags & ALLOC_CMA) &&
		    !free_area_empty(area, MIGRATE_CMA)) {
			return true;
		}
#endif
		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
			return true;
	}
	return false;
}
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
		      int highest_zoneidx, unsigned int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}

static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
				unsigned long mark, int highest_zoneidx,
				unsigned int alloc_flags, gfp_t gfp_mask)
{
	long free_pages;

	free_pages = zone_page_state(z, NR_FREE_PAGES);

	/*
	 * Fast check for order-0 only. If this fails then the reserves
	 * need to be calculated.
	 */
	if (!order) {
		long fast_free;

		fast_free = free_pages;
		fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
		if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
			return true;
	}

	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
					free_pages))
		return true;
	/*
	 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
	 * when checking the min watermark. The min watermark is the
	 * point where boosting is ignored so that kswapd is woken up
	 * when below the low watermark.
	 */
	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
		mark = z->_watermark[WMARK_MIN];
		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
					alloc_flags, free_pages);
	}

	return false;
}

bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			unsigned long mark, int highest_zoneidx)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
								free_pages);
}

static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
				node_reclaim_distance;
}
#else	/* CONFIG_NUMA */
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return true;
}
#endif	/* CONFIG_NUMA */
/*
 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
 * fragmentation is subtle. If the preferred zone was HIGHMEM then
 * premature use of a lower zone may cause lowmem pressure problems that
 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
 * probably too small. It only makes sense to spread allocations to avoid
 * fragmentation between the Normal and DMA32 zones.
 */
static inline unsigned int
alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
{
	unsigned int alloc_flags;

	/*
	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
	 * to save a branch.
	 */
	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);

#ifdef CONFIG_ZONE_DMA32
	if (!zone)
		return alloc_flags;

	if (zone_idx(zone) != ZONE_NORMAL)
		return alloc_flags;

	/*
	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
	 * on UMA that if Normal is populated then so is DMA32.
	 */
	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
	if (nr_online_nodes > 1 && !populated_zone(--zone))
		return alloc_flags;

	alloc_flags |= ALLOC_NOFRAGMENT;
#endif /* CONFIG_ZONE_DMA32 */
	return alloc_flags;
}

/* Must be called after current_gfp_context() which can change gfp_mask */
static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
						  unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
						const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;
	struct pglist_data *last_pgdat_dirty_limit = NULL;
	bool no_fallback;

retry:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
	 */
	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
	z = ac->preferred_zoneref;
	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
					ac->nodemask) {
		struct page *page;
		unsigned long mark;

		if (cpusets_enabled() &&
			(alloc_flags & ALLOC_CPUSET) &&
			!__cpuset_zone_allowed(zone, gfp_mask))
				continue;
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a node that is within its dirty
		 * limit, such that no single node holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the node's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * XXX: For now, allow allocations to potentially
		 * exceed the per-node dirty limit in the slowpath
		 * (spread_dirty_pages unset) before going into reclaim,
		 * which is important when on a NUMA setup the allowed
		 * nodes are together not big enough to reach the
		 * global limit. The proper fix for these situations
		 * will require awareness of nodes in the
		 * dirty-throttling and the flusher threads.
		 */
		if (ac->spread_dirty_pages) {
			if (last_pgdat_dirty_limit == zone->zone_pgdat)
				continue;

			if (!node_dirty_ok(zone->zone_pgdat)) {
				last_pgdat_dirty_limit = zone->zone_pgdat;
				continue;
			}
		}

		if (no_fallback && nr_online_nodes > 1 &&
		    zone != ac->preferred_zoneref->zone) {
			int local_nid;

			/*
			 * If moving to a remote node, retry but allow
			 * fragmenting fallbacks. Locality is more important
			 * than fragmentation avoidance.
			 */
			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
			if (zone_to_nid(zone) != local_nid) {
				alloc_flags &= ~ALLOC_NOFRAGMENT;
				goto retry;
			}
		}

		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
		if (!zone_watermark_fast(zone, order, mark,
				       ac->highest_zoneidx, alloc_flags,
				       gfp_mask)) {
			int ret;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
			/*
			 * Watermark failed for this zone, but see if we can
			 * grow this zone if it contains deferred pages.
			 */
			if (static_branch_unlikely(&deferred_pages)) {
				if (_deferred_grow_zone(zone, order))
					goto try_this_zone;
			}
#endif
			/* Checked here to keep the fast path fast */
			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
			if (alloc_flags & ALLOC_NO_WATERMARKS)
				goto try_this_zone;

			if (!node_reclaim_enabled() ||
			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
				continue;

			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
			switch (ret) {
			case NODE_RECLAIM_NOSCAN:
				/* did not scan */
				continue;
			case NODE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				continue;
			default:
				/* did we reclaim enough */
				if (zone_watermark_ok(zone, order, mark,
					ac->highest_zoneidx, alloc_flags))
					goto try_this_zone;

				continue;
			}
		}

try_this_zone:
		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
				gfp_mask, alloc_flags, ac->migratetype);
		if (page) {
			prep_new_page(page, order, gfp_mask, alloc_flags);

			/*
			 * If this is a high-order atomic allocation then check
			 * if the pageblock should be reserved for the future
			 */
			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
				reserve_highatomic_pageblock(page, zone, order);

			return page;
		} else {
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
			/* Try again if zone has deferred pages */
			if (static_branch_unlikely(&deferred_pages)) {
				if (_deferred_grow_zone(zone, order))
					goto try_this_zone;
			}
#endif
		}
	}

	/*
	 * It's possible on a UMA machine to get through all zones that are
	 * fragmented. If avoiding fragmentation, reset and try again.
	 */
	if (no_fallback) {
		alloc_flags &= ~ALLOC_NOFRAGMENT;
		goto retry;
	}

	return NULL;
}
static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned int filter = SHOW_MEM_FILTER_NODES;

	/*
	 * This documents exceptions given to allocations in certain
	 * contexts that are allowed to allocate outside current's set
	 * of allowed nodes.
	 */
	if (!(gfp_mask & __GFP_NOMEMALLOC))
		if (tsk_is_oom_victim(current) ||
		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
			filter &= ~SHOW_MEM_FILTER_NODES;
	if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
		filter &= ~SHOW_MEM_FILTER_NODES;

	show_mem(filter, nodemask);
}

void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);

	if ((gfp_mask & __GFP_NOWARN) ||
	     !__ratelimit(&nopage_rs) ||
	     ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
			current->comm, &vaf, gfp_mask, &gfp_mask,
			nodemask_pr_args(nodemask));
	va_end(args);

	cpuset_print_current_mems_allowed();
	pr_cont("\n");
	dump_stack();
	warn_alloc_show_mem(gfp_mask, nodemask);
}
static inline struct page *
__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
			      unsigned int alloc_flags,
			      const struct alloc_context *ac)
{
	struct page *page;

	page = get_page_from_freelist(gfp_mask, order,
			alloc_flags|ALLOC_CPUSET, ac);
	/*
	 * fallback to ignore cpuset restriction if our nodes
	 * are depleted
	 */
	if (!page)
		page = get_page_from_freelist(gfp_mask, order,
				alloc_flags, ac);

	return page;
}
static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	const struct alloc_context *ac, unsigned long *did_some_progress)
{
	struct oom_control oc = {
		.zonelist = ac->zonelist,
		.nodemask = ac->nodemask,
		.memcg = NULL,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct page *page;

	*did_some_progress = 0;

	/*
	 * Acquire the oom lock.  If that fails, somebody else is
	 * making progress for us.
	 */
	if (!mutex_trylock(&oom_lock)) {
		*did_some_progress = 1;
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure. But make sure that this reclaim
	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
	 * allocation which will never fail due to oom_lock already held.
	 */
	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
				      ~__GFP_DIRECT_RECLAIM, order,
				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
	if (page)
		goto out;

	/* Coredumps can quickly deplete all memory reserves */
	if (current->flags & PF_DUMPCORE)
		goto out;
	/* The OOM killer will not help higher order allocs */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		goto out;
	/*
	 * We have already exhausted all our reclaim opportunities without any
	 * success so it is time to admit defeat. We will skip the OOM killer
	 * because it is very likely that the caller has a more reasonable
	 * fallback than shooting a random task.
	 *
	 * The OOM killer may not free memory on a specific node.
	 */
	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
		goto out;
	/* The OOM killer does not needlessly kill tasks for lowmem */
	if (ac->highest_zoneidx < ZONE_NORMAL)
		goto out;
	if (pm_suspended_storage())
		goto out;
	/*
	 * XXX: GFP_NOFS allocations should rather fail than rely on
	 * other request to make a forward progress.
	 * We are in an unfortunate situation where out_of_memory cannot
	 * do much for this context but let's try it to at least get
	 * access to memory reserved if the current task is killed (see
	 * out_of_memory). Once filesystems are ready to handle allocation
	 * failures more gracefully we should just bail out here.
	 */

	/* Exhausted what can be done so it's blame time */
	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
		*did_some_progress = 1;

		/*
		 * Help non-failing allocations by giving them access to memory
		 * reserves
		 */
		if (gfp_mask & __GFP_NOFAIL)
			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
					ALLOC_NO_WATERMARKS, ac);
	}
out:
	mutex_unlock(&oom_lock);
	return page;
}
/*
 * Maximum number of compaction retries with a progress before OOM
 * killer is considered as the only way to move forward.
 */
#define MAX_COMPACT_RETRIES 16

#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
4346 static struct page
*
4347 __alloc_pages_direct_compact(gfp_t gfp_mask
, unsigned int order
,
4348 unsigned int alloc_flags
, const struct alloc_context
*ac
,
4349 enum compact_priority prio
, enum compact_result
*compact_result
)
4351 struct page
*page
= NULL
;
4352 unsigned long pflags
;
4353 unsigned int noreclaim_flag
;
4358 psi_memstall_enter(&pflags
);
4359 noreclaim_flag
= memalloc_noreclaim_save();
4361 *compact_result
= try_to_compact_pages(gfp_mask
, order
, alloc_flags
, ac
,
4364 memalloc_noreclaim_restore(noreclaim_flag
);
4365 psi_memstall_leave(&pflags
);
4367 if (*compact_result
== COMPACT_SKIPPED
)
4370 * At least in one zone compaction wasn't deferred or skipped, so let's
4371 * count a compaction stall
4373 count_vm_event(COMPACTSTALL
);
4375 /* Prep a captured page if available */
4377 prep_new_page(page
, order
, gfp_mask
, alloc_flags
);
4379 /* Try get a page from the freelist if available */
4381 page
= get_page_from_freelist(gfp_mask
, order
, alloc_flags
, ac
);
4384 struct zone
*zone
= page_zone(page
);
4386 zone
->compact_blockskip_flush
= false;
4387 compaction_defer_reset(zone
, order
, true);
4388 count_vm_event(COMPACTSUCCESS
);
4393 * It's bad if compaction run occurs and fails. The most likely reason
4394 * is that pages exist, but not enough to satisfy watermarks.
4396 count_vm_event(COMPACTFAIL
);
4404 should_compact_retry(struct alloc_context
*ac
, int order
, int alloc_flags
,
4405 enum compact_result compact_result
,
4406 enum compact_priority
*compact_priority
,
4407 int *compaction_retries
)
4409 int max_retries
= MAX_COMPACT_RETRIES
;
4412 int retries
= *compaction_retries
;
4413 enum compact_priority priority
= *compact_priority
;
4418 if (fatal_signal_pending(current
))
4421 if (compaction_made_progress(compact_result
))
4422 (*compaction_retries
)++;
4425 * compaction considers all the zone as desperately out of memory
4426 * so it doesn't really make much sense to retry except when the
4427 * failure could be caused by insufficient priority
4429 if (compaction_failed(compact_result
))
4430 goto check_priority
;
4433 * compaction was skipped because there are not enough order-0 pages
4434 * to work with, so we retry only if it looks like reclaim can help.
4436 if (compaction_needs_reclaim(compact_result
)) {
4437 ret
= compaction_zonelist_suitable(ac
, order
, alloc_flags
);
4442 * make sure the compaction wasn't deferred or didn't bail out early
4443 * due to locks contention before we declare that we should give up.
4444 * But the next retry should use a higher priority if allowed, so
4445 * we don't just keep bailing out endlessly.
4447 if (compaction_withdrawn(compact_result
)) {
4448 goto check_priority
;
4452 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4453 * costly ones because they are de facto nofail and invoke OOM
4454 * killer to move on while costly can fail and users are ready
4455 * to cope with that. 1/4 retries is rather arbitrary but we
4456 * would need much more detailed feedback from compaction to
4457 * make a better decision.
4459 if (order
> PAGE_ALLOC_COSTLY_ORDER
)
4461 if (*compaction_retries
<= max_retries
) {
4467 * Make sure there are attempts at the highest priority if we exhausted
4468 * all retries or failed at the lower priorities.
4471 min_priority
= (order
> PAGE_ALLOC_COSTLY_ORDER
) ?
4472 MIN_COMPACT_COSTLY_PRIORITY
: MIN_COMPACT_PRIORITY
;
4474 if (*compact_priority
> min_priority
) {
4475 (*compact_priority
)--;
4476 *compaction_retries
= 0;
4480 trace_compact_retry(order
, priority
, compact_result
, retries
, max_retries
, ret
);
4484 static inline struct page
*
4485 __alloc_pages_direct_compact(gfp_t gfp_mask
, unsigned int order
,
4486 unsigned int alloc_flags
, const struct alloc_context
*ac
,
4487 enum compact_priority prio
, enum compact_result
*compact_result
)
4489 *compact_result
= COMPACT_SKIPPED
;
4494 should_compact_retry(struct alloc_context
*ac
, unsigned int order
, int alloc_flags
,
4495 enum compact_result compact_result
,
4496 enum compact_priority
*compact_priority
,
4497 int *compaction_retries
)
4502 if (!order
|| order
> PAGE_ALLOC_COSTLY_ORDER
)
4506 * There are setups with compaction disabled which would prefer to loop
4507 * inside the allocator rather than hit the oom killer prematurely.
4508 * Let's give them a good hope and keep retrying while the order-0
4509 * watermarks are OK.
4511 for_each_zone_zonelist_nodemask(zone
, z
, ac
->zonelist
,
4512 ac
->highest_zoneidx
, ac
->nodemask
) {
4513 if (zone_watermark_ok(zone
, 0, min_wmark_pages(zone
),
4514 ac
->highest_zoneidx
, alloc_flags
))
#endif /* CONFIG_COMPACTION */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map __fs_reclaim_map =
	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);

static bool __need_reclaim(gfp_t gfp_mask)
{
	/* no reclaim without waiting on it */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	/* this guy won't enter reclaim */
	if (current->flags & PF_MEMALLOC)
		return false;

	if (gfp_mask & __GFP_NOLOCKDEP)
		return false;

	return true;
}

void __fs_reclaim_acquire(unsigned long ip)
{
	lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
}

void __fs_reclaim_release(unsigned long ip)
{
	lock_release(&__fs_reclaim_map, ip);
}

void fs_reclaim_acquire(gfp_t gfp_mask)
{
	gfp_mask = current_gfp_context(gfp_mask);

	if (__need_reclaim(gfp_mask)) {
		if (gfp_mask & __GFP_FS)
			__fs_reclaim_acquire(_RET_IP_);

#ifdef CONFIG_MMU_NOTIFIER
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#endif

	}
}
EXPORT_SYMBOL_GPL(fs_reclaim_acquire);

void fs_reclaim_release(gfp_t gfp_mask)
{
	gfp_mask = current_gfp_context(gfp_mask);

	if (__need_reclaim(gfp_mask)) {
		if (gfp_mask & __GFP_FS)
			__fs_reclaim_release(_RET_IP_);
	}
}
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
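
/*
 * Illustrative sketch, not part of this file: a subsystem outside the
 * allocator can use fs_reclaim_acquire()/fs_reclaim_release() to teach
 * lockdep that a code path may be entered from direct reclaim. The helper
 * name below is hypothetical; the pattern mirrors users that prime the
 * fs_reclaim dependency once, e.g. at init time.
 */
#if 0	/* example only, never compiled */
#include <linux/sched/mm.h>
#include <linux/gfp.h>

static void example_prime_fs_reclaim_dependency(void)
{
	/* Record "this context may allocate with GFP_KERNEL" for lockdep. */
	fs_reclaim_acquire(GFP_KERNEL);
	fs_reclaim_release(GFP_KERNEL);
}
#endif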
/* Perform direct synchronous page reclaim */
static unsigned long
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
					const struct alloc_context *ac)
{
	unsigned int noreclaim_flag;
	unsigned long pflags, progress;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	psi_memstall_enter(&pflags);
	fs_reclaim_acquire(gfp_mask);
	noreclaim_flag = memalloc_noreclaim_save();

	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
								ac->nodemask);

	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(gfp_mask);
	psi_memstall_leave(&pflags);

	cond_resched();

	return progress;
}
/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		unsigned long *did_some_progress)
{
	struct page *page = NULL;
	bool drained = false;

	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
	if (unlikely(!(*did_some_progress)))
		return NULL;

retry:
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists or in high alloc reserves.
	 * Shrink them and try again
	 */
	if (!page && !drained) {
		unreserve_highatomic_pageblock(ac, false);
		drain_all_pages(NULL);
		drained = true;
		goto retry;
	}

	return page;
}

static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
			     const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *last_pgdat = NULL;
	enum zone_type highest_zoneidx = ac->highest_zoneidx;

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
					ac->nodemask) {
		if (last_pgdat != zone->zone_pgdat)
			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
		last_pgdat = zone->zone_pgdat;
	}
}
static inline unsigned int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

	/*
	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
	 * to save two branches.
	 */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int)
		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));

	if (gfp_mask & __GFP_ATOMIC) {
		/*
		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
		 * if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
		 * comment for __cpuset_node_allowed().
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && in_task())
		alloc_flags |= ALLOC_HARDER;

	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);

	return alloc_flags;
}
static bool oom_reserves_allowed(struct task_struct *tsk)
{
	if (!tsk_is_oom_victim(tsk))
		return false;

	/*
	 * !MMU doesn't have oom reaper so give access to memory reserves
	 * only to the thread with TIF_MEMDIE set
	 */
	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
		return false;

	return true;
}

/*
 * Distinguish requests which really need access to full memory
 * reserves from oom victims which can live with a portion of it
 */
static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
{
	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
		return 0;
	if (gfp_mask & __GFP_MEMALLOC)
		return ALLOC_NO_WATERMARKS;
	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
		return ALLOC_NO_WATERMARKS;
	if (!in_interrupt()) {
		if (current->flags & PF_MEMALLOC)
			return ALLOC_NO_WATERMARKS;
		else if (oom_reserves_allowed(current))
			return ALLOC_NO_WATERMARKS;
	}

	return 0;
}

bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!__gfp_pfmemalloc_flags(gfp_mask);
}
4740 * Checks whether it makes sense to retry the reclaim to make a forward progress
4741 * for the given allocation request.
4743 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4744 * without success, or when we couldn't even meet the watermark if we
4745 * reclaimed all remaining pages on the LRU lists.
4747 * Returns true if a retry is viable or false to enter the oom path.
4750 should_reclaim_retry(gfp_t gfp_mask
, unsigned order
,
4751 struct alloc_context
*ac
, int alloc_flags
,
4752 bool did_some_progress
, int *no_progress_loops
)
4759 * Costly allocations might have made a progress but this doesn't mean
4760 * their order will become available due to high fragmentation so
4761 * always increment the no progress counter for them
4763 if (did_some_progress
&& order
<= PAGE_ALLOC_COSTLY_ORDER
)
4764 *no_progress_loops
= 0;
4766 (*no_progress_loops
)++;
4769 * Make sure we converge to OOM if we cannot make any progress
4770 * several times in the row.
4772 if (*no_progress_loops
> MAX_RECLAIM_RETRIES
) {
4773 /* Before OOM, exhaust highatomic_reserve */
4774 return unreserve_highatomic_pageblock(ac
, true);
4778 * Keep reclaiming pages while there is a chance this will lead
4779 * somewhere. If none of the target zones can satisfy our allocation
4780 * request even if all reclaimable pages are considered then we are
4781 * screwed and have to go OOM.
4783 for_each_zone_zonelist_nodemask(zone
, z
, ac
->zonelist
,
4784 ac
->highest_zoneidx
, ac
->nodemask
) {
4785 unsigned long available
;
4786 unsigned long reclaimable
;
4787 unsigned long min_wmark
= min_wmark_pages(zone
);
4790 available
= reclaimable
= zone_reclaimable_pages(zone
);
4791 available
+= zone_page_state_snapshot(zone
, NR_FREE_PAGES
);
4794 * Would the allocation succeed if we reclaimed all
4795 * reclaimable pages?
4797 wmark
= __zone_watermark_ok(zone
, order
, min_wmark
,
4798 ac
->highest_zoneidx
, alloc_flags
, available
);
4799 trace_reclaim_retry_zone(z
, order
, reclaimable
,
4800 available
, min_wmark
, *no_progress_loops
, wmark
);
4803 * If we didn't make any progress and have a lot of
4804 * dirty + writeback pages then we should wait for
4805 * an IO to complete to slow down the reclaim and
4806 * prevent from pre mature OOM
4808 if (!did_some_progress
) {
4809 unsigned long write_pending
;
4811 write_pending
= zone_page_state_snapshot(zone
,
4812 NR_ZONE_WRITE_PENDING
);
4814 if (2 * write_pending
> reclaimable
) {
4815 congestion_wait(BLK_RW_ASYNC
, HZ
/10);
4827 * Memory allocation/reclaim might be called from a WQ context and the
4828 * current implementation of the WQ concurrency control doesn't
4829 * recognize that a particular WQ is congested if the worker thread is
4830 * looping without ever sleeping. Therefore we have to do a short sleep
4831 * here rather than calling cond_resched().
4833 if (current
->flags
& PF_WQ_WORKER
)
4834 schedule_timeout_uninterruptible(1);
4841 check_retry_cpuset(int cpuset_mems_cookie
, struct alloc_context
*ac
)
4844 * It's possible that cpuset's mems_allowed and the nodemask from
4845 * mempolicy don't intersect. This should be normally dealt with by
4846 * policy_nodemask(), but it's possible to race with cpuset update in
4847 * such a way the check therein was true, and then it became false
4848 * before we got our cpuset_mems_cookie here.
4849 * This assumes that for all allocations, ac->nodemask can come only
4850 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4851 * when it does not intersect with the cpuset restrictions) or the
4852 * caller can deal with a violated nodemask.
4854 if (cpusets_enabled() && ac
->nodemask
&&
4855 !cpuset_nodemask_valid_mems_allowed(ac
->nodemask
)) {
4856 ac
->nodemask
= NULL
;
4861 * When updating a task's mems_allowed or mempolicy nodemask, it is
4862 * possible to race with parallel threads in such a way that our
4863 * allocation can fail while the mask is being updated. If we are about
4864 * to fail, check if the cpuset changed during allocation and if so,
4867 if (read_mems_allowed_retry(cpuset_mems_cookie
))
4873 static inline struct page
*
4874 __alloc_pages_slowpath(gfp_t gfp_mask
, unsigned int order
,
4875 struct alloc_context
*ac
)
4877 bool can_direct_reclaim
= gfp_mask
& __GFP_DIRECT_RECLAIM
;
4878 const bool costly_order
= order
> PAGE_ALLOC_COSTLY_ORDER
;
4879 struct page
*page
= NULL
;
4880 unsigned int alloc_flags
;
4881 unsigned long did_some_progress
;
4882 enum compact_priority compact_priority
;
4883 enum compact_result compact_result
;
4884 int compaction_retries
;
4885 int no_progress_loops
;
4886 unsigned int cpuset_mems_cookie
;
4890 * We also sanity check to catch abuse of atomic reserves being used by
4891 * callers that are not in atomic context.
4893 if (WARN_ON_ONCE((gfp_mask
& (__GFP_ATOMIC
|__GFP_DIRECT_RECLAIM
)) ==
4894 (__GFP_ATOMIC
|__GFP_DIRECT_RECLAIM
)))
4895 gfp_mask
&= ~__GFP_ATOMIC
;
4898 compaction_retries
= 0;
4899 no_progress_loops
= 0;
4900 compact_priority
= DEF_COMPACT_PRIORITY
;
4901 cpuset_mems_cookie
= read_mems_allowed_begin();
4904 * The fast path uses conservative alloc_flags to succeed only until
4905 * kswapd needs to be woken up, and to avoid the cost of setting up
4906 * alloc_flags precisely. So we do that now.
4908 alloc_flags
= gfp_to_alloc_flags(gfp_mask
);
4911 * We need to recalculate the starting point for the zonelist iterator
4912 * because we might have used different nodemask in the fast path, or
4913 * there was a cpuset modification and we are retrying - otherwise we
4914 * could end up iterating over non-eligible zones endlessly.
4916 ac
->preferred_zoneref
= first_zones_zonelist(ac
->zonelist
,
4917 ac
->highest_zoneidx
, ac
->nodemask
);
4918 if (!ac
->preferred_zoneref
->zone
)
4921 if (alloc_flags
& ALLOC_KSWAPD
)
4922 wake_all_kswapds(order
, gfp_mask
, ac
);
4925 * The adjusted alloc_flags might result in immediate success, so try
4928 page
= get_page_from_freelist(gfp_mask
, order
, alloc_flags
, ac
);
4933 * For costly allocations, try direct compaction first, as it's likely
4934 * that we have enough base pages and don't need to reclaim. For non-
4935 * movable high-order allocations, do that as well, as compaction will
4936 * try prevent permanent fragmentation by migrating from blocks of the
4938 * Don't try this for allocations that are allowed to ignore
4939 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4941 if (can_direct_reclaim
&&
4943 (order
> 0 && ac
->migratetype
!= MIGRATE_MOVABLE
))
4944 && !gfp_pfmemalloc_allowed(gfp_mask
)) {
4945 page
= __alloc_pages_direct_compact(gfp_mask
, order
,
4947 INIT_COMPACT_PRIORITY
,
4953 * Checks for costly allocations with __GFP_NORETRY, which
4954 * includes some THP page fault allocations
4956 if (costly_order
&& (gfp_mask
& __GFP_NORETRY
)) {
4958 * If allocating entire pageblock(s) and compaction
4959 * failed because all zones are below low watermarks
4960 * or is prohibited because it recently failed at this
4961 * order, fail immediately unless the allocator has
4962 * requested compaction and reclaim retry.
4965 * - potentially very expensive because zones are far
4966 * below their low watermarks or this is part of very
4967 * bursty high order allocations,
4968 * - not guaranteed to help because isolate_freepages()
4969 * may not iterate over freed pages as part of its
4971 * - unlikely to make entire pageblocks free on its
4974 if (compact_result
== COMPACT_SKIPPED
||
4975 compact_result
== COMPACT_DEFERRED
)
4979 * Looks like reclaim/compaction is worth trying, but
4980 * sync compaction could be very expensive, so keep
4981 * using async compaction.
4983 compact_priority
= INIT_COMPACT_PRIORITY
;
4988 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4989 if (alloc_flags
& ALLOC_KSWAPD
)
4990 wake_all_kswapds(order
, gfp_mask
, ac
);
4992 reserve_flags
= __gfp_pfmemalloc_flags(gfp_mask
);
4994 alloc_flags
= gfp_to_alloc_flags_cma(gfp_mask
, reserve_flags
);
4997 * Reset the nodemask and zonelist iterators if memory policies can be
4998 * ignored. These allocations are high priority and system rather than
5001 if (!(alloc_flags
& ALLOC_CPUSET
) || reserve_flags
) {
5002 ac
->nodemask
= NULL
;
5003 ac
->preferred_zoneref
= first_zones_zonelist(ac
->zonelist
,
5004 ac
->highest_zoneidx
, ac
->nodemask
);
5007 /* Attempt with potentially adjusted zonelist and alloc_flags */
5008 page
= get_page_from_freelist(gfp_mask
, order
, alloc_flags
, ac
);
5012 /* Caller is not willing to reclaim, we can't balance anything */
5013 if (!can_direct_reclaim
)
5016 /* Avoid recursion of direct reclaim */
5017 if (current
->flags
& PF_MEMALLOC
)
5020 /* Try direct reclaim and then allocating */
5021 page
= __alloc_pages_direct_reclaim(gfp_mask
, order
, alloc_flags
, ac
,
5022 &did_some_progress
);
5026 /* Try direct compaction and then allocating */
5027 page
= __alloc_pages_direct_compact(gfp_mask
, order
, alloc_flags
, ac
,
5028 compact_priority
, &compact_result
);
5032 /* Do not loop if specifically requested */
5033 if (gfp_mask
& __GFP_NORETRY
)
5037 * Do not retry costly high order allocations unless they are
5038 * __GFP_RETRY_MAYFAIL
5040 if (costly_order
&& !(gfp_mask
& __GFP_RETRY_MAYFAIL
))
5043 if (should_reclaim_retry(gfp_mask
, order
, ac
, alloc_flags
,
5044 did_some_progress
> 0, &no_progress_loops
))
5048 * It doesn't make any sense to retry for the compaction if the order-0
5049 * reclaim is not able to make any progress because the current
5050 * implementation of the compaction depends on the sufficient amount
5051 * of free memory (see __compaction_suitable)
5053 if (did_some_progress
> 0 &&
5054 should_compact_retry(ac
, order
, alloc_flags
,
5055 compact_result
, &compact_priority
,
5056 &compaction_retries
))
5060 /* Deal with possible cpuset update races before we start OOM killing */
5061 if (check_retry_cpuset(cpuset_mems_cookie
, ac
))
5064 /* Reclaim has failed us, start killing things */
5065 page
= __alloc_pages_may_oom(gfp_mask
, order
, ac
, &did_some_progress
);
5069 /* Avoid allocations with no watermarks from looping endlessly */
5070 if (tsk_is_oom_victim(current
) &&
5071 (alloc_flags
& ALLOC_OOM
||
5072 (gfp_mask
& __GFP_NOMEMALLOC
)))
5075 /* Retry as long as the OOM killer is making progress */
5076 if (did_some_progress
) {
5077 no_progress_loops
= 0;
5082 /* Deal with possible cpuset update races before we fail */
5083 if (check_retry_cpuset(cpuset_mems_cookie
, ac
))
5087 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
5090 if (gfp_mask
& __GFP_NOFAIL
) {
5092 * All existing users of the __GFP_NOFAIL are blockable, so warn
5093 * of any new users that actually require GFP_NOWAIT
5095 if (WARN_ON_ONCE(!can_direct_reclaim
))
5099 * PF_MEMALLOC request from this context is rather bizarre
5100 * because we cannot reclaim anything and only can loop waiting
5101 * for somebody to do a work for us
5103 WARN_ON_ONCE(current
->flags
& PF_MEMALLOC
);
5106 * non failing costly orders are a hard requirement which we
5107 * are not prepared for much so let's warn about these users
5108 * so that we can identify them and convert them to something
5111 WARN_ON_ONCE(order
> PAGE_ALLOC_COSTLY_ORDER
);
5114 * Help non-failing allocations by giving them access to memory
5115 * reserves but do not use ALLOC_NO_WATERMARKS because this
5116 * could deplete whole memory reserves which would just make
5117 * the situation worse
5119 page
= __alloc_pages_cpuset_fallback(gfp_mask
, order
, ALLOC_HARDER
, ac
);
5127 warn_alloc(gfp_mask
, ac
->nodemask
,
5128 "page allocation failure: order:%u", order
);
5133 static inline bool prepare_alloc_pages(gfp_t gfp_mask
, unsigned int order
,
5134 int preferred_nid
, nodemask_t
*nodemask
,
5135 struct alloc_context
*ac
, gfp_t
*alloc_gfp
,
5136 unsigned int *alloc_flags
)
5138 ac
->highest_zoneidx
= gfp_zone(gfp_mask
);
5139 ac
->zonelist
= node_zonelist(preferred_nid
, gfp_mask
);
5140 ac
->nodemask
= nodemask
;
5141 ac
->migratetype
= gfp_migratetype(gfp_mask
);
5143 if (cpusets_enabled()) {
5144 *alloc_gfp
|= __GFP_HARDWALL
;
5146 * When we are in the interrupt context, it is irrelevant
5147 * to the current task context. It means that any node ok.
5149 if (in_task() && !ac
->nodemask
)
5150 ac
->nodemask
= &cpuset_current_mems_allowed
;
5152 *alloc_flags
|= ALLOC_CPUSET
;
5155 fs_reclaim_acquire(gfp_mask
);
5156 fs_reclaim_release(gfp_mask
);
5158 might_sleep_if(gfp_mask
& __GFP_DIRECT_RECLAIM
);
5160 if (should_fail_alloc_page(gfp_mask
, order
))
5163 *alloc_flags
= gfp_to_alloc_flags_cma(gfp_mask
, *alloc_flags
);
5165 /* Dirty zone balancing only done in the fast path */
5166 ac
->spread_dirty_pages
= (gfp_mask
& __GFP_WRITE
);
5169 * The preferred zone is used for statistics but crucially it is
5170 * also used as the starting point for the zonelist iterator. It
5171 * may get reset for allocations that ignore memory policies.
5173 ac
->preferred_zoneref
= first_zones_zonelist(ac
->zonelist
,
5174 ac
->highest_zoneidx
, ac
->nodemask
);
	return true;
}

/**
 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
 * @gfp: GFP flags for the allocation
 * @preferred_nid: The preferred NUMA node ID to allocate from
 * @nodemask: Set of nodes to allocate from, may be NULL
 * @nr_pages: The number of pages desired on the list or array
 * @page_list: Optional list to store the allocated pages
 * @page_array: Optional array to store the pages
 *
 * This is a batched version of the page allocator that attempts to
 * allocate nr_pages quickly. Pages are added to page_list if page_list
 * is not NULL, otherwise it is assumed that the page_array is valid.
 *
 * For lists, nr_pages is the number of pages that should be allocated.
 *
 * For arrays, only NULL elements are populated with pages and nr_pages
 * is the maximum number of pages that will be stored in the array.
 *
 * Returns the number of pages on the list or array.
 */
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
			nodemask_t *nodemask, int nr_pages,
			struct list_head *page_list,
			struct page **page_array)
{
;
5208 struct per_cpu_pages
*pcp
;
5209 struct list_head
*pcp_list
;
5210 struct alloc_context ac
;
5212 unsigned int alloc_flags
= ALLOC_WMARK_LOW
;
5213 int nr_populated
= 0, nr_account
= 0;
5216 * Skip populated array elements to determine if any pages need
5217 * to be allocated before disabling IRQs.
5219 while (page_array
&& nr_populated
< nr_pages
&& page_array
[nr_populated
])
5222 /* No pages requested? */
5223 if (unlikely(nr_pages
<= 0))
5226 /* Already populated array? */
5227 if (unlikely(page_array
&& nr_pages
- nr_populated
== 0))
5230 /* Bulk allocator does not support memcg accounting. */
5231 if (memcg_kmem_enabled() && (gfp
& __GFP_ACCOUNT
))
5234 /* Use the single page allocator for one page. */
5235 if (nr_pages
- nr_populated
== 1)
5238 #ifdef CONFIG_PAGE_OWNER
5240 * PAGE_OWNER may recurse into the allocator to allocate space to
5241 * save the stack with pagesets.lock held. Releasing/reacquiring
5242 * removes much of the performance benefit of bulk allocation so
5243 * force the caller to allocate one page at a time as it'll have
5244 * similar performance to added complexity to the bulk allocator.
5246 if (static_branch_unlikely(&page_owner_inited
))
5250 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5251 gfp
&= gfp_allowed_mask
;
5253 if (!prepare_alloc_pages(gfp
, 0, preferred_nid
, nodemask
, &ac
, &alloc_gfp
, &alloc_flags
))
5257 /* Find an allowed local zone that meets the low watermark. */
5258 for_each_zone_zonelist_nodemask(zone
, z
, ac
.zonelist
, ac
.highest_zoneidx
, ac
.nodemask
) {
5261 if (cpusets_enabled() && (alloc_flags
& ALLOC_CPUSET
) &&
5262 !__cpuset_zone_allowed(zone
, gfp
)) {
5266 if (nr_online_nodes
> 1 && zone
!= ac
.preferred_zoneref
->zone
&&
5267 zone_to_nid(zone
) != zone_to_nid(ac
.preferred_zoneref
->zone
)) {
5271 mark
= wmark_pages(zone
, alloc_flags
& ALLOC_WMARK_MASK
) + nr_pages
;
5272 if (zone_watermark_fast(zone
, 0, mark
,
5273 zonelist_zone_idx(ac
.preferred_zoneref
),
5274 alloc_flags
, gfp
)) {
5280 * If there are no allowed local zones that meets the watermarks then
5281 * try to allocate a single page and reclaim if necessary.
5283 if (unlikely(!zone
))
5286 /* Attempt the batch allocation */
5287 local_lock_irqsave(&pagesets
.lock
, flags
);
5288 pcp
= this_cpu_ptr(zone
->per_cpu_pageset
);
5289 pcp_list
= &pcp
->lists
[order_to_pindex(ac
.migratetype
, 0)];
5291 while (nr_populated
< nr_pages
) {
5293 /* Skip existing pages */
5294 if (page_array
&& page_array
[nr_populated
]) {
5299 page
= __rmqueue_pcplist(zone
, 0, ac
.migratetype
, alloc_flags
,
5301 if (unlikely(!page
)) {
5302 /* Try and get at least one page */
5309 prep_new_page(page
, 0, gfp
, 0);
5311 list_add(&page
->lru
, page_list
);
5313 page_array
[nr_populated
] = page
;
5317 local_unlock_irqrestore(&pagesets
.lock
, flags
);
5319 __count_zid_vm_events(PGALLOC
, zone_idx(zone
), nr_account
);
5320 zone_statistics(ac
.preferred_zoneref
->zone
, zone
, nr_account
);
5323 return nr_populated
;
5326 local_unlock_irqrestore(&pagesets
.lock
, flags
);
5329 page
= __alloc_pages(gfp
, 0, preferred_nid
, nodemask
);
5332 list_add(&page
->lru
, page_list
);
5334 page_array
[nr_populated
] = page
;
5340 EXPORT_SYMBOL_GPL(__alloc_pages_bulk
);
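
/*
 * Illustrative sketch, not part of this file: a hypothetical caller that
 * fills an array of order-0 pages with the bulk allocator. The function
 * name is made up; real callers typically go through the
 * alloc_pages_bulk_array()/alloc_pages_bulk_list() wrappers in gfp.h.
 */
#if 0	/* example only, never compiled */
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/topology.h>

static int example_fill_page_array(struct page **pages, int nr)
{
	unsigned long filled;

	/* Only NULL slots are populated; the return value counts filled slots. */
	filled = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL,
				    nr, NULL, pages);
	return filled == nr ? 0 : -ENOMEM;
}
#endif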
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
							nodemask_t *nodemask)
{
	struct page *page;
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
	struct alloc_context ac = { };

	/*
	 * There are several places where we assume that the order value is sane
	 * so bail out early if the request is out of bound.
	 */
	if (unlikely(order >= MAX_ORDER)) {
		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
		return NULL;
	}

	gfp &= gfp_allowed_mask;
	/*
	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
	 * resp. GFP_NOIO which has to be inherited for all allocation requests
	 * from a particular context which has been marked by
	 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
	 * movable zones are not used during allocation.
	 */
	gfp = current_gfp_context(gfp);
	alloc_gfp = gfp;
	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
			&alloc_gfp, &alloc_flags))
		return NULL;

	/*
	 * Forbid the first pass from falling back to types that fragment
	 * memory until all local zones are considered.
	 */
	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);

	/* First allocation attempt */
	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
	if (likely(page))
		goto out;

	alloc_gfp = gfp;
	ac.spread_dirty_pages = false;

	/*
	 * Restore the original nodemask if it was potentially replaced with
	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
	 */
	ac.nodemask = nodemask;

	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);

out:
	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
		__free_pages(page, order);
		page = NULL;
	}

	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);

	return page;
}
EXPORT_SYMBOL(__alloc_pages);
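
/*
 * Illustrative sketch, not part of this file: a hypothetical caller of the
 * allocator's public face. alloc_pages() typically reaches __alloc_pages()
 * (directly on UMA, via the mempolicy-aware variant on NUMA); the order
 * passed to __free_pages() must match the one used at allocation time.
 */
#if 0	/* example only, never compiled */
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_order2_allocation(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL, 2);	/* 1 << 2 = 4 contiguous pages */
	if (!page)
		return -ENOMEM;

	/* ... use page_address(page) for up to 4 * PAGE_SIZE bytes ... */

	__free_pages(page, 2);
	return 0;
}
#endif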
/*
 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
 * address cannot represent highmem pages. Use alloc_pages and then kmap if
 * you need to access high mem.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);
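
/*
 * Illustrative sketch, not part of this file: a hypothetical user of
 * get_zeroed_page() for a single, zero-filled, kernel-addressable page,
 * released with the matching free_page() helper.
 */
#if 0	/* example only, never compiled */
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_zeroed_page(void)
{
	unsigned long addr;

	addr = get_zeroed_page(GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	/* addr points at PAGE_SIZE bytes of zeroed lowmem. */

	free_page(addr);
	return 0;
}
#endif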
/**
 * __free_pages - Free pages allocated with alloc_pages().
 * @page: The page pointer returned from alloc_pages().
 * @order: The order of the allocation.
 *
 * This function can free multi-page allocations that are not compound
 * pages. It does not check that the @order passed in matches that of
 * the allocation, so it is easy to leak memory. Freeing more memory
 * than was allocated will probably emit a warning.
 *
 * If the last reference to this page is speculative, it will be released
 * by put_page() which only frees the first page of a non-compound
 * allocation. To prevent the remaining pages from being leaked, we free
 * the subsequent pages here. If you want to use the page's reference
 * count to decide when to free the allocation, you should allocate a
 * compound page, and use put_page() instead of __free_pages().
 *
 * Context: May be called in interrupt context or while holding a normal
 * spinlock, but not in NMI context or while holding a raw spinlock.
 */
void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page))
		free_the_page(page, order);
	else if (!PageHead(page))
		while (order-- > 0)
			free_the_page(page + (1 << order), order);
}
EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(free_pages);
/*
 * Page Fragment:
 *  An arbitrary-length arbitrary-offset area of memory which resides
 *  within a 0 or higher order page. Multiple fragments within that page
 *  are individually refcounted, in the page's reference counter.
 *
 * The page_frag functions below provide a simple allocation framework for
 * page fragments. This is used by the network stack and network device
 * drivers to provide a backing region of memory for use as either an
 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
 */
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
					     gfp_t gfp_mask)
{
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
		    __GFP_NOMEMALLOC;
	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
				PAGE_FRAG_CACHE_MAX_ORDER);
	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
#endif
	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->va = page ? page_address(page) : NULL;

	return page;
}

void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (page_ref_sub_and_test(page, count))
		free_the_page(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);

void *page_frag_alloc_align(struct page_frag_cache *nc,
		      unsigned int fragsz, gfp_t gfp_mask,
		      unsigned int align_mask)
{
	unsigned int size = PAGE_SIZE;
	struct page *page;
	int offset;

	if (unlikely(!nc->va)) {
refill:
		page = __page_frag_cache_refill(nc, gfp_mask);
		if (!page)
			return NULL;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);

		/* reset page count bias and offset to start of new frag */
		nc->pfmemalloc = page_is_pfmemalloc(page);
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		nc->offset = size;
	}

	offset = nc->offset - fragsz;
	if (unlikely(offset < 0)) {
		page = virt_to_page(nc->va);

		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			goto refill;

		if (unlikely(nc->pfmemalloc)) {
			free_the_page(page, compound_order(page));
			goto refill;
		}

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* OK, page count is 0, we can safely set it */
		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	offset &= align_mask;
	nc->offset = offset;

	return nc->va + offset;
}
EXPORT_SYMBOL(page_frag_alloc_align);

/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void page_frag_free(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	if (unlikely(put_page_testzero(page)))
		free_the_page(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);
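
/*
 * Illustrative sketch, not part of this file: how a network driver might
 * carve small receive buffers out of a page_frag_cache. The cache and
 * function names are made up; page_frag_alloc() is the unaligned wrapper
 * around page_frag_alloc_align().
 */
#if 0	/* example only, never compiled */
#include <linux/gfp.h>
#include <linux/mm_types.h>

static struct page_frag_cache example_frag_cache;

static void *example_rx_buffer_alloc(unsigned int len)
{
	return page_frag_alloc(&example_frag_cache, len, GFP_ATOMIC);
}

static void example_rx_buffer_free(void *data)
{
	page_frag_free(data);
}
#endif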
static void *make_alloc_exact(unsigned long addr, unsigned int order,
		size_t size)
{
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

/**
 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request. alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
		gfp_mask &= ~__GFP_COMP;

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
 * back.
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	struct page *p;

	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
		gfp_mask &= ~__GFP_COMP;

	p = alloc_pages_node(nid, gfp_mask, order);
	if (!p)
		return NULL;
	return make_alloc_exact((unsigned long)page_address(p), order, size);
}

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);
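
/*
 * Illustrative sketch, not part of this file: a hypothetical three page
 * buffer. alloc_pages_exact() rounds the request up to an order-2 block
 * internally and hands the unused fourth page back to the allocator, so
 * only free_pages_exact() with the same size may be used to release it.
 */
#if 0	/* example only, never compiled */
#include <linux/gfp.h>

static void *example_alloc_three_pages(void)
{
	return alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
}

static void example_free_three_pages(void *buf)
{
	free_pages_exact(buf, 3 * PAGE_SIZE);
}
#endif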
/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index. For each
 * zone, the number of pages is calculated as:
 *
 *     nr_free_zone_pages = managed_pages - high_pages
 *
 * Return: number of pages beyond high watermark.
 */
static unsigned long nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned long sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone_managed_pages(zone);
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the high
 * watermark within ZONE_DMA and ZONE_NORMAL.
 *
 * Return: number of pages beyond high watermark within ZONE_DMA and
 * ZONE_NORMAL.
 */
unsigned long nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
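
/*
 * Worked example with made-up numbers: if the zones at or below @offset
 * have 1,000,000 managed pages with a high watermark of 20,000, and
 * 250,000 managed pages with a high watermark of 5,000, nr_free_zone_pages()
 * returns (1,000,000 - 20,000) + (250,000 - 5,000) = 1,225,000 pages.
 */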
5723 static inline void show_node(struct zone
*zone
)
5725 if (IS_ENABLED(CONFIG_NUMA
))
5726 printk("Node %d ", zone_to_nid(zone
));
5729 long si_mem_available(void)
5732 unsigned long pagecache
;
5733 unsigned long wmark_low
= 0;
5734 unsigned long pages
[NR_LRU_LISTS
];
5735 unsigned long reclaimable
;
5739 for (lru
= LRU_BASE
; lru
< NR_LRU_LISTS
; lru
++)
5740 pages
[lru
] = global_node_page_state(NR_LRU_BASE
+ lru
);
5743 wmark_low
+= low_wmark_pages(zone
);
5746 * Estimate the amount of memory available for userspace allocations,
5747 * without causing swapping.
5749 available
= global_zone_page_state(NR_FREE_PAGES
) - totalreserve_pages
;
5752 * Not all the page cache can be freed, otherwise the system will
5753 * start swapping. Assume at least half of the page cache, or the
5754 * low watermark worth of cache, needs to stay.
5756 pagecache
= pages
[LRU_ACTIVE_FILE
] + pages
[LRU_INACTIVE_FILE
];
5757 pagecache
-= min(pagecache
/ 2, wmark_low
);
5758 available
+= pagecache
;
5761 * Part of the reclaimable slab and other kernel memory consists of
5762 * items that are in use, and cannot be freed. Cap this estimate at the
5765 reclaimable
= global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B
) +
5766 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE
);
5767 available
+= reclaimable
- min(reclaimable
/ 2, wmark_low
);
5773 EXPORT_SYMBOL_GPL(si_mem_available
);
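
/*
 * Illustrative sketch, not part of this file: a hypothetical diagnostic
 * that logs the same figures /proc/meminfo derives from si_meminfo() and
 * si_mem_available(). The function name is made up.
 */
#if 0	/* example only, never compiled */
#include <linux/kernel.h>
#include <linux/mm.h>

static void example_log_meminfo(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	pr_info("totalram=%lu freeram=%lu available=%ld (pages)\n",
		si.totalram, si.freeram, si_mem_available());
}
#endif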
5775 void si_meminfo(struct sysinfo
*val
)
5777 val
->totalram
= totalram_pages();
5778 val
->sharedram
= global_node_page_state(NR_SHMEM
);
5779 val
->freeram
= global_zone_page_state(NR_FREE_PAGES
);
5780 val
->bufferram
= nr_blockdev_pages();
5781 val
->totalhigh
= totalhigh_pages();
5782 val
->freehigh
= nr_free_highpages();
5783 val
->mem_unit
= PAGE_SIZE
;
5786 EXPORT_SYMBOL(si_meminfo
);
5789 void si_meminfo_node(struct sysinfo
*val
, int nid
)
5791 int zone_type
; /* needs to be signed */
5792 unsigned long managed_pages
= 0;
5793 unsigned long managed_highpages
= 0;
5794 unsigned long free_highpages
= 0;
5795 pg_data_t
*pgdat
= NODE_DATA(nid
);
5797 for (zone_type
= 0; zone_type
< MAX_NR_ZONES
; zone_type
++)
5798 managed_pages
+= zone_managed_pages(&pgdat
->node_zones
[zone_type
]);
5799 val
->totalram
= managed_pages
;
5800 val
->sharedram
= node_page_state(pgdat
, NR_SHMEM
);
5801 val
->freeram
= sum_zone_node_page_state(nid
, NR_FREE_PAGES
);
5802 #ifdef CONFIG_HIGHMEM
5803 for (zone_type
= 0; zone_type
< MAX_NR_ZONES
; zone_type
++) {
5804 struct zone
*zone
= &pgdat
->node_zones
[zone_type
];
5806 if (is_highmem(zone
)) {
5807 managed_highpages
+= zone_managed_pages(zone
);
5808 free_highpages
+= zone_page_state(zone
, NR_FREE_PAGES
);
5811 val
->totalhigh
= managed_highpages
;
5812 val
->freehigh
= free_highpages
;
5814 val
->totalhigh
= managed_highpages
;
5815 val
->freehigh
= free_highpages
;
5817 val
->mem_unit
= PAGE_SIZE
;
5822 * Determine whether the node should be displayed or not, depending on whether
5823 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5825 static bool show_mem_node_skip(unsigned int flags
, int nid
, nodemask_t
*nodemask
)
5827 if (!(flags
& SHOW_MEM_FILTER_NODES
))
5831 * no node mask - aka implicit memory numa policy. Do not bother with
5832 * the synchronization - read_mems_allowed_begin - because we do not
5833 * have to be precise here.
5836 nodemask
= &cpuset_current_mems_allowed
;
5838 return !node_isset(nid
, *nodemask
);
5841 #define K(x) ((x) << (PAGE_SHIFT-10))
5843 static void show_migration_types(unsigned char type
)
5845 static const char types
[MIGRATE_TYPES
] = {
5846 [MIGRATE_UNMOVABLE
] = 'U',
5847 [MIGRATE_MOVABLE
] = 'M',
5848 [MIGRATE_RECLAIMABLE
] = 'E',
5849 [MIGRATE_HIGHATOMIC
] = 'H',
5851 [MIGRATE_CMA
] = 'C',
5853 #ifdef CONFIG_MEMORY_ISOLATION
5854 [MIGRATE_ISOLATE
] = 'I',
5857 char tmp
[MIGRATE_TYPES
+ 1];
5861 for (i
= 0; i
< MIGRATE_TYPES
; i
++) {
5862 if (type
& (1 << i
))
5867 printk(KERN_CONT
"(%s) ", tmp
);
5871 * Show free area list (used inside shift_scroll-lock stuff)
5872 * We also calculate the percentage fragmentation. We do this by counting the
5873 * memory on each free list with the exception of the first item on the list.
5876 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5879 void show_free_areas(unsigned int filter
, nodemask_t
*nodemask
)
5881 unsigned long free_pcp
= 0;
5886 for_each_populated_zone(zone
) {
5887 if (show_mem_node_skip(filter
, zone_to_nid(zone
), nodemask
))
5890 for_each_online_cpu(cpu
)
5891 free_pcp
+= per_cpu_ptr(zone
->per_cpu_pageset
, cpu
)->count
;
5894 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5895 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5896 " unevictable:%lu dirty:%lu writeback:%lu\n"
5897 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5898 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5899 " kernel_misc_reclaimable:%lu\n"
5900 " free:%lu free_pcp:%lu free_cma:%lu\n",
5901 global_node_page_state(NR_ACTIVE_ANON
),
5902 global_node_page_state(NR_INACTIVE_ANON
),
5903 global_node_page_state(NR_ISOLATED_ANON
),
5904 global_node_page_state(NR_ACTIVE_FILE
),
5905 global_node_page_state(NR_INACTIVE_FILE
),
5906 global_node_page_state(NR_ISOLATED_FILE
),
5907 global_node_page_state(NR_UNEVICTABLE
),
5908 global_node_page_state(NR_FILE_DIRTY
),
5909 global_node_page_state(NR_WRITEBACK
),
5910 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B
),
5911 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B
),
5912 global_node_page_state(NR_FILE_MAPPED
),
5913 global_node_page_state(NR_SHMEM
),
5914 global_node_page_state(NR_PAGETABLE
),
5915 global_zone_page_state(NR_BOUNCE
),
5916 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE
),
5917 global_zone_page_state(NR_FREE_PAGES
),
5919 global_zone_page_state(NR_FREE_CMA_PAGES
));
5921 for_each_online_pgdat(pgdat
) {
5922 if (show_mem_node_skip(filter
, pgdat
->node_id
, nodemask
))
5926 " active_anon:%lukB"
5927 " inactive_anon:%lukB"
5928 " active_file:%lukB"
5929 " inactive_file:%lukB"
5930 " unevictable:%lukB"
5931 " isolated(anon):%lukB"
5932 " isolated(file):%lukB"
5937 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5939 " shmem_pmdmapped: %lukB"
5942 " writeback_tmp:%lukB"
5943 " kernel_stack:%lukB"
5944 #ifdef CONFIG_SHADOW_CALL_STACK
5945 " shadow_call_stack:%lukB"
5948 " all_unreclaimable? %s"
5951 K(node_page_state(pgdat
, NR_ACTIVE_ANON
)),
5952 K(node_page_state(pgdat
, NR_INACTIVE_ANON
)),
5953 K(node_page_state(pgdat
, NR_ACTIVE_FILE
)),
5954 K(node_page_state(pgdat
, NR_INACTIVE_FILE
)),
5955 K(node_page_state(pgdat
, NR_UNEVICTABLE
)),
5956 K(node_page_state(pgdat
, NR_ISOLATED_ANON
)),
5957 K(node_page_state(pgdat
, NR_ISOLATED_FILE
)),
5958 K(node_page_state(pgdat
, NR_FILE_MAPPED
)),
5959 K(node_page_state(pgdat
, NR_FILE_DIRTY
)),
5960 K(node_page_state(pgdat
, NR_WRITEBACK
)),
5961 K(node_page_state(pgdat
, NR_SHMEM
)),
5962 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5963 K(node_page_state(pgdat
, NR_SHMEM_THPS
)),
5964 K(node_page_state(pgdat
, NR_SHMEM_PMDMAPPED
)),
5965 K(node_page_state(pgdat
, NR_ANON_THPS
)),
5967 K(node_page_state(pgdat
, NR_WRITEBACK_TEMP
)),
5968 node_page_state(pgdat
, NR_KERNEL_STACK_KB
),
5969 #ifdef CONFIG_SHADOW_CALL_STACK
5970 node_page_state(pgdat
, NR_KERNEL_SCS_KB
),
5972 K(node_page_state(pgdat
, NR_PAGETABLE
)),
5973 pgdat
->kswapd_failures
>= MAX_RECLAIM_RETRIES
?
5977 for_each_populated_zone(zone
) {
5980 if (show_mem_node_skip(filter
, zone_to_nid(zone
), nodemask
))
5984 for_each_online_cpu(cpu
)
5985 free_pcp
+= per_cpu_ptr(zone
->per_cpu_pageset
, cpu
)->count
;
5994 " reserved_highatomic:%luKB"
5995 " active_anon:%lukB"
5996 " inactive_anon:%lukB"
5997 " active_file:%lukB"
5998 " inactive_file:%lukB"
5999 " unevictable:%lukB"
6000 " writepending:%lukB"
6010 K(zone_page_state(zone
, NR_FREE_PAGES
)),
6011 K(min_wmark_pages(zone
)),
6012 K(low_wmark_pages(zone
)),
6013 K(high_wmark_pages(zone
)),
6014 K(zone
->nr_reserved_highatomic
),
6015 K(zone_page_state(zone
, NR_ZONE_ACTIVE_ANON
)),
6016 K(zone_page_state(zone
, NR_ZONE_INACTIVE_ANON
)),
6017 K(zone_page_state(zone
, NR_ZONE_ACTIVE_FILE
)),
6018 K(zone_page_state(zone
, NR_ZONE_INACTIVE_FILE
)),
6019 K(zone_page_state(zone
, NR_ZONE_UNEVICTABLE
)),
6020 K(zone_page_state(zone
, NR_ZONE_WRITE_PENDING
)),
6021 K(zone
->present_pages
),
6022 K(zone_managed_pages(zone
)),
6023 K(zone_page_state(zone
, NR_MLOCK
)),
6024 K(zone_page_state(zone
, NR_BOUNCE
)),
6026 K(this_cpu_read(zone
->per_cpu_pageset
->count
)),
6027 K(zone_page_state(zone
, NR_FREE_CMA_PAGES
)));
6028 printk("lowmem_reserve[]:");
6029 for (i
= 0; i
< MAX_NR_ZONES
; i
++)
6030 printk(KERN_CONT
" %ld", zone
->lowmem_reserve
[i
]);
6031 printk(KERN_CONT
"\n");
6034 for_each_populated_zone(zone
) {
6036 unsigned long nr
[MAX_ORDER
], flags
, total
= 0;
6037 unsigned char types
[MAX_ORDER
];
6039 if (show_mem_node_skip(filter
, zone_to_nid(zone
), nodemask
))
6042 printk(KERN_CONT
"%s: ", zone
->name
);
6044 spin_lock_irqsave(&zone
->lock
, flags
);
6045 for (order
= 0; order
< MAX_ORDER
; order
++) {
6046 struct free_area
*area
= &zone
->free_area
[order
];
6049 nr
[order
] = area
->nr_free
;
6050 total
+= nr
[order
] << order
;
6053 for (type
= 0; type
< MIGRATE_TYPES
; type
++) {
6054 if (!free_area_empty(area
, type
))
6055 types
[order
] |= 1 << type
;
6058 spin_unlock_irqrestore(&zone
->lock
, flags
);
6059 for (order
= 0; order
< MAX_ORDER
; order
++) {
6060 printk(KERN_CONT
"%lu*%lukB ",
6061 nr
[order
], K(1UL) << order
);
6063 show_migration_types(types
[order
]);
6065 printk(KERN_CONT
"= %lukB\n", K(total
));
6068 hugetlb_show_meminfo();
6070 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES
));
6072 show_swap_cache_info();
6075 static void zoneref_set_zone(struct zone
*zone
, struct zoneref
*zoneref
)
6077 zoneref
->zone
= zone
;
6078 zoneref
->zone_idx
= zone_idx(zone
);
6082 * Builds allocation fallback zone lists.
6084 * Add all populated zones of a node to the zonelist.
6086 static int build_zonerefs_node(pg_data_t
*pgdat
, struct zoneref
*zonerefs
)
6089 enum zone_type zone_type
= MAX_NR_ZONES
;
6094 zone
= pgdat
->node_zones
+ zone_type
;
6095 if (populated_zone(zone
)) {
6096 zoneref_set_zone(zone
, &zonerefs
[nr_zones
++]);
6097 check_highest_zone(zone_type
);
6099 } while (zone_type
);
6106 static int __parse_numa_zonelist_order(char *s
)
6109 * We used to support different zonelists modes but they turned
6110 * out to be just not useful. Let's keep the warning in place
6111 * if somebody still use the cmd line parameter so that we do
6112 * not fail it silently
6114 if (!(*s
== 'd' || *s
== 'D' || *s
== 'n' || *s
== 'N')) {
6115 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s
);
6121 char numa_zonelist_order
[] = "Node";
6124 * sysctl handler for numa_zonelist_order
6126 int numa_zonelist_order_handler(struct ctl_table
*table
, int write
,
6127 void *buffer
, size_t *length
, loff_t
*ppos
)
6130 return __parse_numa_zonelist_order(buffer
);
6131 return proc_dostring(table
, write
, buffer
, length
, ppos
);
6135 #define MAX_NODE_LOAD (nr_online_nodes)
6136 static int node_load
[MAX_NUMNODES
];
6139 * find_next_best_node - find the next node that should appear in a given node's fallback list
6140 * @node: node whose fallback list we're appending
6141 * @used_node_mask: nodemask_t of already used nodes
6143 * We use a number of factors to determine which is the next node that should
6144 * appear on a given node's fallback list. The node should not have appeared
6145 * already in @node's fallback list, and it should be the next closest node
6146 * according to the distance array (which contains arbitrary distance values
6147 * from each node to each node in the system), and should also prefer nodes
6148 * with no CPUs, since presumably they'll have very little allocation pressure
6149 * on them otherwise.
6151 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
6153 int find_next_best_node(int node
, nodemask_t
*used_node_mask
)
6156 int min_val
= INT_MAX
;
6157 int best_node
= NUMA_NO_NODE
;
6159 /* Use the local node if we haven't already */
6160 if (!node_isset(node
, *used_node_mask
)) {
6161 node_set(node
, *used_node_mask
);
6165 for_each_node_state(n
, N_MEMORY
) {
6167 /* Don't want a node to appear more than once */
6168 if (node_isset(n
, *used_node_mask
))
6171 /* Use the distance array to find the distance */
6172 val
= node_distance(node
, n
);
6174 /* Penalize nodes under us ("prefer the next node") */
6177 /* Give preference to headless and unused nodes */
6178 if (!cpumask_empty(cpumask_of_node(n
)))
6179 val
+= PENALTY_FOR_NODE_WITH_CPUS
;
6181 /* Slight preference for less loaded node */
6182 val
*= (MAX_NODE_LOAD
*MAX_NUMNODES
);
6183 val
+= node_load
[n
];
6185 if (val
< min_val
) {
6192 node_set(best_node
, *used_node_mask
);
6199 * Build zonelists ordered by node and zones within node.
6200 * This results in maximum locality--normal zone overflows into local
6201 * DMA zone, if any--but risks exhausting DMA zone.
6203 static void build_zonelists_in_node_order(pg_data_t
*pgdat
, int *node_order
,
6206 struct zoneref
*zonerefs
;
6209 zonerefs
= pgdat
->node_zonelists
[ZONELIST_FALLBACK
]._zonerefs
;
6211 for (i
= 0; i
< nr_nodes
; i
++) {
6214 pg_data_t
*node
= NODE_DATA(node_order
[i
]);
6216 nr_zones
= build_zonerefs_node(node
, zonerefs
);
6217 zonerefs
+= nr_zones
;
6219 zonerefs
->zone
= NULL
;
6220 zonerefs
->zone_idx
= 0;
6224 * Build gfp_thisnode zonelists
6226 static void build_thisnode_zonelists(pg_data_t
*pgdat
)
6228 struct zoneref
*zonerefs
;
6231 zonerefs
= pgdat
->node_zonelists
[ZONELIST_NOFALLBACK
]._zonerefs
;
6232 nr_zones
= build_zonerefs_node(pgdat
, zonerefs
);
6233 zonerefs
+= nr_zones
;
6234 zonerefs
->zone
= NULL
;
6235 zonerefs
->zone_idx
= 0;
/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */
static void build_zonelists(pg_data_t *pgdat)
{
	static int node_order[MAX_NUMNODES];
	int node, load, nr_nodes = 0;
	nodemask_t used_mask = NODE_MASK_NONE;
	int local_node, prev_node;

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = nr_online_nodes;
	prev_node = local_node;

	memset(node_order, 0, sizeof(node_order));
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (node_distance(local_node, node) !=
		    node_distance(local_node, prev_node))
			node_load[node] = load;

		node_order[nr_nodes++] = node;
		prev_node = node;
		load--;
	}

	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
	build_thisnode_zonelists(pgdat);
}
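/*
 * Worked example (hypothetical topology): on a two-node machine where node 0
 * has DMA32 and Normal zones and node 1 has only a Normal zone, the
 * ZONELIST_FALLBACK list built here for node 0 references node 0's Normal
 * zone, then node 0's DMA32 zone, then node 1's Normal zone, so local zones
 * are exhausted before allocations spill over to the remote node.
 */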
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return node id of node used for "local" allocations.
 * I.e., first node id of first zone in arg node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 */
int local_memory_node(int node)
{
	struct zoneref *z;

	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
				 gfp_zone(GFP_KERNEL),
				 NULL);
	return zone_to_nid(z->zone);
}
#endif

static void setup_min_unmapped_ratio(void);
static void setup_min_slab_ratio(void);
#else	/* CONFIG_NUMA */

static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	struct zoneref *zonerefs;
	int nr_zones;

	local_node = pgdat->node_id;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}

	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}

#endif	/* CONFIG_NUMA */
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
/* These effectively disable the pcplists in the boot pageset completely */
#define BOOT_PAGESET_HIGH	0
#define BOOT_PAGESET_BATCH	1
static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
static void __build_all_zonelists(void *data)
{
	int nid;
	int __maybe_unused cpu;
	pg_data_t *self = data;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);

#ifdef CONFIG_NUMA
	memset(node_load, 0, sizeof(node_load));
#endif

	/*
	 * This node is hotadded and no memory is yet present. So just
	 * building zonelists is fine - no need to touch other nodes.
	 */
	if (self && !node_online(self->node_id)) {
		build_zonelists(self);
	} else {
		for_each_online_node(nid) {
			pg_data_t *pgdat = NODE_DATA(nid);

			build_zonelists(pgdat);
		}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
		/*
		 * We now know the "local memory node" for each node--
		 * i.e., the node of the first zone in the generic zonelist.
		 * Set up numa_mem percpu variable for on-line cpus. During
		 * boot, only the boot cpu should be on-line; we'll init the
		 * secondary cpus' numa_mem as they come on-line. During
		 * node/memory hotplug, we'll fixup all on-line cpus.
		 */
		for_each_online_cpu(cpu)
			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
	}

	spin_unlock(&lock);
}
static noinline void __init
build_all_zonelists_init(void)
{
	int cpu;

	__build_all_zonelists(NULL);

	/*
	 * Initialize the boot_pagesets that are going to be used
	 * for bootstrapping processors. The real pagesets for
	 * each zone will be allocated later when the per cpu
	 * allocator is available.
	 *
	 * boot_pagesets are used also for bootstrapping offline
	 * cpus if the system is already booted because the pagesets
	 * are needed to initialize allocators on a specific cpu too.
	 * F.e. the percpu allocator needs the page allocator which
	 * needs the percpu allocator in order to allocate its pagesets
	 * (a chicken-egg dilemma).
	 */
	for_each_possible_cpu(cpu)
		per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));

	mminit_verify_zonelist();
	cpuset_init_current_mems_allowed();
}
/*
 * unless system_state == SYSTEM_BOOTING.
 *
 * __ref due to call of __init annotated helper build_all_zonelists_init
 * [protected by SYSTEM_BOOTING].
 */
void __ref build_all_zonelists(pg_data_t *pgdat)
{
	unsigned long vm_total_pages;

	if (system_state == SYSTEM_BOOTING) {
		build_all_zonelists_init();
	} else {
		__build_all_zonelists(pgdat);
		/* cpuset refresh routine should be here */
	}
	/* Get the number of free pages beyond high watermark in all zones. */
	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
		nr_online_nodes,
		page_group_by_mobility_disabled ? "off" : "on",
		vm_total_pages);
#ifdef CONFIG_NUMA
	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}
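/*
 * Worked example (typical x86-64 values, for illustration only): with
 * pageblock_nr_pages == 512 and roughly half a dozen migrate types, the
 * threshold above is on the order of 3000 pages (about 12 MiB), so mobility
 * grouping is only ever disabled on very small systems.
 */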
/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}
/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function. They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, zone_end_pfn))
				break;
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG)
			__SetPageReserved(page);

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
		pfn++;
	}
}
#ifdef CONFIG_ZONE_DEVICE
void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
	unsigned long zone_idx = zone_idx(zone);
	unsigned long start = jiffies;
	int nid = pgdat->node_id;

	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
		return;

	/*
	 * The call to memmap_init should have already taken care
	 * of the pages reserved for the memmap, so we can just jump to
	 * the end of that region and start processing the device pages.
	 */
	if (altmap) {
		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
		nr_pages = end_pfn - start_pfn;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_single_page(page, pfn, zone_idx, nid);

		/*
		 * Mark page reserved as it will need to wait for onlining
		 * phase for it to be fully associated with a zone.
		 *
		 * We can use the non-atomic __set_bit operation for setting
		 * the flag as we are still initializing the pages.
		 */
		__SetPageReserved(page);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
		 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
		 * ever freed or placed on a driver-private list.
		 */
		page->pgmap = pgmap;
		page->zone_device_data = NULL;

		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made.
		 *
		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
		 * because this is done early in section_activate()
		 */
		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
			cond_resched();
		}
	}

	pr_info("%s initialised %lu pages in %ums\n", __func__,
		nr_pages, jiffies_to_msecs(jiffies - start));
}
static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}
/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are zeroed and initialized by going through __init_single_page() during
 * memmap_init_zone_range().
 *
 * But, there could be struct pages that correspond to holes in
 * memblock.memory. This can happen because of the following reasons:
 * - physical memory bank size is not necessarily the exact multiple of the
 *   arbitrary section size
 * - early reserved memory may not be listed in memblock.memory
 * - memory layouts defined with memmap= kernel parameter may not align
 *   nicely with memmap sections
 *
 * Explicitly initialize those struct pages so that:
 * - PG_Reserved is set
 * - zone and node links point to zone and node that span the page if the
 *   hole is in the middle of a zone
 * - zone and node links point to adjacent zone/node if the hole falls on
 *   the zone boundary; the pages in such holes will be prepended to the
 *   zone/node above the hole except for the trailing pages in the last
 *   section that will be appended to the zone/node below.
 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
				+ pageblock_nr_pages - 1;
			continue;
		}
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	if (pgcnt)
		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
			node, zone_names[zone], pgcnt);
}
static void __init memmap_init_zone_range(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *hole_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);

	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);

	if (start_pfn >= end_pfn)
		return;

	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);

	if (*hole_pfn < start_pfn)
		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);

	*hole_pfn = end_pfn;
}
static void __init memmap_init(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long hole_pfn = 0;
	int i, j, zone_id = 0, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		struct pglist_data *node = NODE_DATA(nid);

		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = node->node_zones + j;

			if (!populated_zone(zone))
				continue;

			memmap_init_zone_range(zone, start_pfn, end_pfn,
					       &hole_pfn);
			zone_id = j;
		}
	}

#ifdef CONFIG_SPARSEMEM
	/*
	 * Initialize the memory map for hole in the range [memory_end,
	 * section_end].
	 * Append the pages in this hole to the highest zone in the last
	 * node.
	 * The call to init_unavailable_range() is outside the ifdef to
	 * silence the compiler warning about zone_id set but not used;
	 * for FLATMEM it is a nop anyway
	 */
	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
	if (hole_pfn < end_pfn)
#endif
		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}
void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr, int nid, bool exact_nid)
{
	void *ptr;

	if (exact_nid)
		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
						   MEMBLOCK_ALLOC_ACCESSIBLE,
						   nid);
	else
		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
						 MEMBLOCK_ALLOC_ACCESSIBLE,
						 nid);

	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The number of pages to batch allocate is either ~0.1%
	 * of the zone or 1MB, whichever is smaller. The batch
	 * size is striking a balance between allocation latency
	 * and zone lock contention.
	 */
	batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks. As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}
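/*
 * Worked example (hypothetical 4 GiB zone with 4 KiB pages): the zone manages
 * 1048576 pages, so min(1048576 >> 10, 256) == 256; dividing by 4 gives 64
 * and rounddown_pow_of_two(64 + 32) - 1 == 63, i.e. a pcp batch of 63 pages.
 */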
static int zone_highsize(struct zone *zone, int batch, int cpu_online)
{
#ifdef CONFIG_MMU
	int high;
	int nr_split_cpus;
	unsigned long total_pages;

	if (!percpu_pagelist_high_fraction) {
		/*
		 * By default, the high value of the pcp is based on the zone
		 * low watermark so that if they are full then background
		 * reclaim will not be started prematurely.
		 */
		total_pages = low_wmark_pages(zone);
	} else {
		/*
		 * If percpu_pagelist_high_fraction is configured, the high
		 * value is based on a fraction of the managed pages in the
		 * zone.
		 */
		total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
	}

	/*
	 * Split the high value across all online CPUs local to the zone. Note
	 * that early in boot that CPUs may not be online yet and that during
	 * CPU hotplug that the cpumask is not yet updated when a CPU is being
	 * onlined. For memory nodes that have no CPUs, split pcp->high across
	 * all online CPUs to mitigate the risk that reclaim is triggered
	 * prematurely due to pages stored on pcp lists.
	 */
	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
	if (!nr_split_cpus)
		nr_split_cpus = num_online_cpus();
	high = total_pages / nr_split_cpus;

	/*
	 * Ensure high is at least batch*4. The multiple is based on the
	 * historical relationship between high and batch.
	 */
	high = max(high, batch << 2);

	return high;
#else
	return 0;
#endif
}
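/*
 * Worked example (hypothetical numbers): for a zone whose low watermark is
 * 16384 pages on a node with 8 online CPUs, high becomes 16384 / 8 == 2048
 * pages per CPU; with a batch of 63 the max(high, batch << 2) clamp of 252
 * does not apply, so pcp->high ends up at 2048.
 */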
/*
 * pcp->high and pcp->batch values are related and generally batch is lower
 * than high. They are also related to pcp->count such that count is lower
 * than high, and as soon as it reaches high, the pcplist is flushed.
 *
 * However, guaranteeing these relations at all times would require e.g. write
 * barriers here but also careful usage of read barriers at the read side, and
 * thus be prone to error and bad for performance. Thus the update only prevents
 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
 * can cope with those fields changing asynchronously, and fully trust only the
 * pcp->count field on the local CPU with interrupts disabled.
 *
 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
 * outside of boot time (or some other assurance that no concurrent updaters
 * exist).
 */
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
		unsigned long batch)
{
	WRITE_ONCE(pcp->batch, batch);
	WRITE_ONCE(pcp->high, high);
}
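/*
 * Note (illustrative): because only store tearing is prevented here, readers
 * are expected to sample pcp->high and pcp->batch with READ_ONCE() and must
 * not assume the two values are mutually consistent at any given instant.
 */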
static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
{
	int pindex;

	memset(pcp, 0, sizeof(*pcp));
	memset(pzstats, 0, sizeof(*pzstats));

	for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
		INIT_LIST_HEAD(&pcp->lists[pindex]);

	/*
	 * Set batch and high values safe for a boot pageset. A true percpu
	 * pageset's initialization will update them subsequently. Here we don't
	 * need to be as careful as pageset_update() as nobody can access the
	 * pageset yet.
	 */
	pcp->high = BOOT_PAGESET_HIGH;
	pcp->batch = BOOT_PAGESET_BATCH;
	pcp->free_factor = 0;
}
static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
		unsigned long batch)
{
	struct per_cpu_pages *pcp;
	int cpu;

	for_each_possible_cpu(cpu) {
		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
		pageset_update(pcp, high, batch);
	}
}
/*
 * Calculate and set new high and batch values for all per-cpu pagesets of a
 * zone based on the zone's size.
 */
static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
{
	int new_high, new_batch;

	new_batch = max(1, zone_batchsize(zone));
	new_high = zone_highsize(zone, new_batch, cpu_online);

	if (zone->pageset_high == new_high &&
	    zone->pageset_batch == new_batch)
		return;

	zone->pageset_high = new_high;
	zone->pageset_batch = new_batch;

	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
}
void __meminit setup_zone_pageset(struct zone *zone)
{
	int cpu;

	/* Size may be 0 on !SMP && !NUMA */
	if (sizeof(struct per_cpu_zonestat) > 0)
		zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);

	zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
	for_each_possible_cpu(cpu) {
		struct per_cpu_pages *pcp;
		struct per_cpu_zonestat *pzstats;

		pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		per_cpu_pages_init(pcp, pzstats);
	}

	zone_set_pageset_high_and_batch(zone, 0);
}
/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
void __init setup_per_cpu_pageset(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int __maybe_unused cpu;

	for_each_populated_zone(zone)
		setup_zone_pageset(zone);

#ifdef CONFIG_NUMA
	/*
	 * Unpopulated zones continue using the boot pagesets.
	 * The numa stats for these pagesets need to be reset.
	 * Otherwise, they will end up skewing the stats of
	 * the nodes these zones are associated with.
	 */
	for_each_possible_cpu(cpu) {
		struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
		memset(pzstats->vm_numa_event, 0,
		       sizeof(pzstats->vm_numa_event));
	}
#endif

	for_each_online_pgdat(pgdat)
		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
}
static __meminit void zone_pcp_init(struct zone *zone)
{
	/*
	 * per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
	zone->per_cpu_pageset = &boot_pageset;
	zone->per_cpu_zonestats = &boot_zonestats;
	zone->pageset_high = BOOT_PAGESET_HIGH;
	zone->pageset_batch = BOOT_PAGESET_BATCH;

	if (populated_zone(zone))
		pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
			 zone->present_pages, zone_batchsize(zone));
}
void __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int zone_idx = zone_idx(zone) + 1;

	if (zone_idx > pgdat->nr_zones)
		pgdat->nr_zones = zone_idx;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	zone->initialized = 1;
}
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;

	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}
/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory addresses.
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}
/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	/* When hotadd a new node from cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	/* Get the start and end of the zone */
	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				zone_start_pfn, zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return *zone_end_pfn - *zone_start_pfn;
}
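/*
 * Worked example (hypothetical PFNs): if a node spans PFNs 0x10000-0x50000
 * and the zone's architectural limits are PFNs 0x0-0x100000, the clamps above
 * leave a zone range of 0x10000-0x50000 on this node, i.e. 0x40000 spanned
 * pages; holes inside that range are accounted separately as absent pages.
 */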
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * Return: the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}
/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	unsigned long zone_start_pfn, zone_end_pfn;
	unsigned long nr_absent;

	/* When hotadd a new node from cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_mem_region(r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long spanned, absent;
		unsigned long size, real_size;

		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
						     node_start_pfn,
						     node_end_pfn,
						     &zone_start_pfn,
						     &zone_end_pfn);
		absent = zone_absent_pages_in_node(pgdat->node_id, i,
						   node_start_pfn,
						   node_end_pfn);

		size = spanned;
		real_size = size - absent;

		if (size)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = size;
		zone->present_pages = real_size;
#if defined(CONFIG_MEMORY_HOTPLUG)
		zone->present_early_pages = real_size;
#endif

		totalpages += size;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
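/*
 * Worked example (x86-64-style numbers, for illustration): a 1 GiB zone holds
 * 262144 4 KiB pages; with pageblock_order == 9 that is 512 pageblocks, each
 * needing NR_PAGEBLOCK_BITS (4) bits, so 2048 bits round up to 2048 and the
 * usemap costs 256 bytes.
 */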
static void __ref setup_usemap(struct zone *zone)
{
	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
					       zone->spanned_pages);
	zone->pageblock_flags = NULL;
	if (usemapsize) {
		zone->pageblock_flags =
			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
					    zone_to_nid(zone));
		if (!zone->pageblock_flags)
			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
			      usemapsize, zone->name, zone_to_nid(zone));
	}
}
#else
static inline void setup_usemap(struct zone *zone) {}
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
	unsigned int order;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	if (HPAGE_SHIFT > PAGE_SHIFT)
		order = HUGETLB_PAGE_ORDER;
	else
		order = MAX_ORDER - 1;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64 and
	 * powerpc.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __init set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
						unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated regions may not be naturally aligned on page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;

	spin_lock_init(&ds_queue->split_queue_lock);
	INIT_LIST_HEAD(&ds_queue->split_queue);
	ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif

#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
	init_waitqueue_head(&pgdat->kcompactd_wait);
}
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif
static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
	pgdat_resize_init(pgdat);

	pgdat_init_split_queue(pgdat);
	pgdat_init_kcompactd(pgdat);

	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);

	pgdat_page_ext_init(pgdat);
	lruvec_init(&pgdat->__lruvec);
}
static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
							unsigned long remaining_pages)
{
	atomic_long_set(&zone->managed_pages, remaining_pages);
	zone_set_nid(zone, nid);
	zone->name = zone_names[idx];
	zone->zone_pgdat = NODE_DATA(nid);
	spin_lock_init(&zone->lock);
	zone_seqlock_init(zone);
	zone_pcp_init(zone);
}
/*
 * Set up the zone data structures
 * - init pgdat internals
 * - init all zones belonging to this node
 *
 * NOTE: this function is only called during memory hotplug
 */
#ifdef CONFIG_MEMORY_HOTPLUG
void __ref free_area_init_core_hotplug(int nid)
{
	enum zone_type z;
	pg_data_t *pgdat = NODE_DATA(nid);

	pgdat_init_internals(pgdat);
	for (z = 0; z < MAX_NR_ZONES; z++)
		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
}
#endif
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 * NOTE: this function is only called during early init.
 */
static void __init free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;

	pgdat_init_internals(pgdat);
	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, freesize, memmap_pages;

		size = zone->spanned_pages;
		freesize = zone->present_pages;

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = calc_memmap_size(size, freesize);
		if (!is_highmem_idx(j)) {
			if (freesize >= memmap_pages) {
				freesize -= memmap_pages;
				if (memmap_pages)
					pr_debug("  %s zone: %lu pages used for memmap\n",
						 zone_names[j], memmap_pages);
			} else
				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
					zone_names[j], memmap_pages, freesize);
		}

		/* Account for reserved pages */
		if (j == 0 && freesize > dma_reserve) {
			freesize -= dma_reserve;
			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += freesize;
		/* Charge for highmem memmap if there are enough kernel pages */
		else if (nr_kernel_pages > memmap_pages * 2)
			nr_kernel_pages -= memmap_pages;
		nr_all_pages += freesize;

		/*
		 * Set an approximate value for lowmem here, it will be adjusted
		 * when the bootmem allocator frees pages into the buddy system.
		 * And all highmem pages will be managed by the buddy system.
		 */
		zone_init_internals(zone, j, nid, freesize);

		if (!size)
			continue;

		set_pageblock_order();
		setup_usemap(zone);
		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
	}
}
#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	unsigned long __maybe_unused start = 0;
	unsigned long __maybe_unused offset = 0;

	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	offset = pgdat->node_start_pfn - start;
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		end = pgdat_end_pfn(pgdat);
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				   pgdat->node_id, false);
		if (!map)
			panic("Failed to allocate %ld bytes for node %d memory map\n",
			      size, pgdat->node_id);
		pgdat->node_mem_map = map + offset;
	}
	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
				__func__, pgdat->node_id, (unsigned long)pgdat,
				(unsigned long)pgdat->node_mem_map);
#ifndef CONFIG_NUMA
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= offset;
	}
#endif
}
#else
static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
#endif /* CONFIG_FLATMEM */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
#endif
static void __init free_area_init_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = 0;
	unsigned long end_pfn = 0;

	/* pg_data_t should be reset to zero when it's allocated */
	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = start_pfn;
	pgdat->per_cpu_nodestats = NULL;

	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
		(u64)start_pfn << PAGE_SHIFT,
		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
	calculate_node_totalpages(pgdat, start_pfn, end_pfn);

	alloc_node_mem_map(pgdat);
	pgdat_set_deferred_range(pgdat);

	free_area_init_core(pgdat);
}

void __init free_area_init_memoryless_node(int nid)
{
	free_area_init_node(nid);
}
#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
void __init setup_nr_node_ids(void)
{
	unsigned int highest;

	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
	nr_node_ids = highest + 1;
}
#endif
/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
 * shifted, 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Return: the determined alignment in pfn's. 0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = NUMA_NO_NODE;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}
/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * Return: the minimum PFN based on information provided via
 * memblock_set_node().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return PHYS_PFN(memblock_start_of_DRAM());
}
/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}
/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others.
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrow the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < 0x100000) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		unsigned long start_pfn, end_pfn;

		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (zone_movable_pfn[nid] >= end_pfn)
			zone_movable_pfn[nid] = 0;
	}

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}
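/*
 * Worked example (hypothetical): booting with kernelcore=2G on a two-node
 * machine with 4 GiB per node gives required_kernelcore == 524288 pages, so
 * the pass above places about 262144 kernelcore pages on each node and the
 * remaining ~3 GiB per node becomes ZONE_MOVABLE, starting at the
 * zone_movable_pfn[] value computed for that node.
 */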
/* Any regular or high memory on that node ? */
static void check_for_memory(pg_data_t *pgdat, int nid)
{
	enum zone_type zone_type;

	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (populated_zone(zone)) {
			if (IS_ENABLED(CONFIG_HIGHMEM))
				node_set_state(nid, N_HIGH_MEMORY);
			if (zone_type <= ZONE_NORMAL)
				node_set_state(nid, N_NORMAL_MEMORY);
			break;
		}
	}
}
/*
 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
 * such cases we allow max_zone_pfn sorted in the descending order
 */
bool __weak arch_has_descending_max_zone_pfns(void)
{
	return false;
}
/**
 * free_area_init - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid, zone;
	bool descending;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));

	start_pfn = find_min_pfn_with_active_regions();
	descending = arch_has_descending_max_zone_pfns();

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (descending)
			zone = MAX_NR_ZONES - i - 1;
		else
			zone = i;

		if (zone == ZONE_MOVABLE)
			continue;

		end_pfn = max(max_zone_pfn[zone], start_pfn);
		arch_zone_lowest_possible_pfn[zone] = start_pfn;
		arch_zone_highest_possible_pfn[zone] = end_pfn;

		start_pfn = end_pfn;
	}

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	pr_info("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		pr_info("  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			pr_cont("empty\n");
		else
			pr_cont("[mem %#018Lx-%#018Lx]\n",
				(u64)arch_zone_lowest_possible_pfn[i]
					<< PAGE_SHIFT,
				((u64)arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	pr_info("Movable zone start for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			pr_info("  Node %d: %#018Lx\n", i,
			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
	}

	/*
	 * Print out the early node map, and initialize the
	 * subsection-map relative to active online memory ranges to
	 * enable future "sub-section" extensions of the memory map.
	 */
	pr_info("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			((u64)end_pfn << PAGE_SHIFT) - 1);
		subsection_map_init(start_pfn, end_pfn - start_pfn);
	}

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid);

		/* Any memory on that node */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_MEMORY);
		check_for_memory(pgdat, nid);
	}

	memmap_init();
}
static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}
/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
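/*
 * Usage note (boot command line): these early_param() handlers accept forms
 * such as "kernelcore=512M", "kernelcore=30%", "kernelcore=mirror" and
 * "movablecore=1G"; sizes go through memparse() and a trailing '%' takes the
 * percentage path in cmdline_parse_core().
 */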
void adjust_managed_page_count(struct page *page, long count)
{
	atomic_long_add(count, &page_zone(page)->managed_pages);
	totalram_pages_add(count);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(count);
#endif
}
EXPORT_SYMBOL(adjust_managed_page_count);
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		struct page *page = virt_to_page(pos);
		void *direct_map_addr;

		/*
		 * 'direct_map_addr' might be different from 'pos'
		 * because some architectures' virt_to_page()
		 * work with aliases. Getting the direct map
		 * address ensures that we get a _writeable_
		 * alias for the memset().
		 */
		direct_map_addr = page_address(page);
		/*
		 * Perform a kasan-unchecked memset() since this memory
		 * has not been initialized.
		 */
		direct_map_addr = kasan_reset_tag(direct_map_addr);
		if ((unsigned int)poison <= 0xFF)
			memset(direct_map_addr, poison, PAGE_SIZE);

		free_reserved_page(page);
	}

	if (pages)
		pr_info("Freeing %s memory: %ldK\n",
			s, pages << (PAGE_SHIFT - 10));

	return pages;
}
void __init mem_init_print_info(void)
{
	unsigned long physpages, codesize, datasize, rosize, bss_size;
	unsigned long init_code_size, init_data_size;

	physpages = get_num_physpages();
	codesize = _etext - _stext;
	datasize = _edata - _sdata;
	rosize = __end_rodata - __start_rodata;
	bss_size = __bss_stop - __bss_start;
	init_data_size = __init_end - __init_begin;
	init_code_size = _einittext - _sinittext;

	/*
	 * Detect special cases and adjust section sizes accordingly:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
	 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (start <= pos && pos < end && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		     _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef	adj_init_size

	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef	CONFIG_HIGHMEM
		", %luK highmem"
#endif
		")\n",
		nr_free_pages() << (PAGE_SHIFT - 10),
		physpages << (PAGE_SHIFT - 10),
		codesize >> 10, datasize >> 10, rosize >> 10,
		(init_data_size + init_code_size) >> 10, bss_size >> 10,
		(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
		totalcma_pages << (PAGE_SHIFT - 10)
#ifdef	CONFIG_HIGHMEM
		, totalhigh_pages() << (PAGE_SHIFT - 10)
#endif
		);
}
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}
static int page_alloc_cpu_dead(unsigned int cpu)
{
	struct zone *zone;

	lru_add_drain_cpu(cpu);

	/*
	 * Spill the event counters of the dead processor
	 * into the current processors event counters.
	 * This artificially elevates the count of the current
	 * processor.
	 */
	vm_events_fold_cpu(cpu);

	/*
	 * Zero the differential counters of the dead processor
	 * so that the vm statistics are consistent.
	 *
	 * This is only okay since the processor is dead and cannot
	 * race with what we are doing.
	 */
	cpu_vm_stats_fold(cpu);

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 0);

	return 0;
}

static int page_alloc_cpu_online(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zone_pcp_update(zone, 1);
	return 0;
}

#ifdef CONFIG_NUMA
int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

void __init page_alloc_init(void)
{
	int ret;

#ifdef CONFIG_NUMA
	if (num_node_state(N_MEMORY) == 1)
		hashdist = 0;
#endif

	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
					"mm/page_alloc:pcp",
					page_alloc_cpu_online,
					page_alloc_cpu_dead);
	WARN_ON(ret < 0);
}
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {

		pgdat->totalreserve_pages = 0;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;
			unsigned long managed_pages = zone_managed_pages(zone);

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > managed_pages)
				max = managed_pages;

			pgdat->totalreserve_pages += max;

			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}
/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
			struct zone *zone = &pgdat->node_zones[i];
			int ratio = sysctl_lowmem_reserve_ratio[i];
			bool clear = !ratio || !zone_managed_pages(zone);
			unsigned long managed_pages = 0;

			for (j = i + 1; j < MAX_NR_ZONES; j++) {
				struct zone *upper_zone = &pgdat->node_zones[j];

				managed_pages += zone_managed_pages(upper_zone);

				if (clear)
					zone->lowmem_reserve[j] = 0;
				else
					zone->lowmem_reserve[j] = managed_pages / ratio;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
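/*
 * Worked example (illustrative numbers, not from the source): with the
 * default sysctl_lowmem_reserve_ratio[ZONE_DMA] of 256 and roughly 4 GiB
 * of memory (about 1,048,576 4 KiB pages) sitting in the zones above
 * ZONE_DMA, the DMA zone keeps a lowmem_reserve of about
 * 1048576 / 256 = 4096 pages (16 MiB) out of reach of allocations that
 * could equally well have been satisfied from the higher zones.
 */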
static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone_managed_pages(zone);
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone_managed_pages(zone);
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone_managed_pages(zone) / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->_watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->_watermark[WMARK_MIN] = tmp;
		}

		/*
		 * Set the kswapd watermarks distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
		tmp = max_t(u64, tmp >> 2,
			    mult_frac(zone_managed_pages(zone),
				      watermark_scale_factor, 10000));

		zone->watermark_boost = 0;
		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	struct zone *zone;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);

	/*
	 * The watermark size has changed so update the pcpu batch
	 * and high limits or the limits may be inappropriate.
	 */
	for_each_zone(zone)
		zone_pcp_update(zone, 0);
}
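/*
 * Worked example (illustrative numbers, assuming the default
 * watermark_scale_factor of 10): a zone with 1,000,000 managed pages on a
 * machine with 4,000,000 lowmem pages and min_free_kbytes = 16384
 * (pages_min = 4096 with 4 KiB pages) ends up with
 *
 *	WMARK_MIN  = 4096 * 1000000 / 4000000             = 1024 pages
 *	tmp        = max(1024 >> 2, 1000000 * 10 / 10000) = 1000 pages
 *	WMARK_LOW  = 1024 + 1000                          = 2024 pages
 *	WMARK_HIGH = 1024 + 2 * 1000                      = 3024 pages
 */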
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (256MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 262144)
			min_free_kbytes = 262144;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
			new_min_free_kbytes, user_min_free_kbytes);
	}

	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
	setup_min_unmapped_ratio();
	setup_min_slab_ratio();
#endif

	khugepaged_min_free_kbytes_update();

	return 0;
}
postcore_initcall(init_per_zone_wmark_min)
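/*
 * Worked example (illustrative): with 4 GiB of lowmem,
 * lowmem_kbytes = 4194304, so
 *
 *	new_min_free_kbytes = int_sqrt(4194304 * 16) = 8192
 *
 * i.e. roughly 8 MiB is kept free, subject to the [128, 262144] clamp
 * applied above.
 */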
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_per_zone_wmarks();

	return 0;
}
#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_unmapped_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
				sysctl_min_unmapped_ratio) / 100;
}

int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_unmapped_ratio();

	return 0;
}

static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
				sysctl_min_slab_ratio) / 100;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_slab_ratio();

	return 0;
}
#endif
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio is only meaningful
 * in relation to the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int i;

	proc_dointvec_minmax(table, write, buffer, length, ppos);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (sysctl_lowmem_reserve_ratio[i] < 1)
			sysctl_lowmem_reserve_ratio[i] = 0;
	}

	setup_per_zone_lowmem_reserve();
	return 0;
}
/*
 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to buddy allocator.
 */
int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_high_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_high_fraction &&
	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
		goto out;

	for_each_populated_zone(zone)
		zone_set_pageset_high_and_batch(zone, 0);
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}
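/*
 * Illustrative usage (not part of the source): the knob is exposed as
 * /proc/sys/vm/percpu_pagelist_high_fraction, so e.g.
 *
 *	echo 8 > /proc/sys/vm/percpu_pagelist_high_fraction
 *
 * caps each zone's pcp high limit at roughly an eighth of the zone's
 * managed pages (split across the local CPUs), while writing 0 restores
 * the default heuristic sizing.
 */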
#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
 * Returns the number of pages that arch has reserved but
 * is not known to alloc_large_system_hash().
 */
static unsigned long __init arch_reserved_kernel_pages(void)
{
	return 0;
}
#endif

/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased but at
 * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
 * quadruples the scale is increased by one, which means the size of hash table
 * only doubles, instead of quadrupling as well.
 * Because 32-bit systems cannot have large physical memory, where this scaling
 * makes sense, it is disabled on such platforms.
 */
#if __BITS_PER_LONG > 32
#define ADAPT_SCALE_BASE	(64ul << 30)
#define ADAPT_SCALE_SHIFT	2
#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
#endif
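/*
 * Worked example (illustrative): ADAPT_SCALE_NPAGES is the number of
 * pages in 64 GiB. Because the loop in alloc_large_system_hash() grows
 * the threshold by a factor of 4 (ADAPT_SCALE_SHIFT) per iteration, a
 * machine with at most 64 GiB keeps the caller's scale, anything up to
 * 256 GiB adds 1 to it, and up to 1 TiB adds 2, so the hash table only
 * doubles for every quadrupling of memory past 64 GiB.
 */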
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;
	gfp_t gfp_flags;
	bool virt;
	bool huge;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries -= arch_reserved_kernel_pages();

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

#if __BITS_PER_LONG > 32
		if (!high_limit) {
			unsigned long adapt;

			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
			     adapt <<= ADAPT_SCALE_SHIFT)
				scale++;
		}
#endif

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
	do {
		virt = false;
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY) {
			if (flags & HASH_ZERO)
				table = memblock_alloc(size, SMP_CACHE_BYTES);
			else
				table = memblock_alloc_raw(size,
							   SMP_CACHE_BYTES);
		} else if (get_order(size) >= MAX_ORDER || hashdist) {
			table = __vmalloc(size, gfp_flags);
			virt = true;
			huge = is_vm_area_hugepages(table);
		} else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
			table = alloc_pages_exact(size, gfp_flags);
			kmemleak_alloc(table, size, 1, gfp_flags);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
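/*
 * Sketch of a typical caller (illustrative; the real call sites live in
 * fs/ and net/). The inode cache, for example, sizes its table roughly
 * like this:
 *
 *	inode_hashtable =
 *		alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries,
 *					14,
 *					HASH_ZERO,
 *					&i_hash_shift,
 *					&i_hash_mask,
 *					0,
 *					0);
 *
 * A scale of 14 asks for about one bucket per 16 KiB of low memory, and
 * passing 0 entries and limits lets this function pick the size itself.
 */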
/*
 * This function checks whether the pageblock includes unmovable pages or not.
 *
 * PageLRU check without isolation or lru_lock could race so that
 * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
 * check without lock_page also may miss some movable non-lru pages at
 * race condition. So this function is not expected to be exact.
 *
 * Returns a page without holding a reference. If the caller wants to
 * dereference that page (e.g., dumping), it has to make sure that it
 * cannot get removed (e.g., via memory unplug) concurrently.
 */
struct page *has_unmovable_pages(struct zone *zone, struct page *page,
				 int migratetype, int flags)
{
	unsigned long iter = 0;
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset = pfn % pageblock_nr_pages;

	if (is_migrate_cma_page(page)) {
		/*
		 * CMA allocations (alloc_contig_range) really need to mark
		 * isolate CMA pageblocks even when they are not movable in fact
		 * so consider them movable here.
		 */
		if (is_migrate_cma(migratetype))
			return NULL;

		return page;
	}

	for (; iter < pageblock_nr_pages - offset; iter++) {
		page = pfn_to_page(pfn + iter);

		/*
		 * Both bootmem allocations and memory holes are marked
		 * PG_reserved and are unmovable. We can even have unmovable
		 * allocations inside ZONE_MOVABLE, for example when
		 * specifying "movablecore".
		 */
		if (PageReserved(page))
			return page;

		/*
		 * If the zone is movable and we have ruled out all reserved
		 * pages then it should be reasonably safe to assume the rest
		 * is movable.
		 */
		if (zone_idx(zone) == ZONE_MOVABLE)
			continue;

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * THPs are on the LRU, but need to be counted as #small pages.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page) || PageTransCompound(page)) {
			struct page *head = compound_head(page);
			unsigned int skip_pages;

			if (PageHuge(page)) {
				if (!hugepage_migration_supported(page_hstate(head)))
					return page;
			} else if (!PageLRU(head) && !__PageMovable(head)) {
				return page;
			}

			skip_pages = compound_nr(head) - (page - head);
			iter += skip_pages - 1;
			continue;
		}

		/*
		 * We can't use page_count without pinning the page
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * because their page->_refcount is zero at all times.
		 */
		if (!page_ref_count(page)) {
			if (PageBuddy(page))
				iter += (1 << buddy_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			continue;

		/*
		 * We treat all PageOffline() pages as movable when offlining
		 * to give drivers a chance to decrement their reference count
		 * in MEM_GOING_OFFLINE in order to indicate that these pages
		 * can be offlined as there are no direct references anymore.
		 * For actually unmovable PageOffline() where the driver does
		 * not support this, we will fail later when trying to actually
		 * move these pages that still have a reference count > 0.
		 * (false negatives in this function only)
		 */
		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
			continue;

		if (__PageMovable(page) || PageLRU(page))
			continue;

		/*
		 * If there are RECLAIMABLE pages, we need to check
		 * them.  But for now, memory offline itself doesn't call
		 * shrink_node_slabs() and that still needs to be fixed.
		 */
		return page;
	}
	return NULL;
}
#ifdef CONFIG_CONTIG_ALLOC
static unsigned long pfn_max_align_down(unsigned long pfn)
{
	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
			     pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages));
}

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* Usage: See admin-guide/dynamic-debug-howto.rst */
static void alloc_contig_dump_pages(struct list_head *page_list)
{
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");

	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
		struct page *page;

		dump_stack();
		list_for_each_entry(page, page_list, lru)
			dump_page(page, "migration failure");
	}
}
#else
static inline void alloc_contig_dump_pages(struct list_head *page_list)
{
}
#endif
/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned int nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;
	struct migration_target_control mtc = {
		.nid = zone_to_nid(cc->zone),
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	lru_cache_disable();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			ret = isolate_migratepages_range(cc, pfn, end);
			if (ret && ret != -EAGAIN)
				break;
			pfn = cc->migrate_pfn;
			tries = 0;
		} else if (++tries == 5) {
			ret = -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);

		/*
		 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
		 * to retry again over this error, so do the same here.
		 */
		if (ret == -ENOMEM)
			break;
	}

	lru_cache_enable();
	if (ret < 0) {
		if (ret == -EBUSY)
			alloc_contig_dump_pages(&cc->migratepages);
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}
/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned.  The PFN range must belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range.  Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code.  On success all
 * pages which PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	unsigned int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the biggest of the two pages so
	 * that the page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (ie. pages that
	 * we are interested in).  This will put all the pages in
	 * range back to page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from page
	 * allocator removing them from the buddy system.  This way
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype, 0);
	if (ret)
		return ret;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes problem.
	 * So, just fall through. test_pages_isolated() has a tracepoint
	 * which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated.  So, if we fall through be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
	 * more, all pages in [start, end) are free in page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is remove them from page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of interesting range may not be aligned with pages that
	 * page allocator holds, ie. they can be part of higher order
	 * pages.  Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = buddy_order(pfn_to_page(outer_start));

		/*
		 * outer_start page could be small order buddy page and
		 * it doesn't include start page. Adjust outer_start
		 * in this case to report failed page properly
		 * on tracepoint in test_pages_isolated()
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range);
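/*
 * Illustrative usage (not from this file): CMA is the main consumer of
 * this interface. Carving a physically contiguous block out of a CMA
 * region boils down to something like
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA, GFP_KERNEL);
 *	...
 *	free_contig_range(pfn, nr_pages);
 *
 * with mm/cma.c providing the bitmap bookkeeping and retry logic around it.
 */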
static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}
/**
 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask to limit search and used during compaction
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range(). It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range(). This routine is intended
 * for allocation requests which can not be fulfilled with the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary. If nr_pages is a
 * power of two, then the alignment is guaranteed to be to the given nr_pages
 * (e.g. 1GB request would be aligned to 1GB).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							   gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */
void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%lu pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);
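/*
 * Sketch of how a caller might combine the two interfaces above
 * (illustrative; gigantic hugetlb page allocation is the in-tree user):
 *
 *	struct page *page;
 *
 *	page = alloc_contig_pages(1UL << order, GFP_KERNEL | __GFP_THISNODE,
 *				  nid, NULL);
 *	if (!page)
 *		return NULL;
 *	...
 *	free_contig_range(page_to_pfn(page), 1UL << order);
 */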
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void zone_pcp_update(struct zone *zone, int cpu_online)
{
	mutex_lock(&pcp_batch_high_lock);
	zone_set_pageset_high_and_batch(zone, cpu_online);
	mutex_unlock(&pcp_batch_high_lock);
}
/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all cpus. A concurrent page freeing on another CPU that's about
 * to put the page on pcplist will either finish before the drain and the page
 * will be drained, or observe the new high limit and skip the pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}
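/*
 * Illustrative pairing (not from this file): memory offlining brackets its
 * work with these helpers, roughly
 *
 *	zone_pcp_disable(zone);
 *	... isolate and migrate the range with pcplists out of the way ...
 *	zone_pcp_enable(zone);
 *
 * so that no pages can linger on per-cpu lists while the range is isolated.
 */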
void zone_pcp_reset(struct zone *zone)
{
	int cpu;
	struct per_cpu_zonestat *pzstats;

	if (zone->per_cpu_pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
			drain_zonestat(zone, pzstats);
		}
		free_percpu(zone->per_cpu_pageset);
		free_percpu(zone->per_cpu_zonestats);
		zone->per_cpu_pageset = &boot_pageset;
		zone->per_cpu_zonestats = &boot_zonestats;
	}
}
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone, must not contain holes,
 * must span full sections, and must be isolated before calling this function.
 */
void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;
	unsigned long flags;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Break down a higher-order page in sub-pages, and keep our target out of
 * the buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy, *next_page;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			next_page = page + size;
			current_buddy = page;
		} else {
			next_page = page;
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high, migratetype))
			continue;

		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high, migratetype);
			set_buddy_order(current_buddy, high);
			page = next_page;
		}
	}
}

/*
 * Take a page that will be marked as poisoned off the buddy allocator.
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	bool ret = false;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
						page_order, migratetype);
			if (!is_migrate_isolate(migratetype))
				__mod_zone_freepage_state(zone, -1, migratetype);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
#endif
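/*
 * Illustrative usage (not from this file): memory-failure handling calls
 * take_page_off_buddy() when it wants to poison a page that is currently
 * free, roughly
 *
 *	if (take_page_off_buddy(page))
 *		SetPageHWPoison(page);
 *
 * so the soon-to-be-poisoned page can never be handed out again.
 */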
#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat) {
		struct zone *zone = &pgdat->node_zones[ZONE_DMA];

		if (managed_zone(zone))
			return true;
	}
	return false;
}
#endif /* CONFIG_ZONE_DMA */