/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

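/*
 * Worked example (illustrative only, assuming the default MAX_ORDER of 11
 * and 4 KiB pages): buddy orders run from 0 to MAX_ORDER - 1, so the
 * largest buddy block is MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages,
 * i.e. 4 MiB of physically contiguous memory.
 */
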
/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * The MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated from
	 * MIGRATE_CMA pageblocks, and the page allocator never implicitly
	 * changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done with the
	 * __free_pageblock_cma() function.  What is important though is
	 * that the range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than a
	 * single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

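/*
 * Illustrative usage sketch of for_each_migratetype_order() (an assumption
 * about a typical caller, not a helper defined by this header): walking
 * every free list of a zone, e.g. to count free blocks per order and
 * migratetype while holding zone->lock.
 *
 *	unsigned int order;
 *	int mt;
 *
 *	for_each_migratetype_order(order, mt) {
 *		unsigned long nr = 0;
 *		struct page *page;
 *
 *		list_for_each_entry(page, &zone->free_area[order].free_list[mt], lru)
 *			nr++;
 *		pr_info("order %u type %s: %lu blocks\n",
 *			order, migratetype_names[mt], nr);
 *	}
 */
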
extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

/* Used for pages not on another list */
static inline void add_to_free_area(struct page *page, struct free_area *area,
			     int migratetype)
{
	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
				  int migratetype)
{
	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
/* Used to preserve page allocation order entropy */
void add_to_free_area_random(struct page *page, struct free_area *area,
		int migratetype);
#else
static inline void add_to_free_area_random(struct page *page,
		struct free_area *area, int migratetype)
{
	add_to_free_area(page, area, migratetype);
}
#endif

/* Used for pages which are on another list */
static inline void move_to_free_area(struct page *page, struct free_area *area,
			     int migratetype)
{
	list_move(&page->lru, &area->free_list[migratetype]);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline void del_page_from_free_area(struct page *page,
		struct free_area *area)
{
	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	area->nr_free--;
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

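/*
 * Illustrative sketch (an assumption about typical usage, not a helper
 * defined in this header): popping the first free page of a given order
 * and migratetype combines the accessors above.  Callers must hold
 * zone->lock, which protects the free areas.
 *
 *	static inline struct page *try_pop_free_page(struct zone *zone,
 *				unsigned int order, int migratetype)
 *	{
 *		struct free_area *area = &zone->free_area[order];
 *		struct page *page = get_page_from_free_area(area, migratetype);
 *
 *		if (page)
 *			del_page_from_free_area(page, area);
 *		return page;
 *	}
 */
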
struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,	/* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in node_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

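/*
 * Worked example of the index arithmetic above (illustrative only):
 * LRU_INACTIVE_ANON == 0, LRU_ACTIVE_ANON == 1, LRU_INACTIVE_FILE == 2,
 * LRU_ACTIVE_FILE == 3, LRU_UNEVICTABLE == 4.  Adding LRU_ACTIVE flips an
 * inactive list to its active counterpart, and adding LRU_FILE switches
 * from the anon pair to the file pair, which is what lets code index the
 * matching node_stat_item counters as NR_LRU_BASE + lru.
 */
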
struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)

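/*
 * Illustrative sketch (an assumption about typical use, not a helper
 * defined here): reading the boosted low watermark and comparing it with
 * the free page counter via zone_page_state() from linux/vmstat.h.
 *
 *	static inline bool zone_above_low_wmark(struct zone *z)
 *	{
 *		unsigned long free = zone_page_state(z, NR_FREE_PAGES);
 *
 *		return free > low_wmark_pages(z);
 *	}
 *
 * Real allocator code uses zone_watermark_ok() declared further down,
 * which also accounts for lowmem_reserve[] and unusable free pages.
 */
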
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390, powerpc	<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk to run OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones). This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by the page allocator and vm scanner to calculate all kinds of
	 * watermarks and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

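/*
 * Worked example (illustrative only): for a zone spanning PFNs
 * [0x1000, 0x2000), zone_intersects(zone, 0x1f00, 0x200) is true because
 * the range [0x1f00, 0x2100) overlaps the zone's tail, while
 * zone_intersects(zone, 0x2000, 0x100) is false since that range starts
 * exactly at zone_end_pfn().
 */
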
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

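/*
 * Illustrative sketch of a watermark check (an assumption about a typical
 * caller, not code from this header): testing whether an order-0
 * allocation could proceed from a given zone against its boosted low
 * watermark.
 *
 *	bool can_alloc = zone_watermark_ok(zone, 0, low_wmark_pages(zone),
 *					   zone_idx(zone), 0);
 *
 * The classzone index limits how much of lowmem_reserve[] is applied, and
 * alloc_flags (0 here) can relax or tighten the check, e.g. for highatomic
 * or CMA-capable allocations.  zone_idx() is defined further down.
 */
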
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

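/*
 * Illustrative usage sketch (an assumption about a typical caller, not
 * code from this header): summing managed pages across all populated
 * zones with the iterator above.
 *
 *	struct zone *zone;
 *	unsigned long total = 0;
 *
 *	for_each_populated_zone(zone)
 *		total += zone_managed_pages(zone);
 */
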
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

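/*
 * Illustrative usage sketch (an assumption about a typical caller, not
 * code from this header): walking the fallback zonelist of the current
 * node for zones usable by a normal kernel allocation.
 *
 *	struct zonelist *zlist = node_zonelist(numa_node_id(), GFP_KERNEL);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zlist, gfp_zone(GFP_KERNEL))
 *		pr_info("candidate zone %s on node %d\n",
 *			zone->name, zone_to_nid(zone));
 *
 * node_zonelist() and gfp_zone() are provided by linux/gfp.h.
 */
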
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

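/*
 * Worked example (illustrative only, assuming SECTION_SIZE_BITS == 27 and
 * PAGE_SHIFT == 12 as on x86_64): PFN_SECTION_SHIFT is 15, so each section
 * covers 1 << 15 = 32768 pages, i.e. 128 MiB of physical address space.
 * pfn_to_section_nr(0x12345) is then 0x12345 >> 15 == 2, and
 * section_nr_to_pfn(2) == 0x10000 is the first pfn of that section.
 */
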
struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal 6.
 * To sum it up, at least 6 bits are available.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

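/*
 * Illustrative example of the encoding described above (not code from this
 * header): during early boot a node id is stored as
 * (nid << SECTION_NID_SHIFT), so for node 2 section_mem_map temporarily
 * holds 0x10.  Once the section is initialised the low bits carry the
 * flags, e.g. a present, valid, online section has
 * (SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP | SECTION_IS_ONLINE) == 0x7
 * set, and SECTION_MAP_MASK strips those bits to recover the encoded
 * mem_map pointer in __section_mem_map_addr() below.
 */
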
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_present pfn_valid
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. This means that a struct page exists for this
 * pfn. The caller cannot assume the page is fully initialized in general.
 * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
 * will ensure the struct page is fully online and initialized. Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there are valid PFNs either side of the hole. In SPARSEMEM, it is assumed
 * that a valid section has a memmap for the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free the memmap backing holes to save memory on the assumption the memmap
 * is never used. The page_zone linkages are then broken even though
 * pfn_valid() returns true. A walker of the full memmap must then do this
 * additional check to ensure the memmap they are looking at is sane by
 * making sure the zone and PFN linkages are still valid. This is expensive,
 * but walkers of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */