/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

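/*
 * Worked example (illustrative, assuming the default MAX_ORDER of 11 and a
 * 4 KiB PAGE_SIZE): the largest buddy allocation spans
 * MAX_ORDER_NR_PAGES = 1 << (11 - 1) = 1024 pages, i.e. 4 MiB.  With
 * PAGE_ALLOC_COSTLY_ORDER == 3, requests of order 4 and above (64 KiB and
 * larger) are the ones the allocator treats as costly.
 */
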
enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function.  What is important though is
	 * that a range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), \
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

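/*
 * Example (an illustrative sketch, not an API defined by this header): how
 * for_each_migratetype_order() and struct free_area are typically walked
 * when summarising a zone's free lists, in the spirit of the reporting code
 * in mm/vmstat.c.  Assumes the caller holds zone->lock so the lists are
 * stable; "zone" and "nr" are hypothetical locals.
 *
 *	unsigned long nr[MIGRATE_TYPES] = { 0 };
 *	struct list_head *pos;
 *	int order, type;
 *
 *	for_each_migratetype_order(order, type) {
 *		list_for_each(pos, &zone->free_area[order].free_list[type])
 *			nr[type] += 1UL << order;
 *	}
 */
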
struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

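/*
 * Example (illustrative): the arithmetic the comment above relies on.  An
 * LRU index can be composed from its parts, so a hypothetical helper such as
 *
 *	static inline enum lru_list lru_index(bool file, bool active)
 *	{
 *		return LRU_BASE + (file ? LRU_FILE : 0) + (active ? LRU_ACTIVE : 0);
 *	}
 *
 * yields lru_index(true, true) == LRU_ACTIVE_FILE, and the inactive
 * counterpart of any active list is simply (lru - LRU_ACTIVE).
 */
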
struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

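/*
 * Usage sketch (illustrative only): how the *_wmark_pages() accessors are
 * typically combined with helpers declared further down in this header
 * (wakeup_kswapd(), zone_idx()).  zone_page_state() comes from
 * <linux/vmstat.h>, which is not included here; "order" is a hypothetical
 * allocation order of the caller.
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, order, zone_idx(zone));
 */
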
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or whether it will be released eventually, so to
	 * avoid totally wasting several GB of ram we must reserve some of
	 * the lower zone memory (otherwise we risk running OOM on the lower
	 * zones despite there being tons of freeable ram on the higher
	 * zones).  This array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who cannot tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long.  Write access to zone->managed_pages and totalram_pages is
	 * protected by managed_page_count_lock at runtime.  Ideally only
	 * adjust_managed_page_count() should be used instead of directly
	 * touching zone->managed_pages and totalram_pages.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks.  It is used to solve an incorrect
	 * freepage counting problem due to racy retrieval of a pageblock's
	 * migratetype.  Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,	/* pgdat has many dirty pages backed by
				 * a congested BDI
				 */
	PGDAT_DIRTY,		/* reclaim scanning has recently found
				 * many dirty file pages at the tail
				 * of the LRU.
				 */
	PGDAT_WRITEBACK,	/* reclaim scanning has recently found
				 * many pages under writeback
				 */
	PGDAT_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

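/*
 * Example (illustrative): how the helpers above compose.  A pfn range
 * [start_pfn, start_pfn + nr_pages) lies entirely inside a zone when
 *
 *	!zone_is_empty(zone) &&
 *	zone_spans_pfn(zone, start_pfn) &&
 *	start_pfn + nr_pages <= zone_end_pfn(zone)
 *
 * whereas zone_intersects() only requires a non-empty overlap.
 */
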
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 *	zonelist_zone()		- Return the struct zone * for an entry in _zonerefs
 *	zonelist_zone_idx()	- Return the index of the zone for an entry
 *	zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
	/* Number of non-deferred pages */
	unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

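/*
 * Example (illustrative): why the distinction matters.  A node can contain a
 * zone whose pages are all reserved (present_pages > 0, managed_pages == 0),
 * so reclaim-style walks filter on managed_zone(), not populated_zone().
 * "pgdat" is a hypothetical pg_data_t pointer of the caller.
 *
 *	int i, nr_managed_zones = 0;
 *
 *	for (i = 0; i < MAX_NR_ZONES; i++)
 *		if (managed_zone(&pgdat->node_zones[i]))
 *			nr_managed_zones++;
 */
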
extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to set up the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

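/*
 * Example (illustrative): totalling the pages the buddy allocator manages
 * across the system, roughly the shape of the accounting loops in
 * mm/page_alloc.c.  "zone" is a plain local; the macro fills it in.
 *
 *	struct zone *zone;
 *	unsigned long managed = 0;
 *
 *	for_each_populated_zone(zone)
 *		managed += zone->managed_pages;
 */
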
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to a concurrent
 * cpuset modification updating the nodemask.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

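/*
 * Usage sketch (illustrative): the shape of the allocator's zonelist walk,
 * stopping at the first zone whose low watermark is still met.
 * node_zonelist() and gfp_zone() live in <linux/gfp.h> and are only assumed
 * here; "gfp_mask", "order", "alloc_flags" and "nodemask" are hypothetical
 * locals of the caller.
 *
 *	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 *					gfp_zone(gfp_mask), nodemask)
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      zonelist_zone_idx(z), alloc_flags))
 *			break;
 */
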
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

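/*
 * Worked example (values are illustrative; SECTION_SIZE_BITS comes from
 * <asm/sparsemem.h> and differs per architecture): with
 * SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12, a section covers
 * 1 << 27 bytes = 128 MiB, PFN_SECTION_SHIFT == 15 and
 * PAGES_PER_SECTION == 32768.  A pfn of 0x12345 then belongs to section
 * pfn_to_section_nr(0x12345) == 2, and SECTION_ALIGN_DOWN(0x12345) == 0x10000.
 */
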
struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least forces a cast before it can be
	 * used incorrectly.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * With SPARSEMEM, pgdat does not have a page_ext pointer; the
	 * section carries it instead.  (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

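/*
 * Example (illustrative): how the encoding above is consumed.  The stored
 * value is the section's mem_map pointer biased by its first pfn (see
 * sparse.c::sparse_init_one_section()), so once the flag bits are masked
 * off, the struct page for a pfn in a valid section is simply:
 *
 *	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
 *	struct page *page = __section_mem_map_addr(ms) + pfn;
 *
 * which is the shape of the SPARSEMEM __pfn_to_page() in
 * <asm-generic/memory_model.h>; the validity checks below must be done
 * first.
 */
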
static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. This means that a struct page exists for this
 * pfn. The caller cannot assume the page is fully initialized in general.
 * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
 * will ensure the struct page is fully online and initialized. Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there are valid PFNs on either side of the hole. In SPARSEMEM, it is
 * assumed that a valid section has a memmap for the entire section.
 *
 * However, ARM and maybe other embedded architectures in the future may
 * free the memmap backing holes to save memory on the assumption that the
 * memmap is never used. The page_zone linkages are then broken even though
 * pfn_valid() returns true. A walker of the full memmap must then do this
 * additional check to ensure the memmap they are looking at is sane by
 * making sure the zone and PFN linkages are still valid. This is expensive,
 * but walkers of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */