#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <linux/atomic.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
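
/*
 * Worked example (illustrative, assuming the default MAX_ORDER of 11 and
 * 4KiB pages): the largest buddy block is 2^(MAX_ORDER - 1) =
 * MAX_ORDER_NR_PAGES = 1024 pages, i.e. 4MiB, while the costly threshold
 * 2^PAGE_ALLOC_COSTLY_ORDER = 8 pages (32KiB) marks where the allocator
 * stops assuming that reclaim can easily produce a large-enough
 * contiguous run.
 */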
#define MIGRATE_UNMOVABLE	0
#define MIGRATE_RECLAIMABLE	1
#define MIGRATE_MOVABLE		2
#define MIGRATE_PCPTYPES	3 /* the number of types on the pcp lists */
#define MIGRATE_RESERVE		3
#define MIGRATE_ISOLATE		4 /* can't allocate from here */
#define MIGRATE_TYPES		5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}
struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
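
/*
 * Illustrative sketch (not a kernel API; inspect() is a hypothetical
 * stand-in): a free block of 2^order pages with a given mobility type
 * lives on zone->free_area[order].free_list[migratetype], so a full walk
 * of the buddy lists pairs for_each_migratetype_order() with these
 * arrays:
 *
 *	unsigned int order;
 *	int t;
 *	for_each_migratetype_order(order, t)
 *		inspect(&zone->free_area[order].free_list[t]);
 */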
/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif
enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in a non-intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_VM_ZONE_STAT_ITEMS
};
/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
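
/*
 * Example of the arithmetic described above (the values follow from the
 * definitions): LRU_INACTIVE_FILE == LRU_INACTIVE_ANON + LRU_FILE and
 * LRU_ACTIVE_FILE == LRU_INACTIVE_FILE + LRU_ACTIVE, so code can hop
 * between paired lists, e.g. "lru + LRU_ACTIVE" maps an inactive list to
 * its active counterpart and "lru - LRU_ACTIVE" maps back.
 */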
static inline int is_file_lru(enum lru_list l)
{
	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list l)
{
	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list l)
{
	return (l == LRU_UNEVICTABLE);
}
/* Masks used for gathering LRU information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

/* Isolate inactive pages */
#define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
/* Isolate active pages */
#define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};
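
/*
 * Illustrative sketch (assumption: this mirrors the order-0 fast path in
 * mm/page_alloc.c): each CPU drains and fills its own short page lists,
 * so most single-page allocations never take zone->lock. Conceptually:
 *
 *	struct per_cpu_pages *pcp = &this_cpu_ptr(zone->pageset)->pcp;
 *	struct list_head *list = &pcp->lists[migratetype];
 *	if (list_empty(list))
 *		... refill pcp->batch pages from the buddy lists ...
 *	page = list_entry(list->next, struct page, lru);
 *
 * Only the first MIGRATE_PCPTYPES mobility types get per-cpu lists; the
 * rest always go through the buddy allocator proper.
 */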
#endif /* !__GENERATING_BOUNDS_H */
enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};
#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */

#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured, adjust calculation
#endif
struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};
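
/*
 * Worked example (hypothetical numbers): if 900 of the last 1000 scanned
 * file pages were rotated back onto the active list (recent_rotated[1] /
 * recent_scanned[1] == 0.9) while the anon ratio is only 0.1, the file
 * cache is being re-referenced heavily and vmscan will bias its scanning
 * toward the anon lists instead.
 */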
struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones). This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
	int			all_unreclaimable; /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
#endif

	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct zone_lru {
		struct list_head list;
	} lru[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;
	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, collisions should be so rare as to be outweighed by
	 * the benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs their initialization.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;
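
	/*
	 * Sketch of the lookup these fields support (this mirrors
	 * page_waitqueue() in mm/filemap.c): a waiter hashes the page
	 * pointer into the table, so colliding pages simply share a queue:
	 *
	 *	wait_queue_head_t *wq =
	 *		&zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
	 */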
	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;
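
/*
 * Worked example (hypothetical numbers): a zone spanning pfns 0x10000 to
 * 0x20000 has spanned_pages == 0x10000; if a 0x1000-page hole sits inside
 * that range, present_pages == 0xf000. With CONFIG_MEMORY_HOTPLUG these
 * values can change at runtime, which is why readers outside zone->lock
 * must bracket their access with span_seqlock.
 */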
typedef enum {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
} zone_flags_t;
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
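
/*
 * Worked example: at DEF_PRIORITY a list of 1,000,000 pages contributes
 * about 1000000 >> 12 = 244 pages per scan pass. Each time reclaim fails
 * to make progress the priority drops toward 0, doubling the share
 * scanned, until priority 0 scans the entire list.
 */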
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0]	: Zonelist with fallback
 * [1]	: No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2
/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zeroed fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * re-examining zones for free memory when they just came up low on
 * memory moments ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 * 1) The full, fixed length version, shown below, and
 * 2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */

struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
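
/*
 * Condensed sketch of the fast path this cache enables (hypothetical
 * pseudo-code; the real logic lives in the zlc_* helpers in
 * mm/page_alloc.c):
 *
 *	i = z - zonelist->_zonerefs;
 *	if (test_bit(i, zlc->fullzones))
 *		skip;		// recently seen full, don't rescan
 *	if (!node_isset(zlc->z_to_n[i], current->mems_allowed))
 *		skip;		// cpuset forbids this node
 *	... otherwise try the zone ...
 */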
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif
/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};
/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are:
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;	/* NULL or &zlcache */
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;		/* optional ... */
#endif
};
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif
/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
	enum zone_type classzone_idx;
} pg_data_t;
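
/*
 * Illustrative usage (hypothetical snippet): every online node owns one
 * pg_data_t, so per-node information hangs off NODE_DATA():
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *	struct zone *normal = &pgdat->node_zones[ZONE_NORMAL];
 *	unsigned long present = pgdat->node_present_pages;
 */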
#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

#define node_end_pfn(nid) ({\
	pg_data_t *__pgdat = NODE_DATA(nid);\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
})
#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
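
/*
 * Illustrative call (hypothetical values): before handing out 2^order
 * pages, the allocator verifies the zone would stay above the chosen
 * watermark, e.g.
 *
 *	if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *			      zone_idx(preferred_zone), alloc_flags))
 *		... this zone may be used ...
 */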
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif
/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;
static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}
/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;

	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}
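
/*
 * Design note: comparing byte offsets above is equivalent to testing
 * "zone_idx(zone) == ZONE_HIGHMEM || ...", but multiplying the constant
 * index by sizeof(*zone) avoids the pointer division that zone_idx()
 * would perform.
 */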
static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}
/* These functions are used to set up the per-zone page min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */
#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
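
/*
 * Illustrative usage (hypothetical snippet): summing free pages over
 * every zone that actually has memory:
 *
 *	struct zone *zone;
 *	unsigned long free = 0;
 *
 *	for_each_populated_zone(zone)
 *		free += zone_page_state(zone, NR_FREE_PAGES);
 *
 * (zone_page_state() is declared in linux/vmstat.h.)
 */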
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}
/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z: The cursor used as a starting point for the search
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 * @zone: The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone);
/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist: The zonelist to search for a suitable zone
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 * @zone: The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
								zone);
}
/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->zones being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 * @nodemask: Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->zones being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
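
/*
 * Illustrative usage (hypothetical snippet): walking the fallback order
 * that a GFP_KERNEL allocation on the current node would see:
 *
 *	struct zonelist *zl = node_zonelist(numa_node_id(), GFP_KERNEL);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zl, gfp_zone(GFP_KERNEL))
 *		... zones arrive in decreasing preference ...
 *
 * (node_zonelist() and gfp_zone() are declared in linux/gfp.h.)
 */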
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif
#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT	# bits required to store a section #
 *
 * PA_SECTION_SHIFT	physical address to/from section number
 * PFN_SECTION_SHIFT	pfn to/from section number
 */
#define SECTIONS_SHIFT	(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
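
/*
 * Worked example (assuming SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12,
 * as on x86_64): PFN_SECTION_SHIFT == 15, so PAGES_PER_SECTION == 32768
 * and each section covers 128MiB; pfn_to_section_nr(pfn) is then simply
 * pfn >> 15.
 */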
struct page;
struct page_cgroup;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode the node id of
	 * the section's location here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
	 * the section instead. (see memcontrol.h/page_cgroup.h about this.)
	 */
	struct page_cgroup *page_cgroup;
	unsigned long pad;
#endif
};
#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);
/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
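
/*
 * Worked example of the encoding: SECTION_MAP_LAST_BIT is 1UL<<2 == 4, so
 * SECTION_MAP_MASK == ~3UL and "map & SECTION_MAP_MASK" clears the two
 * flag bits, recovering the (suitably aligned) mem_map pointer. During
 * early boot the same word instead holds the node id shifted left by
 * SECTION_NID_SHIFT (see sparse.c).
 */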
static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif
void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM (and maybe other embedded architectures in the future)
 * frees the memmap backing holes to save memory, on the assumption the
 * memmap is never used. The page_zone linkages are then broken even
 * though pfn_valid() returns true. A walker of the full memmap must then
 * do this additional check to ensure the memmap it is looking at is sane
 * by making sure the zone and PFN linkages are still valid. This is
 * expensive, but walkers of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */