#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is the boundary between allocation orders which
 * should coalesce naturally under reasonable reclaim pressure and those
 * which will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

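/*
 * Illustrative sketch, not part of the original header: counting the free
 * blocks tracked by a free_area[] array (one entry per order) by walking
 * every (order, migratetype) pair with for_each_migratetype_order().  Each
 * list entry represents one free block of 2^order pages; the function name
 * is hypothetical.
 */
static inline unsigned long example_count_free_blocks(struct free_area *area)
{
	int order, type;
	unsigned long blocks = 0;

	for_each_migratetype_order(order, type) {
		struct list_head *lh;

		list_for_each(lh, &area[order].free_list[type])
			blocks++;	/* one free block of 2^order pages */
	}
	return blocks;
}
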
struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_INACTIVE,
	NR_ACTIVE,
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	/* Second 128 byte cacheline */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_VM_ZONE_STAT_ITEMS };

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif

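/*
 * Illustrative sketch, not part of the original header: the hot list
 * (pcp[0]) is refilled and consumed for ordinary allocations, while the
 * cold list (pcp[1]) is preferred for pages that are unlikely to be used
 * immediately (e.g. read-ahead).  The helper below, whose name is
 * hypothetical, just reports how many pages one CPU is currently caching.
 */
static inline int example_pcp_cached_pages(struct per_cpu_pageset *pset)
{
	return pset->pcp[0].count + pset->pcp[1].count;
}
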
enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL).  Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB.  The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	MAX_NR_ZONES
};

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  See gfp_zone() in include/linux/gfp.h.
 */

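/*
 * Illustrative sketch, not part of the original header: a driver that must
 * allocate DMA-capable memory passes the GFP_DMA zone modifier, which
 * gfp_zone() maps onto a zonelist that starts at ZONE_DMA.  This assumes
 * the alloc_pages()/GFP_* definitions from include/linux/gfp.h; the
 * function name is hypothetical.
 */
static inline struct page *example_alloc_dma_page(void)
{
	return alloc_pages(GFP_KERNEL | GFP_DMA, 0);	/* one order-0 page */
}
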
/*
 * Count the active zones.  Note that the behaviour of defined(X) outside
 * of #if and friends is not necessarily well defined, so make sure
 * __ZONE_COUNT cannot be used later; it only exists to work out how many
 * shift bits ZONES_SHIFT needs.
 */
#define __ZONE_COUNT (			\
	  defined(CONFIG_ZONE_DMA)	\
	+ defined(CONFIG_ZONE_DMA32)	\
	+ 1				\
	+ defined(CONFIG_HIGHMEM)	\
	+ 1				\
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
#elif __ZONE_COUNT <= 2
#define ZONES_SHIFT 1
#elif __ZONE_COUNT <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured, adjust calculation
#endif
#undef __ZONE_COUNT

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or will eventually be released, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being plenty of freeable ram on the higher zones).  This
	 * array is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
	struct per_cpu_pageset	pageset[NR_CPUS];
#endif
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block.  See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		pages_scanned;	/* since last reclaim */
	unsigned long		flags;		/* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim
	 * is under - it drives the swappiness decision: whether to unmap
	 * mapped pages.
	 *
	 * Access to this field is quite racy even on uniprocessor, but
	 * it is expected to average out OK.
	 */
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table			-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits		-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible.  The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time.  So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd.  The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;

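/*
 * Illustrative sketch, not part of the original header, loosely modelled on
 * the zone_watermark_ok() check implemented in mm/page_alloc.c: an order-0
 * allocation intended for class zone 'classzone_idx' should only dip into
 * this zone while its free pages stay above the watermark plus the lowmem
 * reserve kept back for that class zone.  Assumes zone_page_state() from
 * include/linux/vmstat.h; the function name is hypothetical.
 */
static inline int example_zone_above_low_watermark(struct zone *z,
						   int classzone_idx)
{
	unsigned long free = zone_page_state(z, NR_FREE_PAGES);

	return free > z->pages_low + z->lowmem_reserve[classzone_idx];
}
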
typedef enum {
	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}
static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_all_unreclaimable(const struct zone *zone)
{
	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
}
static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

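/*
 * Illustrative sketch, not part of the original header: how a reclaim path
 * might use the flag helpers above to keep two CPUs from reclaiming the
 * same zone at once.  Simplified - the real code in mm/vmscan.c closes the
 * window between the test and the set atomically; the function name is
 * hypothetical.
 */
static inline int example_try_zone_reclaim(struct zone *zone)
{
	if (zone_is_reclaim_locked(zone))
		return 0;	/* another CPU is already reclaiming this zone */
	zone_set_flag(zone, ZONE_RECLAIM_LOCKED);
	/* ... scan and free pages from the zone ... */
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
	return 1;
}
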
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

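/*
 * Illustrative sketch, not part of the original header: the fraction of an
 * LRU list scanned per pass doubles each time the priority drops towards 0,
 * at which point the whole list is scanned.  The function name is
 * hypothetical.
 */
static inline unsigned long example_scan_target(unsigned long queue_length,
						int priority)
{
	return queue_length >> priority;	/* DEF_PRIORITY -> 1/4096th */
}
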
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0 .. MAX_NR_ZONES - 1]			: Zonelists with fallback
 * [MAX_NR_ZONES .. MAX_ZONELISTS - 1]		: No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS (2 * MAX_NR_ZONES)

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zeroed fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * re-examining zones for free memory when they only just came up
 * low on memory.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 * 1) The full, fixed length version, shown below, and
 * 2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS MAX_NR_ZONES
struct zonelist_cache;
#endif

/*
 * One allocation request operates on a zonelist.  A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 */

struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     // optional ...
#endif
};

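/*
 * Illustrative sketch, not part of the original header: walking the
 * NULL-terminated zones[] array in fallback order, the way the allocator's
 * get_page_from_freelist() does (greatly simplified).  The function name is
 * hypothetical.
 */
static inline struct zone *example_first_populated_zone(struct zonelist *zl)
{
	struct zone **z;

	for (z = zl->zones; *z != NULL; z++)
		if ((*z)->present_pages)
			return *z;
	return NULL;
}
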
#ifdef CONFIG_NUMA
/*
 * Only custom zonelists like MPOL_BIND need to be filtered as part of
 * policies.  As described in the comment for struct zonelist_cache, these
 * zonelists will not have a zlcache so zlcache_ptr will not be set.  Use
 * that to determine if the zonelists need to be filtered or not.
 */
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return !zonelist->zlcache_ptr;
}
#else
static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
{
	return 0;
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;
#endif
	struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#include <linux/memory_hotplug.h>

void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_idx = zone - zone->zone_pgdat->node_zones;
	return zone_idx == ZONE_HIGHMEM ||
		(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#include <linux/topology.h>
/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

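/*
 * Illustrative sketch, not part of the original header: turning a (node,
 * page offset) pair into a struct page with NODE_DATA() and the
 * nid_page_nr()/node_spanned_pages() helpers defined above.  Assumes the
 * node is online; the function name is hypothetical.
 */
static inline struct page *example_nth_page_of_node(int nid, unsigned long n)
{
	if (n >= node_spanned_pages(nid))
		return NULL;		/* offset falls outside the node */
	return nid_page_nr(nid, n);
}
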
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

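/*
 * Illustrative sketch, not part of the original header: summing present
 * pages over every zone in the system with the for_each_zone() iterator
 * above.  The function name is hypothetical.
 */
static inline unsigned long example_total_present_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_zone(zone)
		total += zone->present_pages;
	return total;
}
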
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
 */
#define FLAGS_RESERVED		9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED		32

#else

#error BITS_PER_LONG not defined

#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
#define early_pfn_to_nid(nid)	(0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

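/*
 * Illustrative worked example, not part of the original header: with
 * SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12 (values assumed here for
 * illustration), PFN_SECTION_SHIFT is 15, so every section covers
 * 1 << 15 = 32768 pages (128MB) and pfn_to_section_nr(pfn) is simply
 * pfn >> 15.  The helper below, whose name is hypothetical, rounds a pfn
 * down to the first pfn of its section.
 */
static inline unsigned long example_section_first_pfn(unsigned long pfn)
{
	return section_nr_to_pfn(pfn_to_section_nr(pfn));
}
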
struct page;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

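/*
 * Illustrative sketch, not part of the original header, modelled on the
 * SPARSEMEM flavour of __pfn_to_page() in include/asm-generic/memory_model.h:
 * the encoded mem_map pointer is biased so that adding the full pfn yields
 * the correct struct page.  The function name is hypothetical.
 */
static inline struct page *example_pfn_to_page(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return NULL;
	return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
}
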
static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

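/*
 * Illustrative sketch, not part of the original header: scanning a
 * MAX_ORDER_NR_PAGES-aligned buddy block that may contain holes, using
 * pfn_valid_within() so the check compiles away when CONFIG_HOLES_IN_ZONE
 * is not set.  The function name is hypothetical.
 */
static inline int example_block_has_hole(unsigned long start_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + MAX_ORDER_NR_PAGES; pfn++)
		if (!pfn_valid_within(pfn))
			return 1;
	return 0;
}
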
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */