#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;
/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};
/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};
/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};
extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif
#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
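/*
 * Example (illustrative sketch only, not part of this header): a typical
 * early-boot sequence registers the physical memory reported by firmware
 * and then reserves ranges that must never be handed out, such as the
 * kernel image or an initrd. The addresses and sizes below are made up.
 */
#if 0
static void __init example_register_memory(void)
{
	/* Make the range 0..512 MiB known to memblock as usable memory. */
	memblock_add(0, 0x20000000);

	/* Carve out a hypothetical 1 MiB firmware area at 16 MiB. */
	memblock_reserve(0x1000000, 0x100000);
}
#endif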
unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);
/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
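/*
 * Example (illustrative sketch): walking every range that is in
 * memblock.memory but not in memblock.reserved, on any node. This is
 * what the for_each_free_mem_range() convenience wrapper below expands to.
 */
#if 0
static void __init example_walk_mem_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
			   NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
		pr_info("free: [%pa-%pa]\n", &start, &end);
}
#endif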
/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		 __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				      p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))
/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
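/*
 * Example (illustrative sketch): dumping all reserved ranges. p_start and
 * p_end may be NULL when the caller needs only one of the two bounds.
 */
#if 0
static void __init example_dump_reserved(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_reserved_mem_region(i, &start, &end)
		pr_info("reserved: [%pa-%pa]\n", &start, &end);
}
#endif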
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);
/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
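/*
 * Example (illustrative sketch): counting the pages present on one NUMA
 * node. Note the loop variable is a plain int here, not a u64.
 */
#if 0
static unsigned long __init example_pages_on_node(int nid)
{
	unsigned long start_pfn, end_pfn, pages = 0;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		pages += end_pfn - start_pfn;

	return pages;
}
#endif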
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone is initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
/**
 * for_each_free_mem_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
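/*
 * Example (illustrative sketch): finding the largest free range. Passing
 * MEMBLOCK_NONE leaves regions unfiltered by attribute; a caller that only
 * wants mirrored memory would pass MEMBLOCK_MIRROR instead.
 */
#if 0
static phys_addr_t __init example_largest_free_range(void)
{
	phys_addr_t start, end, best = 0;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		best = max(best, end - start);

	return best;
}
#endif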
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
			       nid, flags, p_start, p_end, p_nid)
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);
static inline void memblock_set_region_node(struct memblock_region *r,
					    int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r,
					    int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif
phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);
static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
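/*
 * Example (illustrative sketch): a boot-time allocation made before the
 * buddy allocator is up. memblock_alloc() returns zeroed memory in the
 * kernel's direct map; the _node, _low and _from variants defined below
 * constrain where the memory may come from. The size here is made up.
 */
#if 0
static void __init example_early_alloc(void)
{
	/* 1 MiB, PAGE_SIZE aligned, anywhere accessible. */
	void *buf = memblock_alloc(1UL << 20, PAGE_SIZE);

	if (!buf)
		panic("%s: out of early memory\n", __func__);
}
#endif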
static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}
static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}
static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}
/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory
 * in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
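/*
 * Example (illustrative sketch): temporarily switching to bottom-up
 * allocation so that early allocations land near the kernel image, a
 * pattern some architectures use before memory hotplug information is
 * available. The surrounding calls are hypothetical.
 */
#if 0
static void __init example_bottom_up(void)
{
	memblock_set_bottom_up(true);
	/* ... perform allocations that should come from low memory ... */
	memblock_set_bottom_up(false);
}
#endif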
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}
/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);
phys_addr_t memblock_get_current_limit(void);
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */
/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}
/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}
/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}
/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
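/*
 * Example (illustrative sketch): combining for_each_memblock() with the
 * flag helpers and pfn accessors above to walk every mapped page frame
 * range of usable memory.
 */
#if 0
static void __init example_walk_pfns(void)
{
	struct memblock_region *region;

	for_each_memblock(memory, region) {
		unsigned long start_pfn, end_pfn;

		/* Skip regions excluded from the kernel direct mapping. */
		if (memblock_is_nomap(region))
			continue;

		start_pfn = memblock_region_memory_base_pfn(region);
		end_pfn = memblock_region_memory_end_pfn(region);
		pr_info("pfns: %lu-%lu\n", start_pfn, end_pfn);
	}
}
#endif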
#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);
#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
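/*
 * Example (illustrative sketch): sizing an early hash table in the style
 * of the inode and dentry caches. The table name, scale value and bucket
 * type are made up for illustration; numentries == 0 lets the allocator
 * size the table from available memory.
 */
#if 0
static struct hlist_head *example_hash __initdata;
static unsigned int example_hash_shift __initdata;

static void __init example_hash_init(void)
{
	example_hash = alloc_large_system_hash("example",
					       sizeof(struct hlist_head),
					       0,	/* auto-size */
					       14,	/* scale */
					       HASH_EARLY | HASH_ZERO,
					       &example_hash_shift,
					       NULL,
					       0,
					       0);
}
#endif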
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */