#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
};

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
};

struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_MOVABLE_NODE
/* If movable_node boot option specified */
extern bool movable_node_enabled;
#endif /* CONFIG_MOVABLE_NODE */

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
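/*
 * Illustrative usage sketch (not part of the original header): early arch
 * setup code registers RAM with memblock_add() and withholds ranges from the
 * early allocator with memblock_reserve(). The identifiers dram_base,
 * dram_size, initrd_phys and initrd_len below are hypothetical placeholders:
 *
 *	memblock_add(dram_base, dram_size);
 *	memblock_reserve(__pa(_text), _end - _text);
 *	memblock_reserve(initrd_phys, initrd_len);
 */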

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
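/*
 * Illustrative sketch (added here, not in the original header): walking
 * memory that is not reserved, across all nodes, could look like:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE,
 *			   &start, &end, NULL)
 *		pr_info("free range: %pa..%pa\n", &start, &end);
 *
 * This is exactly what the for_each_free_mem_range() wrapper further down
 * expands to.
 */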

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL,							\
	     __next_reserved_mem_region(&i, p_start, p_end);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
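/*
 * Illustrative sketch (not in the original header): reporting every reserved
 * range early in boot, e.g. for debugging, could be written as:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);
 */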

#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#else
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return false;
}
static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
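/*
 * Illustrative sketch (not in the original header): counting the pages that
 * memblock knows about on a given node, here node 0, might look like:
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, NULL)
 *		pages += end_pfn - start_pfn;
 */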
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 * @flags: pick from blocks based on memory attributes
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 * @flags: pick from blocks based on memory attributes
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
			       nid, flags, p_start, p_end, p_nid)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
						unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
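/*
 * Illustrative sketch (not in the original header): these allocators return
 * a physical address, so an early-boot allocation of one cache-aligned page
 * is typically converted to a virtual address before use:
 *
 *	phys_addr_t phys = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 *	void *ptr = __va(phys);
 */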

#ifdef CONFIG_MOVABLE_NODE
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
#else
static inline void __init memblock_set_bottom_up(bool enable) {}
static inline bool memblock_bottom_up(void) { return false; }
#endif
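/*
 * Illustrative sketch (not in the original header): code that wants early
 * allocations placed in low memory, e.g. close to the kernel image before
 * node hotplug information is available, can switch the direction
 * temporarily:
 *
 *	memblock_set_bottom_up(true);
 *	... perform the allocations ...
 *	memblock_set_bottom_up(false);
 */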

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
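/*
 * Illustrative sketch (not in the original header): callers that must stay
 * below a physical boundary pass it as @max_addr, e.g. a buffer that has to
 * sit in the first 4 GiB (SZ_1M/SZ_4G from <linux/sizes.h>):
 *
 *	phys_addr_t buf = memblock_alloc_base(SZ_1M, SZ_1M, SZ_4G);
 *
 * The query helpers can be used to test a candidate range first; base and
 * size below are hypothetical:
 *
 *	if (!memblock_is_region_reserved(base, size))
 *		memblock_reserve(base, size);
 */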

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);
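/*
 * Illustrative sketch (not in the original header): architectures typically
 * cap early allocations to memory that is already mapped; "lowmem_limit"
 * here is a hypothetical per-arch physical boundary:
 *
 *	memblock_set_current_limit(lowmem_limit);
 *
 * The cap can later be lifted, e.g. with
 * memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE), once all of memory
 * is mapped.
 */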

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)
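/*
 * Illustrative sketch (not in the original header): combining
 * for_each_memblock() with the pfn helpers above to total the pages that
 * memblock manages:
 *
 *	struct memblock_region *reg;
 *	unsigned long pages = 0;
 *
 *	for_each_memblock(memory, reg)
 *		pages += memblock_region_memory_end_pfn(reg) -
 *			 memblock_region_memory_base_pfn(reg);
 */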

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */