#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
	char *name;
};

struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
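
/*
 * Illustrative sketch (not part of this file): typical early-boot use of
 * the registration API above. The addresses and sizes are made up; real
 * callers take them from the firmware-provided memory map. SZ_* constants
 * are from <linux/sizes.h>.
 *
 * Register all of RAM, carve out a firmware-owned megabyte, and keep a
 * 64 KiB window out of the kernel's linear mapping:
 *
 *	memblock_add(0x80000000, SZ_512M);
 *	memblock_reserve(0x80000000, SZ_1M);
 *	memblock_mark_nomap(0x80100000, SZ_64K);
 */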

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that
 * are not included in type_b, or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
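
/*
 * Illustrative sketch (not part of this file): walking every region of a
 * memblock type with no exclusion list, here plain memblock.memory across
 * all nodes. %pa prints a phys_addr_t through a pointer.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
 *			   MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("memory: %pa..%pa\n", &start, &end);
 */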

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a that are not included in type_b, or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
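
/*
 * Illustrative sketch (not part of this file): dumping every reserved
 * range, e.g. while debugging an early-boot memory layout.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);
 */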

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);
unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
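
/*
 * Illustrative sketch (not part of this file): walking the pfn ranges of
 * every node, in the style of early page-init code. Note the loop
 * variable is a plain int here, not a u64.
 *
 *	int i, nid;
 *	unsigned long start_pfn, end_pfn;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfn %lx..%lx\n", nid, start_pfn, end_pfn);
 */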
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
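
/*
 * Illustrative sketch (not part of this file): the common "scan free
 * memory" pattern, restricted here to mirrored blocks as an example of
 * the flags argument.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_MIRROR,
 *				&start, &end, NULL)
 *		pr_info("free mirrored: %pa..%pa\n", &start, &end);
 */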

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
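
/*
 * Illustrative sketch (not part of this file): grabbing a page of early
 * memory before the buddy allocator is up. These allocators return a
 * physical address; mapping it (e.g. via __va() on architectures with a
 * linear map) is up to the caller.
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 *	void *p = __va(pa);
 */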

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check whether the allocation direction is bottom-up. If this returns
 * true, memblock allocates memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
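
/*
 * Illustrative sketch (not part of this file): constraining an early
 * allocation to the first 4 GiB, e.g. for a device that can only address
 * 32 bits. DMA_BIT_MASK() is from <linux/dma-mapping.h>; passing
 * MEMBLOCK_ALLOC_ANYWHERE as @max_addr lifts the limit.
 *
 *	phys_addr_t pa = __memblock_alloc_base(SZ_64K, SZ_64K,
 *					       DMA_BIT_MASK(32));
 *	if (!pa)
 *		pr_warn("no early memory below 4 GiB\n");
 */
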
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
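
/*
 * Rounding example (illustrative): with 4 KiB pages, a reserved region
 * [0x1000, 0x2800) gives memblock_region_reserved_base_pfn() == 1 and
 * memblock_region_reserved_end_pfn() == 3, covering every page the region
 * touches; the memory accessors round the other way and so cover only
 * pages contained entirely within the region.
 */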

#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)
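
/*
 * Illustrative sketch (not part of this file): the first argument names a
 * member of the global memblock, so "memory" walks memblock.memory.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("region: base=%pa size=%pa\n",
 *			&reg->base, &reg->size);
 */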

#define for_each_memblock_type(memblock_type, rgn)			\
	for (idx = 0, rgn = &memblock_type->regions[0];			\
	     idx < memblock_type->cnt;					\
	     idx++, rgn = &memblock_type->regions[idx])

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
		phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
		phys_addr_t end_addr)
{
	return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */