/* include/linux/bootmem.h */
/*
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 */
#ifndef _LINUX_BOOTMEM_H
#define _LINUX_BOOTMEM_H

#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <asm/dma.h>
#include <asm/processor.h>

/*
 * simple boot-time physical memory area allocator.
 */

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

#ifndef CONFIG_NO_BOOTMEM
/*
 * node_bootmem_map is a map pointer - the bits represent all physical
 * memory pages (including holes) on the node.
 */
typedef struct bootmem_data {
	unsigned long node_min_pfn;
	unsigned long node_low_pfn;
	void *node_bootmem_map;
	unsigned long last_end_off;
	unsigned long hint_idx;
	struct list_head list;
} bootmem_data_t;
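
/*
 * Rough field semantics (the authoritative logic lives in mm/bootmem.c):
 * node_min_pfn..node_low_pfn is the pfn range covered by node_bootmem_map,
 * last_end_off remembers where the previous allocation ended so that small
 * requests can be merged within a partially used page, and hint_idx is the
 * bitmap index at which the next search starts.
 */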

extern bootmem_data_t bootmem_node_data[];
#endif

extern unsigned long bootmem_bootmap_pages(unsigned long);

extern unsigned long init_bootmem_node(pg_data_t *pgdat,
				       unsigned long freepfn,
				       unsigned long startpfn,
				       unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);

extern unsigned long free_all_bootmem(void);
extern void reset_node_managed_pages(pg_data_t *pgdat);
extern void reset_all_zones_managed_pages(void);

extern void free_bootmem_node(pg_data_t *pgdat,
			      unsigned long addr,
			      unsigned long size);
extern void free_bootmem(unsigned long physaddr, unsigned long size);
extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
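
/*
 * Informal overview of the classic bootmem lifecycle (a sketch, not a spec;
 * details vary per architecture): arch setup calls init_bootmem_node() (or
 * init_bootmem()) to place the node bitmap, reserves the kernel image,
 * initrd, firmware tables, etc. with reserve_bootmem*(), and finally hands
 * everything still free to the buddy allocator via free_all_bootmem().
 * free_bootmem_late() is only useful after that hand-over, when bootmem has
 * been torn down; it releases pages directly to the page allocator.
 *
 * Illustrative, hypothetical arch code (all variable names are placeholders):
 *
 *	bootmap_pfn = find_bootmap_location();
 *	init_bootmem_node(NODE_DATA(0), bootmap_pfn, min_pfn, max_pfn);
 *	reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT);
 *	...
 *	free_all_bootmem();	(typically called from mem_init())
 */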

/*
 * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
 * the architecture-specific code should honor this).
 *
 * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
 * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
 * already was reserved.
 */
#define BOOTMEM_DEFAULT		0
#define BOOTMEM_EXCLUSIVE	(1<<0)

extern int reserve_bootmem(unsigned long addr,
			   unsigned long size,
			   int flags);
extern int reserve_bootmem_node(pg_data_t *pgdat,
				unsigned long physaddr,
				unsigned long size,
				int flags);
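
/*
 * Illustrative use of BOOTMEM_EXCLUSIVE (hypothetical caller): a reservation
 * that must not overlap anything already reserved can check for -EBUSY
 * instead of silently double-reserving, e.g.
 *
 *	if (reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE)) {
 *		pr_warn("requested region is already in use\n");
 *		return;
 *	}
 *
 * crash_base/crash_size are just example names; with BOOTMEM_DEFAULT the
 * call always returns 0.
 */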

extern void *__alloc_bootmem(unsigned long size,
			     unsigned long align,
			     unsigned long goal) __malloc;
extern void *__alloc_bootmem_nopanic(unsigned long size,
				     unsigned long align,
				     unsigned long goal) __malloc;
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
				  unsigned long size,
				  unsigned long align,
				  unsigned long goal) __malloc;
void *__alloc_bootmem_node_high(pg_data_t *pgdat,
				unsigned long size,
				unsigned long align,
				unsigned long goal) __malloc;
extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
					  unsigned long size,
					  unsigned long align,
					  unsigned long goal) __malloc;
void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				    unsigned long size,
				    unsigned long align,
				    unsigned long goal,
				    unsigned long limit) __malloc;
extern void *__alloc_bootmem_low(unsigned long size,
				 unsigned long align,
				 unsigned long goal) __malloc;
void *__alloc_bootmem_low_nopanic(unsigned long size,
				  unsigned long align,
				  unsigned long goal) __malloc;
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
				      unsigned long size,
				      unsigned long align,
				      unsigned long goal) __malloc;
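
/*
 * Naming conventions (informal summary): the *_nopanic variants return NULL
 * on failure, everything else panics if the request cannot be satisfied.
 * "goal" is a preferred minimum physical address rather than a hard limit,
 * and the *_low* variants additionally cap the search at
 * ARCH_LOW_ADDRESS_LIMIT.
 */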

#ifdef CONFIG_NO_BOOTMEM
/* We are using top down, so it is safe to use 0 here */
#define BOOTMEM_LOW_LIMIT 0
#else
#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
#endif

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

#define alloc_bootmem(x) \
	__alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_align(x, align) \
	__alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_nopanic(x) \
	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages(x) \
	__alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_nopanic(x) \
	__alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_node(pgdat, x) \
	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_node_nopanic(pgdat, x) \
	__alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_node(pgdat, x) \
	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
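
/*
 * Typical (hypothetical) call sites for the wrappers above:
 *
 *	table = alloc_bootmem(nr_entries * sizeof(*table));
 *	node_map = alloc_bootmem_pages_node(NODE_DATA(nid), map_size);
 *
 * "table", "node_map", "nr_entries" and "map_size" are placeholder names.
 * The plain variants panic on failure, so callers do not check for NULL;
 * only the *_nopanic wrappers need an error path.
 */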

#define alloc_bootmem_low(x) \
	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_low_pages_nopanic(x) \
	__alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages(x) \
	__alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages_node(pgdat, x) \
	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)


#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)

/* FIXME: use MEMBLOCK_ALLOC_* variants here */
#define BOOTMEM_ALLOC_ACCESSIBLE	0
#define BOOTMEM_ALLOC_ANYWHERE		(~(phys_addr_t)0)
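
/*
 * These mirror memblock's own limits (hence the FIXME above):
 * BOOTMEM_ALLOC_ACCESSIBLE restricts the search to memory the kernel can
 * already address (memblock's current limit), while BOOTMEM_ALLOC_ANYWHERE
 * places no upper bound on the physical address.
 */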

/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
		phys_addr_t align, phys_addr_t min_addr,
		phys_addr_t max_addr, int nid);
void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
		phys_addr_t min_addr, phys_addr_t max_addr, int nid);
void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);

static inline void * __init memblock_virt_alloc(
					phys_addr_t size, phys_addr_t align)
{
	return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
					   BOOTMEM_ALLOC_ACCESSIBLE,
					   NUMA_NO_NODE);
}

static inline void * __init memblock_virt_alloc_nopanic(
					phys_addr_t size, phys_addr_t align)
{
	return memblock_virt_alloc_try_nid_nopanic(size, align,
						   BOOTMEM_LOW_LIMIT,
						   BOOTMEM_ALLOC_ACCESSIBLE,
						   NUMA_NO_NODE);
}
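
/*
 * Hypothetical usage of the wrappers above (placeholder names):
 *
 *	my_array = memblock_virt_alloc(count * sizeof(*my_array), 0);
 *	opt_buf  = memblock_virt_alloc_nopanic(buf_size, PAGE_SIZE);
 *	if (!opt_buf)
 *		pr_warn("continuing without the optional buffer\n");
 *
 * An align of 0 is accepted and treated as a "don't care" request; see the
 * note after memblock_virt_alloc_node_nopanic() below.
 */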

static inline void * __init memblock_virt_alloc_low(
					phys_addr_t size, phys_addr_t align)
{
	return memblock_virt_alloc_try_nid(size, align,
					   BOOTMEM_LOW_LIMIT,
					   ARCH_LOW_ADDRESS_LIMIT,
					   NUMA_NO_NODE);
}

static inline void * __init memblock_virt_alloc_low_nopanic(
					phys_addr_t size, phys_addr_t align)
{
	return memblock_virt_alloc_try_nid_nopanic(size, align,
						   BOOTMEM_LOW_LIMIT,
						   ARCH_LOW_ADDRESS_LIMIT,
						   NUMA_NO_NODE);
}

static inline void * __init memblock_virt_alloc_from_nopanic(
		phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
{
	return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
						   BOOTMEM_ALLOC_ACCESSIBLE,
						   NUMA_NO_NODE);
}

static inline void * __init memblock_virt_alloc_node(
						phys_addr_t size, int nid)
{
	return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
					   BOOTMEM_ALLOC_ACCESSIBLE, nid);
}

static inline void * __init memblock_virt_alloc_node_nopanic(
						phys_addr_t size, int nid)
{
	return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
						   BOOTMEM_ALLOC_ACCESSIBLE,
						   nid);
}
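
/*
 * Note: the node helpers above pass align == 0; the memblock allocator
 * treats a zero alignment as SMP_CACHE_BYTES (at least in this kernel's
 * memblock code), matching what the bootmem-based fallbacks further down
 * do explicitly.
 */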

static inline void __init memblock_free_early(
					phys_addr_t base, phys_addr_t size)
{
	__memblock_free_early(base, size);
}

static inline void __init memblock_free_early_nid(
				phys_addr_t base, phys_addr_t size, int nid)
{
	__memblock_free_early(base, size);
}

static inline void __init memblock_free_late(
					phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}
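
/*
 * Rough rule of thumb: memblock_free_early*() is for memory released while
 * memblock is still the active boot allocator (note the nid variant simply
 * ignores the node hint and frees the range), whereas memblock_free_late()
 * is for memory that only becomes free after the page allocator has taken
 * over and hands the pages to it directly.
 */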

#else

#define BOOTMEM_ALLOC_ACCESSIBLE	0


/* Fall back to all the existing bootmem APIs */
static inline void * __init memblock_virt_alloc(
					phys_addr_t size, phys_addr_t align)
{
	if (!align)
		align = SMP_CACHE_BYTES;
	return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
}

static inline void * __init memblock_virt_alloc_nopanic(
					phys_addr_t size, phys_addr_t align)
{
	if (!align)
		align = SMP_CACHE_BYTES;
	return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
}

static inline void * __init memblock_virt_alloc_low(
					phys_addr_t size, phys_addr_t align)
{
	if (!align)
		align = SMP_CACHE_BYTES;
	return __alloc_bootmem_low(size, align, 0);
}

static inline void * __init memblock_virt_alloc_low_nopanic(
					phys_addr_t size, phys_addr_t align)
{
	if (!align)
		align = SMP_CACHE_BYTES;
	return __alloc_bootmem_low_nopanic(size, align, 0);
}

static inline void * __init memblock_virt_alloc_from_nopanic(
		phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
{
	return __alloc_bootmem_nopanic(size, align, min_addr);
}

static inline void * __init memblock_virt_alloc_node(
						phys_addr_t size, int nid)
{
	return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
				    BOOTMEM_LOW_LIMIT);
}

static inline void * __init memblock_virt_alloc_node_nopanic(
						phys_addr_t size, int nid)
{
	return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
					    SMP_CACHE_BYTES,
					    BOOTMEM_LOW_LIMIT);
}

static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
	phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
	return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
					 min_addr);
}

static inline void * __init memblock_virt_alloc_try_nid_nopanic(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
	return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
					     min_addr, max_addr);
}
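
/*
 * Caveat for the bootmem-backed fallbacks above: max_addr is only partially
 * honoured.  memblock_virt_alloc_try_nid() drops it entirely (min_addr is
 * passed on as the bootmem "goal"), and only the _nopanic variant forwards
 * max_addr as a hard limit.
 */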

static inline void __init memblock_free_early(
					phys_addr_t base, phys_addr_t size)
{
	free_bootmem(base, size);
}

static inline void __init memblock_free_early_nid(
				phys_addr_t base, phys_addr_t size, int nid)
{
	free_bootmem_node(NODE_DATA(nid), base, size);
}

static inline void __init memblock_free_late(
					phys_addr_t base, phys_addr_t size)
{
	free_bootmem_late(base, size);
}
#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */

#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
extern void *alloc_remap(int nid, unsigned long size);
#else
static inline void *alloc_remap(int nid, unsigned long size)
{
	return NULL;
}
#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
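
/*
 * Illustrative (hypothetical) call, loosely modelled on how the large VFS
 * hash tables are sized at boot; real callers differ in the details:
 *
 *	my_hashtable = alloc_large_system_hash("my-cache",
 *					sizeof(struct hlist_head),
 *					0,		(0 = size from memory)
 *					14,		(scale)
 *					HASH_EARLY | HASH_ZERO,
 *					&my_hash_shift,
 *					NULL,
 *					0, 0);
 *
 * "my_hashtable", "my-cache" and "my_hash_shift" are placeholder names.
 */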

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
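
/*
 * Rough summary: when hashdist is set, alloc_large_system_hash() allocates
 * the table with vmalloc() so its pages can spread across NUMA nodes, and
 * the default can be overridden at boot via the hashdist= kernel parameter.
 */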


#endif /* _LINUX_BOOTMEM_H */