/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *               1999 Kanoj Sarcar, SGI
 *               2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

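/*
 * Find a free range in memblock that satisfies @goal, @limit, @align and
 * @nid, reserve it, and return the zeroed direct-mapped virtual address,
 * or NULL if no suitable range exists.
 */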
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
	if (!addr)
		return NULL;

	memblock_reserve(addr, size);
	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to the page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
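
/*
 * Illustrative sketch, not part of the original file: a caller that held on
 * to a boot-time buffer past the bootmem teardown can still hand it back
 * with free_bootmem_late().  The buffer and its origin are hypothetical, so
 * the snippet is compiled out.
 */
#if 0
static void __init example_release_late_buffer(void *buf, unsigned long size)
{
	/* Pages go straight to the buddy allocator; totalram_pages is bumped. */
	free_bootmem_late(__pa(buf), size);
}
#endif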
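
/*
 * Release the pfn range [start, end) to the page allocator.  The unaligned
 * head and tail are freed page by page; the BITS_PER_LONG-aligned middle is
 * freed in order-ilog2(BITS_PER_LONG) chunks to reduce the number of calls.
 * E.g. with BITS_PER_LONG == 64, pfns 100..299 are freed as 100..127 singly,
 * 128..255 in two order-6 blocks, and 256..299 singly.
 */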
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i, start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}

static unsigned long __init __free_memory_core(phys_addr_t start,
					       phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn > end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end, size;
	u64 i;

	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		count += __free_memory_core(start, end);

	/*
	 * Free the range used for memblock's reserved-regions array,
	 * if one was allocated.
	 */
	size = get_allocated_memblock_reserved_regions_info(&start);
	if (size)
		count += __free_memory_core(start, start + size);

	return count;
}

static int reset_managed_pages_done __initdata;

static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	if (reset_managed_pages_done)
		return;
	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);
	reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	reset_all_zones_managed_pages();

	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because in some cases node 0 has no RAM installed, and the low
	 * memory will then be on node 1.
	 */
	return free_low_memory_core_early();
}
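
/*
 * Illustrative sketch, not part of the original file: an architecture's
 * mem_init() typically releases boot memory along these lines.  The function
 * name is hypothetical and the snippet is compiled out.
 */
#if 0
static void __init example_mem_init(void)
{
	/* Hand every free memblock range over to the buddy allocator. */
	unsigned long released = free_all_bootmem();

	pr_info("released %lu pages to the page allocator\n", released);
}
#endif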

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_free(addr, size);
}
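
/*
 * Illustrative sketch, not part of the original file: memory reserved through
 * memblock early in boot can be handed back with free_bootmem() once it is
 * no longer needed.  The scratch area is hypothetical; the snippet is
 * compiled out.
 */
#if 0
static void __init example_return_scratch_area(unsigned long scratch,
					       unsigned long size)
{
	memblock_reserve(scratch, size);	/* grabbed early for scratch use */
	/* ... early boot code uses the area ... */
	free_bootmem(scratch, size);		/* give it back before mem_init() */
}
#endif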
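
/*
 * Allocate anywhere in the system, retrying once with the goal dropped if
 * the preferred placement cannot be satisfied.  Falls back to kzalloc() once
 * the slab allocator is available.
 */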
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}
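
/*
 * Illustrative sketch, not part of the original file: early setup code that
 * cannot recover from failure uses __alloc_bootmem(), which panics instead
 * of returning NULL.  The table is hypothetical; the snippet is compiled out.
 */
#if 0
static void * __init example_alloc_boot_table(void)
{
	/* One page, page aligned, no placement preference (goal == 0). */
	return __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
}
#endif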
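
/*
 * Try the requested node first, then any node, then retry both with the goal
 * dropped.  Returns NULL only when every attempt fails.
 */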
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
					    unsigned long size,
					    unsigned long align,
					    unsigned long goal,
					    unsigned long limit)
{
	void *ptr;

again:
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, limit);
	if (ptr)
		return ptr;

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
					goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
					   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
	if (ptr)
		return ptr;

	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
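
/*
 * Illustrative sketch, not part of the original file: per-node early
 * allocations pass the node's pg_data_t so the memory preferably lands on
 * that node.  The nid and size are hypothetical; the snippet is compiled out.
 */
#if 0
static void * __init example_alloc_on_node(int nid, unsigned long size)
{
	/* Falls back to other nodes and panics only if all of them fail. */
	return __alloc_bootmem_node(NODE_DATA(nid), size, PAGE_SIZE, 0);
}
#endif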

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal,
				     ARCH_LOW_ADDRESS_LIMIT);
}
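
/*
 * Illustrative sketch, not part of the original file: boot-time buffers that
 * must stay below ARCH_LOW_ADDRESS_LIMIT (4 GiB by default), e.g. for devices
 * with 32-bit addressing, use the _low variants.  The buffer's purpose is
 * hypothetical; the snippet is compiled out.
 */
#if 0
static void * __init example_alloc_low_buffer(unsigned long size)
{
	/* Search is capped at ARCH_LOW_ADDRESS_LIMIT instead of -1UL. */
	return __alloc_bootmem_low(size, PAGE_SIZE, 0);
}
#endif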