/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
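
/*
 * Worked example (illustrative numbers, not from the original source): with
 * 4 KiB pages, representing 1 GiB of RAM means 262144 pages. One bit per
 * page gives (262144 + 7) / 8 = 32768 bytes, already long-aligned, so
 * bootmap_bytes() returns 32768 and bootmem_bootmap_pages() returns
 * 32768 >> PAGE_SHIFT = 8 bitmap pages.
 */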

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_min_pfn < ent->node_min_pfn)
			break;
	}
	list_add_tail(&bdata->list, iter);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
#endif
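
/*
 * Illustrative sketch (hypothetical values, not from this file): a UMA
 * architecture would typically wire the allocator up early in setup_arch(),
 * placing the bitmap at an arch-chosen free pfn and then registering the
 * actually free RAM, roughly:
 *
 *	init_bootmem(bitmap_pfn, max_pfn);
 *	free_bootmem(PFN_PHYS(ram_start_pfn), ram_bytes);
 *	reserve_bootmem(__pa(_text), _end - _text, BOOTMEM_DEFAULT);
 *
 * Since init_bootmem_core() starts with every page reserved, only ranges
 * explicitly freed become allocatable; the bitmap and kernel image must be
 * kept (or re-marked) reserved before the first allocation.
 */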
/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
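
/*
 * Illustrative use (hypothetical addresses): a region claimed during early
 * boot can be handed straight to the buddy allocator once bootmem has been
 * torn down, e.g. free_bootmem_late(0x1000000, 0x100000). Note the rounding:
 * PFN_UP()/PFN_DOWN() shrink the range inward, so partially covered pages
 * at either end are never freed.
 */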

#ifdef CONFIG_NO_BOOTMEM
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}
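
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, freeing pfns
 * [5, 133) splits into a head of single pages 5..63, one order-6 block of
 * 64 pages at pfn 64, and a tail of single pages 128..132. Batching whole
 * words this way is much cheaper than 128 separate order-0 frees.
 */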

unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}
#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's word size, we might
	 * be able to free pages in bulk, BITS_PER_LONG pages at a time.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
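
/*
 * Example of the word-at-a-time walk above (illustrative): if the node
 * start is word-aligned and a bitmap word reads 0x0 (all 64 pfns free on
 * a 64-bit machine), the whole run is released as one order-6 block. A
 * word such as 0xff00 (offsets 8..15 still reserved) takes the slow path
 * instead, freeing the remaining 56 pages one by one.
 */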
#endif

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
#ifdef CONFIG_NO_BOOTMEM
	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
#else
	return free_all_bootmem_core(pgdat->bdata);
#endif
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
#ifdef CONFIG_NO_BOOTMEM
	return free_all_memory_core_early(NODE_DATA(0)->node_id);
#else
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
#endif
}

#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}
#endif
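
/*
 * Illustrative walk-through (hypothetical layout): with node 0 covering
 * pfns [0, 0x1000) and node 1 covering [0x1000, 0x2000), marking the range
 * [0xff0, 0x1010) is split by mark_bootmem() into a [0xff0, 0x1000) piece
 * on node 0 and a [0x1000, 0x1010) piece on node 1. If an exclusive
 * reservation fails partway through, the already-marked prefix is rolled
 * back via mark_bootmem(start, pos, 0, 0) before -EBUSY is returned.
 */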

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
	free_early(physaddr, physaddr + size);
#else
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
#endif
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
	free_early(addr, addr + size);
#else
	unsigned long start, end;

	kmemleak_free_part(__va(addr), size);

	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	mark_bootmem(start, end, 0, 0);
#endif
}
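
/*
 * Rounding example (illustrative, 4 KiB pages): free_bootmem(0x1800, 0x2000)
 * computes start = PFN_UP(0x1800) = 2 and end = PFN_DOWN(0x3800) = 3, so
 * only pfn 2 is actually freed; the partially covered pages at 0x1000 and
 * 0x3000 stay reserved, matching the "partial pages" note above.
 */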

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				unsigned long size, int flags)
{
#ifdef CONFIG_NO_BOOTMEM
	panic("no bootmem");
	return 0;
#else
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
#endif
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
				int flags)
{
#ifdef CONFIG_NO_BOOTMEM
	panic("no bootmem");
	return 0;
#else
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
#endif
}
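
/*
 * Usage sketch (hypothetical range): early code that must keep a firmware
 * buffer intact could do
 *
 *	if (reserve_bootmem(fw_phys, fw_len, BOOTMEM_EXCLUSIVE) == -EBUSY)
 *		panic("firmware buffer already claimed");
 *
 * With BOOTMEM_DEFAULT an overlap would instead be absorbed silently. Note
 * the outward rounding: PFN_DOWN()/PFN_UP() grow the range so partially
 * covered pages are reserved too, the opposite of free_bootmem().
 */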

#ifndef CONFIG_NO_BOOTMEM
static unsigned long __init align_idx(struct bootmem_data *bdata,
				unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
				unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}
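
/*
 * Worked example (illustrative): for a node starting at pfn 3, aligning
 * relative index 5 to a step of 4 gives ALIGN(3 + 5, 4) - 3 = 8 - 3 = 5,
 * i.e. absolute pfn 8, which is 4-aligned. Aligning the index alone would
 * have produced ALIGN(5, 4) = 8, i.e. absolute pfn 11, which is not.
 */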

static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
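
/*
 * Note on the merge logic above (illustrative numbers): two successive
 * requests for 512 bytes with PAGE_SIZE == 4096 share one page. The first
 * leaves last_end_off pointing 512 bytes into the page; the second then
 * starts at that byte offset of the same pfn, sets merge = 1, and skips
 * re-reserving the page it already owns, so sub-page allocations are
 * packed together instead of each burning a full page.
 */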

static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
	{
		bootmem_data_t *p_bdata;

		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
							goal, limit);
		if (p_bdata)
			return alloc_bootmem_core(p_bdata, size, align,
							goal, limit);
	}
#endif
	return NULL;
}
#endif

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
#ifdef CONFIG_NO_BOOTMEM
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
#else
	bootmem_data_t *bdata;
	void *region;

restart:
	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
	if (region)
		return region;

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_core(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
#endif
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
	limit = -1UL;
#endif

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
				unsigned long goal)
{
	unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
	limit = -1UL;
#endif

	return ___alloc_bootmem(size, align, goal, limit);
}
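
/*
 * Typical use (illustrative): early-boot code that needs a zeroed,
 * cache-aligned table before kmalloc() works might call
 *
 *	table = __alloc_bootmem(table_bytes, SMP_CACHE_BYTES,
 *				__pa(MAX_DMA_ADDRESS));
 *
 * The goal only steers the search; if nothing is free above it, the
 * allocator retries with goal 0 and panics only when all nodes are
 * exhausted (use __alloc_bootmem_nopanic() to get NULL instead).
 */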

#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	return ___alloc_bootmem(size, align, goal, limit);
}
#endif

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	return __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
#else
	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
#endif
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update the goal to allocate above MAX_DMA32_PFN when possible */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
#ifdef CONFIG_NO_BOOTMEM
		ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						new_goal, -1ULL);
#else
		ptr = alloc_bootmem_core(pgdat->bdata, size, align,
						new_goal, 0);
#endif
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				unsigned long section_nr)
{
#ifdef CONFIG_NO_BOOTMEM
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					SMP_CACHE_BYTES, goal, limit);
#else
	bootmem_data_t *bdata;
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
#endif
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
#else
	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
#endif
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
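
/*
 * Illustrative use (hypothetical device): a driver for hardware that can
 * only address 32-bit physical memory could grab its descriptor ring with
 *
 *	ring = __alloc_bootmem_low(ring_bytes, PAGE_SIZE, 0);
 *
 * since the ARCH_LOW_ADDRESS_LIMIT cap (0xffffffff unless the architecture
 * overrides it) guarantees the block lands below 4 GiB.
 */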

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	return __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
#else
	return ___alloc_bootmem_node(pgdat->bdata, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
#endif
}