mm/bootmem.c
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

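/*
 * Enabled by passing "bootmem_debug" on the kernel command line; makes
 * the bdebug() statements below print allocator activity during boot.
 */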
static int __init bootmem_debug_setup(char *buf)
{
        bootmem_debug = 1;
        return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({                         \
        if (unlikely(bootmem_debug))                    \
                printk(KERN_INFO                        \
                        "bootmem::%s " fmt,             \
                        __func__, ## args);             \
})

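/*
 * Number of bytes needed for a bitmap with one bit per page, rounded
 * up to a multiple of the machine word size.
 */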
static unsigned long __init bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = (pages + 7) / 8;

        return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long bytes = bootmap_bytes(pages);

        return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        struct list_head *iter;

        list_for_each(iter, &bdata_list) {
                bootmem_data_t *ent;

                ent = list_entry(iter, bootmem_data_t, list);
                if (bdata->node_min_pfn < ent->node_min_pfn)
                        break;
        }
        list_add_tail(&bdata->list, iter);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_min_pfn = start;
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = bootmap_bytes(end - start);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
                bdata - bootmem_node_data, start, mapstart, end, mapsize);

        return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

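/*
 * Release every page marked free in a node's bootmem bitmap to the page
 * allocator, then free the bitmap itself.  Returns the number of pages
 * released.
 */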
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        int aligned;
        struct page *page;
        unsigned long start, end, pages, count = 0;

        if (!bdata->node_bootmem_map)
                return 0;

        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;

        /*
         * If the start is aligned to the machine's word size, we might
         * be able to free pages in blocks of that order.
         */
        aligned = !(start & (BITS_PER_LONG - 1));

        bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
                bdata - bootmem_node_data, start, end, aligned);

        while (start < end) {
                unsigned long *map, idx, vec;

                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
                vec = ~map[idx / BITS_PER_LONG];

                if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
                        int order = ilog2(BITS_PER_LONG);

                        __free_pages_bootmem(pfn_to_page(start), order);
                        count += BITS_PER_LONG;
                } else {
                        unsigned long off = 0;

                        while (vec && off < BITS_PER_LONG) {
                                if (vec & 1) {
                                        page = pfn_to_page(start + off);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
                                off++;
                        }
                }
                start += BITS_PER_LONG;
        }

        page = virt_to_page(bdata->node_bootmem_map);
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
        while (pages--)
                __free_pages_bootmem(page++, 0);

        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

        return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        register_page_bootmem_info_node(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        return free_all_bootmem_core(NODE_DATA(0)->bdata);
}

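/*
 * Clear the reserved bits for page indices [sidx, eidx) in the node's
 * bitmap and pull the allocation hint back.  BUGs if a page was
 * already free.
 */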
static void __init __free(bootmem_data_t *bdata,
                        unsigned long sidx, unsigned long eidx)
{
        unsigned long idx;

        bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn);

        if (bdata->hint_idx > sidx)
                bdata->hint_idx = sidx;

        for (idx = sidx; idx < eidx; idx++)
                if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
                        BUG();
}

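/*
 * Set the reserved bits for page indices [sidx, eidx).  With
 * BOOTMEM_EXCLUSIVE, hitting an already-reserved page undoes the
 * partial reservation and returns -EBUSY; otherwise double
 * reservations are only reported via bdebug().
 */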
static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
                        unsigned long eidx, int flags)
{
        unsigned long idx;
        int exclusive = flags & BOOTMEM_EXCLUSIVE;

        bdebug("nid=%td start=%lx end=%lx flags=%x\n",
                bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn,
                flags);

        for (idx = sidx; idx < eidx; idx++)
                if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
                        if (exclusive) {
                                __free(bdata, sidx, idx);
                                return -EBUSY;
                        }
                        bdebug("silent double reserve of PFN %lx\n",
                                idx + bdata->node_min_pfn);
                }
        return 0;
}

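/*
 * Reserve or free a PFN range that lies entirely within one node.
 */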
static int __init mark_bootmem_node(bootmem_data_t *bdata,
                                unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long sidx, eidx;

        bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
                bdata - bootmem_node_data, start, end, reserve, flags);

        BUG_ON(start < bdata->node_min_pfn);
        BUG_ON(end > bdata->node_low_pfn);

        sidx = start - bdata->node_min_pfn;
        eidx = end - bdata->node_min_pfn;

        if (reserve)
                return __reserve(bdata, sidx, eidx, flags);
        else
                __free(bdata, sidx, eidx);
        return 0;
}

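/*
 * Reserve or free a PFN range that may span several nodes: walk the
 * ordered bdata list and mark the part of the range covered by each
 * node.  A failed exclusive reservation is rolled back by freeing the
 * part that was already marked.
 */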
static int __init mark_bootmem(unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long pos;
        bootmem_data_t *bdata;

        pos = start;
        list_for_each_entry(bdata, &bdata_list, list) {
                int err;
                unsigned long max;

                if (pos < bdata->node_min_pfn ||
                    pos >= bdata->node_low_pfn) {
                        BUG_ON(pos != start);
                        continue;
                }

                max = min(bdata->node_low_pfn, end);

                err = mark_bootmem_node(bdata, pos, max, reserve, flags);
                if (reserve && err) {
                        mark_bootmem(start, pos, 0, 0);
                        return err;
                }

                if (max == end)
                        return 0;
                pos = bdata->node_low_pfn;
        }
        BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(addr), size);

        start = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

        mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                unsigned long size, int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(physaddr);
        end = PFN_UP(physaddr + size);

        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                                int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(addr);
        end = PFN_UP(addr + size);

        return mark_bootmem(start, end, 1, flags);
}

static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
                                unsigned long step)
{
        unsigned long base = bdata->node_min_pfn;

        /*
         * Align the index with respect to the node start so that the
         * combination of both satisfies the requested alignment.
         */

        return ALIGN(base + idx, step) - base;
}

static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
                                unsigned long align)
{
        unsigned long base = PFN_PHYS(bdata->node_min_pfn);

        /* Same as align_idx for byte offsets */

        return ALIGN(base + off, align) - base;
}

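/*
 * The core allocator: a first-fit scan of the node's bitmap between
 * @goal (preferred start) and @limit, stepping in units of @align and
 * resuming from the last-allocation hint.  An allocation that follows
 * one ending mid-page may be merged into the same page; the resulting
 * region is reserved, zeroed and returned, or NULL on failure.
 */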
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        unsigned long fallback = 0;
        unsigned long min, max, start, sidx, midx, step;

        bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
                bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
                align, goal, limit);

        BUG_ON(!size);
        BUG_ON(align & (align - 1));
        BUG_ON(limit && goal + size > limit);

        if (!bdata->node_bootmem_map)
                return NULL;

        min = bdata->node_min_pfn;
        max = bdata->node_low_pfn;

        goal >>= PAGE_SHIFT;
        limit >>= PAGE_SHIFT;

        if (limit && max > limit)
                max = limit;
        if (max <= min)
                return NULL;

        step = max(align >> PAGE_SHIFT, 1UL);

        if (goal && min < goal && goal < max)
                start = ALIGN(goal, step);
        else
                start = ALIGN(min, step);

        sidx = start - bdata->node_min_pfn;
        midx = max - bdata->node_min_pfn;

        if (bdata->hint_idx > sidx) {
                /*
                 * Handle the valid case of sidx being zero and still
                 * catch the fallback below.
                 */
                fallback = sidx + 1;
                sidx = align_idx(bdata, bdata->hint_idx, step);
        }

        while (1) {
                int merge;
                void *region;
                unsigned long eidx, i, start_off, end_off;
find_block:
                sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
                sidx = align_idx(bdata, sidx, step);
                eidx = sidx + PFN_UP(size);

                if (sidx >= midx || eidx > midx)
                        break;

                for (i = sidx; i < eidx; i++)
                        if (test_bit(i, bdata->node_bootmem_map)) {
                                sidx = align_idx(bdata, i, step);
                                if (sidx == i)
                                        sidx += step;
                                goto find_block;
                        }

                if (bdata->last_end_off & (PAGE_SIZE - 1) &&
                                PFN_DOWN(bdata->last_end_off) + 1 == sidx)
                        start_off = align_off(bdata, bdata->last_end_off, align);
                else
                        start_off = PFN_PHYS(sidx);

                merge = PFN_DOWN(start_off) < sidx;
                end_off = start_off + size;

                bdata->last_end_off = end_off;
                bdata->hint_idx = PFN_UP(end_off);

                /*
                 * Reserve the area now:
                 */
                if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                                PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
                        BUG();

                region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                                start_off);
                memset(region, 0, size);
                /*
                 * The min_count is set to 0 so that bootmem allocated blocks
                 * are never reported as leaks.
                 */
                kmemleak_alloc(region, size, 0, 0);
                return region;
        }

        if (fallback) {
                sidx = align_idx(bdata, fallback - 1, step);
                fallback = 0;
                goto find_block;
        }

        return NULL;
}

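/*
 * Give the architecture a chance to steer the allocation to a
 * preferred node (CONFIG_HAVE_ARCH_BOOTMEM); falls back to kzalloc()
 * if the slab allocator is unexpectedly already up.
 */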
static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
        {
                bootmem_data_t *p_bdata;

                p_bdata = bootmem_arch_preferred_node(bdata, size, align,
                                                        goal, limit);
                if (p_bdata)
                        return alloc_bootmem_core(p_bdata, size, align,
                                                        goal, limit);
        }
#endif
        return NULL;
}

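/*
 * Try the arch-preferred placement first, then every node in ascending
 * PFN order.  If nothing satisfies @goal, retry once with the goal
 * dropped.  Returns NULL on failure.
 */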
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        bootmem_data_t *bdata;
        void *region;

restart:
        region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
        if (region)
                return region;

        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
                        continue;
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;

                region = alloc_bootmem_core(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }

        if (goal) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        return ___alloc_bootmem_nopanic(size, align, goal, 0);
}

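/*
 * Same as ___alloc_bootmem_nopanic() but panics instead of returning
 * NULL when the request cannot be satisfied.
 */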
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                                unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, 0);
}

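/*
 * Allocate from the given node, trying the arch-preferred placement
 * first; if the node cannot satisfy the request, fall back to any node
 * (and panic if that fails too).
 */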
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        void *ptr;

        ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        return ___alloc_bootmem(size, align, goal, limit);
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
                                unsigned long section_nr)
{
        bootmem_data_t *bdata;
        unsigned long pfn, goal, limit;

        pfn = section_nr_to_pfn(section_nr);
        goal = pfn << PAGE_SHIFT;
        limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
        bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

        return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
}
#endif

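/*
 * Like __alloc_bootmem_node() but returns NULL instead of panicking
 * when the request cannot be satisfied anywhere.
 */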
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                unsigned long align, unsigned long goal)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
        if (ptr)
                return ptr;

        return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat->bdata, size, align,
                                goal, ARCH_LOW_ADDRESS_LIMIT);
}