#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>

#include "mm_internal.h"
static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;
/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}
	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_pages: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_pages: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}
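/*
 * Minimal usage sketch (alloc_low_page(), the single-page wrapper, lives
 * in mm_internal.h): early page-table setup grabs zeroed, already-mapped
 * pages one at a time, e.g.
 *
 *	pte_t *pte = (pte_t *)alloc_low_pages(1);
 */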
/* need 3 4k for initial PMD_SIZE,  3 4k for 0-ISA_END_ADDRESS */
#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
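/*
 * That six-page budget presumably covers the worst case of one fresh
 * page per paging level below the PGD: three pages (e.g. PUD + PMD +
 * PTE on 64-bit) for the initial PMD_SIZE chunk, plus three more for
 * the 0-ISA_END_ADDRESS range mapped in init_mem_mapping().
 */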
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}
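/*
 * Worked example: with a 6-page INIT_PGT_BUF_SIZE and a brk chunk at,
 * say, physical 0x1000000, this leaves pgt_buf_start == pgt_buf_end ==
 * pfn 0x1000 and pgt_buf_top == pfn 0x1006, so alloc_low_pages() can
 * hand out six pages from the brk before falling back to memblock.
 */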
int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static void __init init_gbpages(void)
{
#ifdef CONFIG_X86_64
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
#endif
}
struct map_range {
	u64 start;
	u64 end;
	unsigned page_size_mask;
};

static int page_size_mask;
static void __init probe_page_size_mask(void)
{
	init_gbpages();

#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
}
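/*
 * For illustration: on a 64-bit CPU with PSE and 1GB-page support and
 * direct_gbpages set, the mask ends up as
 * (1 << PG_LEVEL_2M) | (1 << PG_LEVEL_1G); under CONFIG_DEBUG_PAGEALLOC
 * it stays 0 and the direct map is built entirely from 4k pages.
 */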
#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}
/*
 * Adjust the page_size_mask for small ranges to use a big page size
 * instead of small ones if the surrounding area is RAM too.
 */
static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
						     int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}
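/*
 * Example: a 4k range [mem 0x7fe01000-0x7ffeffff] rounds out to the
 * PMD-aligned [0x7fe00000, 0x80000000); if memblock reports that whole
 * neighbourhood as RAM, the range is promoted to 2M pages even though
 * split_mem_range() originally gave it none.
 */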
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;
	limit_pfn = PFN_DOWN(end);

	/* head if not big page alignment ? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}
	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}
	/* tail is not big page (1G) alignment */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif
	/* tail is not big page (2M) alignment */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);
	/* try to merge contiguous ranges that have the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}
	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	return nr_range;
}
struct range pfn_mapped[E820_X_MAX];
int nr_pfn_mapped;
static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}
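/*
 * For instance, pfn_range_is_mapped(0, 1) becomes true once
 * init_mem_mapping() below has mapped the ISA range [0, ISA_END_ADDRESS).
 */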
/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
	       start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}
/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range can have holes in the middle or at the ends, and only the
 * RAM parts will be mapped here.
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);

		if (start >= end)
			continue;

		/*
		 * If it overlaps with the brk pgt buffer, we need to
		 * alloc the pgt buf from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}
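/*
 * The can_use_brk_pgt test above is the usual interval-disjointness
 * check: [a, b) and [c, d) do not overlap iff max(a, c) >= min(b, d).
 * The brk page-table buffer is therefore used only when it lies
 * completely outside the region being mapped.
 */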
static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Explain why we shift by 5 and why we don't have to worry about
	 * 'step_size << 5' overflowing:
	 *
	 * initial mapped size is PMD_SIZE (2M).
	 * We can not set step_size to be PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Use 5 as the shift for now.
	 *
	 * Don't need to worry about overflow: on 32bit, when step_size
	 * is 0, round_down() returns 0 for start, and that turns it
	 * into 0x100000000ULL.
	 */
	return step_size << 5;
}
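/*
 * Worked progression: step_size starts at PMD_SIZE (2M), so successive
 * growth steps give 2M -> 64M -> 2G -> ...; the step only grows once the
 * chunk just mapped exceeds everything mapped so far, which keeps the
 * page-table allocations for the next step inside already-mapped RAM.
 */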
/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in top-down fashion. That is, the page tables
 * will be allocated at the end of the memory, and we map the memory
 * from the top down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;
	unsigned long new_mapped_ram_size;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;
	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page tables.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		new_mapped_ram_size = init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		/* only increase step_size after a big range gets mapped */
		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}
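/*
 * Rough sketch of the walk on, say, a 4G machine: the first pass maps
 * the top 2M using the brk page-table buffer, then successive passes
 * step down in 2M, 64M and 2G chunks, each allocating its page tables
 * out of the RAM mapped by the pass before it.
 */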
/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in bottom-up fashion. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will be
 * allocated just above the kernel and we map the memory in
 * [map_start, map_end) from the bottom up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, new_mapped_ram_size, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;
	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page tables.
	 */
	while (start < map_end) {
		if (map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else
			next = map_end;

		new_mapped_ram_size = init_range_memory_mapping(start, next);
		start = next;

		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}
}
void __init init_mem_mapping(void)
{
	unsigned long end;

	probe_page_size_mask();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);
	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel be mapped
		 * as soon as possible. And then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}
#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-ram areas as well; these contain
 * the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
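/*
 * The pagenr < 256 check whitelists exactly the first megabyte:
 * 1MB >> PAGE_SHIFT == 256 pages with 4k pages, which is the BIOS
 * code/data area described above.
 */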
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
		begin, end - 1);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of that, we need to make that
	 * part writeable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what);
#endif
}
void free_initmem(void)
{
	free_init_pages("unused kernel",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MICROCODE_EARLY
	/*
	 * Remember, initrd memory may contain microcode or other useful things.
	 * Before we lose initrd mem, we need to find a place to hold them
	 * now that normal virtual memory is enabled.
	 */
	save_microcode_in_initrd();
#endif

	/*
	 * end may not be page aligned, and we cannot align it: the
	 * decompressor could be confused by an aligned initrd_end.
	 * We already reserved the end partial page in
	 *    - i386_start_kernel()
	 *    - x86_64_start_kernel()
	 *    - relocate_initrd()
	 * so here we can safely PAGE_ALIGN() to free the partial page.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}