/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>

#include <asm/a.out.h>
#include <asm/machvec.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
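/*
 * Note: 0x100000000UL is 4GB, so MAX_DMA_ADDRESS is simply the kernel's
 * identity-mapped address of physical 4GB, i.e. the upper bound of the
 * 32-bit-addressable window the zone setup code uses when sizing ZONE_DMA.
 */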
#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif
static int pgt_cache_water[2] = { 25, 50 };

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
void
check_pgt_cache (void)
{
        int low, high;

        low = pgt_cache_water[0];
        high = pgt_cache_water[1];

        preempt_disable();
        if (pgtable_cache_size > (u64) high) {
                do {
                        if (pgd_quicklist)
                                free_page((unsigned long)pgd_alloc_one_fast(NULL));
                        if (pmd_quicklist)
                                free_page((unsigned long)pmd_alloc_one_fast(NULL, 0));
                } while (pgtable_cache_size > (u64) low);
        }
        preempt_enable();
}
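/*
 * The two pgt_cache_water entries above act as low/high watermarks: nothing is
 * trimmed until the page-table quicklists hold more than pgt_cache_water[1]
 * (50) pages, and once trimming starts it continues down to pgt_cache_water[0]
 * (25), so check_pgt_cache() does not bounce the cache size on every call.
 */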
void
lazy_mmu_prot_update (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        if (!pte_exec(pte))
                return;                         /* not an executable page... */

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + PAGE_SIZE);
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}
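/*
 * Note on PG_arch_1 as used above: on ia64 it serves as an "i-cache clean"
 * marker.  It is set here once the page has been flushed so that later
 * executable mappings of the same page can skip the costly flush_icache_range()
 * call; flush_dcache_page() clears it again when the page contents may have
 * changed.
 */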
void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = STACK_TOP - stack_size;
}
/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (vma) {
                memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (vma) {
                        memset(vma, 0, sizeof(*vma));
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                kmem_cache_free(vm_area_cachep, vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}
void
free_initmem (void)
{
        unsigned long addr, eaddr;

        addr = (unsigned long) ia64_imva(__init_begin);
        eaddr = (unsigned long) ia64_imva(__init_end);
        while (addr < eaddr) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                ++totalram_pages;
                addr += PAGE_SIZE;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
               (__init_end - __init_begin) >> 10);
}
void
free_initrd_mem (unsigned long start, unsigned long end)
{
        struct page *page;
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
         * therefore possible to have the initrd share the same page as
         * the end of the kernel (given current setup).
         *
         * To avoid freeing/using the wrong page (kernel sized) we:
         *	- align up the beginning of initrd
         *	- align down the end of initrd
         *
         *  |             |
         *  |=============| a000
         *  |             |
         *  |             | 9000
         *  |/////////////|
         *  |=============| 8000
         *  |///INITRD////|
         *  |/////////////| 7000
         *  |             |
         *  |KKKKKKKKKKKKK|
         *  |=============| 6000
         *  |KKKKKKKKKKKKK|
         *  K=kernel using 8KB pages
         *
         * In this example, we must free page 8000 ONLY. So we must align up
         * initrd_start and keep initrd_end as is.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;
        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                page = virt_to_page(start);
                ClearPageReserved(page);
                set_page_count(page, 1);
                free_page(start);
                ++totalram_pages;
        }
}
/*
 * This installs a clean page in the kernel's page table.
 */
struct page *
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (!PageReserved(page))
                printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
                       page_address(page));

        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */

        spin_lock(&init_mm.page_table_lock);
        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;

                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_map(&init_mm, pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte)) {
                        pte_unmap(pte);
                        goto out;
                }
                set_pte(pte, mk_pte(page, pgprot));
                pte_unmap(pte);
        }
  out:  spin_unlock(&init_mm.page_table_lock);
        /* no need for flush_tlb */
        return page;
}
static void __init
setup_gate (void)
{
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF headers etc. and
         * once execute-only to enable privilege-promotion via "epc":
         */
        page = virt_to_page(ia64_imva(__start_gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
#endif
        ia64_patch_gate();
}
void __devinit
ia64_mmu_init (void *my_cpu_data)
{
        unsigned long psr, pta, impl_va_bits;
        extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /* Pin mapping for percpu area into TLB */
        psr = ia64_clear_ic();
        ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
                 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
                 PERCPU_PAGE_SHIFT);

        ia64_set_psr(psr);
        ia64_srlz_i();
        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by the virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))
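        /*
         * A worked instance of the arithmetic (numbers purely illustrative): with
         * 16KB pages, PAGE_SHIFT = 14 and pte_bits = 3, so mapped_space_bits =
         * 3*(14-3) + 14 = 47, i.e. the three-level page table maps 2^47 bytes per
         * region.  If the CPU implements impl_va_bits = 51, then vmlpt_bits =
         * 51 - 14 + 3 = 40, the VMLPT occupies the top 2^40 bytes of the region at
         * pta = 2^61 - 2^40, and since 2^47 < pta the overlap check below passes.
         */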
        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        if (POW2(mapped_space_bits) >= pta)
                panic("mm/init: overlap between virtually mapped linear page table and "
                      "mapped kernel space!");
        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
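        /*
         * Continuing the illustrative example above: with vmlpt_bits = 40 this
         * writes pta | (0 << 8) | (40 << 2) | VHPT_ENABLE_BIT, i.e. a short-format
         * VHPT covering 2^40 bytes, with the walker enabled unless
         * CONFIG_DISABLE_VHPT forced bit 0 to zero.
         */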
        ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}
struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};
static int __init
virtual_memmap_init (u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start));
        return 0;
}
void
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(size, nid, zone, start_pfn);
        else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return     (__get_user(byte, (char __user *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                        || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);
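/*
 * How the check above works: __get_user() returns 0 only if the access
 * succeeds, so the first probe tests the first byte of the struct page in the
 * (possibly sparse) vmem_map; when the struct page straddles a page boundary,
 * the last byte is probed as well.  Only if every probed byte is backed by
 * memory is the pfn reported as valid.
 */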
int
find_largest_hole (u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
        unsigned long num_reserved = 0;
        unsigned long *count = arg;

        for (; start < end; start += PAGE_SIZE)
                if (PageReserved(virt_to_page(start)))
                        ++num_reserved;
        *count += num_reserved;
        return 0;
}
/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);
void
mem_init (void)
{
        long reserved_pages, codesize, datasize, initsize;
        unsigned long num_pgt_pages;
        pg_data_t *pgdat;
        int i;
        static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#ifndef CONFIG_DISCONTIGMEM
        if (!mem_map)
                BUG();
        max_mapnr = max_low_pfn;
#endif
        high_memory = __va(max_low_pfn * PAGE_SIZE);
        kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
        kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);
        kclist_add(&kcore_kernel, _stext, _end - _stext);
        for_each_pgdat(pgdat)
                totalram_pages += free_all_bootmem_node(pgdat);
        reserved_pages = 0;
        efi_memmap_walk(count_reserved_pages, &reserved_pages);
        codesize =  (unsigned long) _etext - (unsigned long) _stext;
        datasize =  (unsigned long) _edata - (unsigned long) _etext;
        initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;
        printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
               "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
               num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
               reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
        /*
         * Allow for enough (cached) page table pages so that we can map the entire memory
         * at least once.  Each task also needs a couple of page tables pages, so add in a
         * fudge factor for that (don't use "threads-max" here; that would be wrong!).
         * Don't allow the cache to be more than 10% of total memory, though.
         */
#       define NUM_TASKS        500     /* typical number of tasks */
        num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
        if (num_pgt_pages > nr_free_pages() / 10)
                num_pgt_pages = nr_free_pages() / 10;
        if (num_pgt_pages > (u64) pgt_cache_water[1])
                pgt_cache_water[1] = num_pgt_pages;
        /*
         * For fsyscall entry points with no light-weight handler, use the ordinary
         * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
         * code can tell them apart.
         */
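        /*
         * (Bit 0 can serve as a marker here because the table entries are
         * addresses of kernel entry points, which are at least 16-byte aligned on
         * ia64, so their low bit is otherwise always zero; the fsyscall entry path
         * only has to test it to decide whether to fall back to the full syscall
         * path.)
         */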
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();

#ifdef CONFIG_IA32_SUPPORT