/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

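/*
 * For illustration: on a non-PAE build the pmd level is folded into the
 * pgd, so the pud_offset()/pmd_offset() calls above simply return the pgd
 * entry itself.  Assuming the default PAGE_OFFSET of 0xC0000000, the
 * kernel mapping therefore starts at pgd slot pgd_index(0xC0000000) == 768.
 * Only a PAE build allocates a real pmd page here and hooks it into the
 * pgd entry.
 */
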
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
                page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                if (!page_table) {
                        page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                }

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

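/*
 * For illustration: one such pte page is a single 4 KiB page of pte
 * entries, so it maps PTRS_PER_PTE small pages of virtual address space -
 * with 4 KiB pages that is 1024 * 4 KiB = 4 MiB per pmd entry on non-PAE
 * builds, or 512 * 4 KiB = 2 MiB with PAE.
 */
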
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

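/*
 * In this file the helper above is used in two places: pagetable_init()
 * pre-populates the page-table structure for the fixmap range, and
 * permanent_kmaps_init() does the same for the pkmap window, e.g.:
 *
 *      page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 *
 * Only the intermediate tables are created here; the actual pte entries
 * are installed later by set_fixmap() and the highmem kmap code.
 */
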
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m = 0, pages_4k = 0;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;

                for (pmd_idx = 0;
                     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         *
                         * Don't use a large page for the first 2/4MB of memory
                         * because there are often fixed size MTRRs in there
                         * and overlapping MTRRs into large pages can cause
                         * slowdowns.
                         */
                        if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                max_pfn_mapped = pfn;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        for (pte_ofs = 0;
                             pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                set_pte(pte, pfn_pte(pfn, prot));
                        }
                        max_pfn_mapped = pfn;
                }
        }
        update_page_count(PG_LEVEL_2M, pages_2m);
        update_page_count(PG_LEVEL_4K, pages_4k);
}

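/*
 * Rough numbers, assuming a PSE-capable CPU and a non-PAE build: each
 * large mapping set up above covers PTRS_PER_PTE (1024) page frames, i.e.
 * one 4 MiB page per pmd entry (2 MiB with PAE), which is what the
 * pages_2m counter tracks; ranges overlapping the kernel text simply get
 * the executable protection instead.  pages_4k counts the remaining 4 KiB
 * mappings, and max_pfn_mapped ends up at the highest pfn mapped here.
 */
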
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps.  Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

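/*
 * For reference: with 4 KiB pages, pfn 256 corresponds to physical address
 * 1 MiB (256 * 4096 = 0x100000), so the first check above covers the
 * legacy BIOS/VGA region described in the comment.
 */
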
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

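/*
 * For illustration: the permanent kmap window above is LAST_PKMAP pages
 * starting at PKMAP_BASE - with the usual configuration that is 1024 pages
 * (4 MiB) on non-PAE and 512 pages (2 MiB) with PAE - and pkmap_page_table
 * caches the pte page backing it so kmap() can install highmem mappings
 * without walking the page tables each time.
 */
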
static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
                                         unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()                            do { } while (0)
# define permanent_kmaps_init(pgd_base)         do { } while (0)
# define set_highmem_pages_init()               do { } while (0)
#endif /* CONFIG_HIGHMEM */

pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

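/*
 * Usage note: "noexec=on" (or leaving the option out) keeps NX enabled
 * when the CPU supports it, while booting with "noexec=off" on the kernel
 * command line clears _PAGE_NX from __supported_pte_mask so no mapping is
 * marked non-executable.  set_nx() below then probes CPUID leaf 0x80000001
 * (the NX bit is bit 20 of EDX) and, if present and not disabled, turns on
 * EFER.NX via rdmsr()/wrmsr().
 */
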
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
        pagetable_init();

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE        > FIXADDR_START);
        BUG_ON(VMALLOC_END                              > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                            > VMALLOC_END);
        BUG_ON((unsigned long)high_memory               > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        cpa_init();
        save_pg_dir();
        zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
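/*
 * How the test works: flag starts out as 1, the asm reads a byte from the
 * read-only FIX_WP_TEST fixmap page and writes it back.  If the CPU
 * honours WP in supervisor mode the write faults, the exception table
 * entry (_ASM_EXTABLE) resumes execution past the xorl, and flag stays 1;
 * if the write silently succeeds, the xorl clears flag and the function
 * returns 0.
 */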
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}