/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

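/* Size of the address space reserved for vmalloc above low memory (128 MB by default): */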
unsigned int __VMALLOC_RESERVE = 128 << 20;

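/* Highest page frame number that has been mapped by the kernel page tables so far: */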
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. In non-PAE compilation mode this
 * simply returns the pgd entry itself, since the middle layer is
 * folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
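	/* If the pgd entry is empty, allocate a new pmd page and install it: */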
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

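		/* With DEBUG_PAGEALLOC, try a normal bootmem page first; fall back to a low page below: */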
#ifdef CONFIG_DEBUG_PAGEALLOC
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function sets up a given range of kernel virtual memory with
 * new bootmem page tables, wherever page tables are missing in that
 * range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps physical memory into the kernel virtual address space,
 * a total of max_low_pfn pages, by creating page tables starting
 * from address PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m = 0, pages_4k = 0;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 *
			 * Don't use a large page for the first 2/4MB of memory
			 * because there are often fixed size MTRRs in there
			 * and overlapping MTRRs into large pages can cause
			 * slowdowns.
			 */
			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

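				/* addr2: last byte this large page would map: */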
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				max_pfn_mapped = pfn;
				continue;
			}
			pte = one_page_table_init(pmd);

			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				set_pte(pte, pfn_pte(pfn, prot));
			}
			max_pfn_mapped = pfn;
		}
	}
	update_page_count(PG_LEVEL_2M, pages_2m);
	update_page_count(PG_LEVEL_4K, pages_4k);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data
 * regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

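/* Walk the kernel page tables down to the pte that maps a fixmap address: */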
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

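	/* Cache the pte page that backs the start of the pkmap area: */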
	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

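/* Hand one highmem page over to the buddy allocator: */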
static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

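/*
 * Clip an active region to the pfn window passed in via *datax and
 * free every valid highmem page in the intersection:
 */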
static int __init add_highpages_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);
	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init()		do { } while (0)
#endif /* CONFIG_HIGHMEM */

pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
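		/* Keep the entry present but aimed at the zero page; the '1' is _PAGE_PRESENT: */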
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

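		/* EDX bit 20 of CPUID leaf 0x80000001 is the NX feature flag: */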
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();
	save_pg_dir();
	zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
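/* Hot-added memory goes into ZONE_HIGHMEM on 32-bit: */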
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

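	/*
	 * Copy a byte out of the read-only FIX_WP_TEST page, then try to
	 * write it back (label 1). If WP is honoured in supervisor mode,
	 * the write faults and the exception table entry resumes execution
	 * at label 2, so flag keeps its initial value of 1. If the write
	 * succeeds, the xorl clears flag to 0.
	 */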
	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
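/* Test datum that rodata_test() tries to modify; 0xC3 is the x86 'ret' opcode: */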
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark it not-present - any buggy init-section access will
	 * then create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of it, we need to make it
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

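/* Generic bootmem reservation interface also provided on 64-bit; here it simply forwards to reserve_bootmem(): */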
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}