/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

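/*
 * Kernel virtual address space reserved for vmalloc: 128 << 20 bytes
 * == 128 MB. Growing it shrinks the lowmem direct mapping by the
 * same amount.
 */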
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the given
 * global directory entry. In non-PAE compilation mode this simply
 * returns the pgd entry itself, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

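        /*
         * In PAE mode the pmd page is allocated from bootmem;
         * bootmem pages come back zeroed, so every entry of the new
         * table starts out empty.
         */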
#ifdef CONFIG_X86_PAE
        pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        pud = pud_offset(pgd, 0);
        if (pmd_table != pmd_offset(pud, 0))
                BUG();
#else
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);
#endif

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                if (page_table != pte_offset_kernel(pmd, 0))
                        BUG();

                return page_table;
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without rechecking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                if (pgd_none(*pgd))
                        one_md_table_init(pgd);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        if (pmd_none(*pmd))
                                one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

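/*
 * Everything from PAGE_OFFSET up to the end of the init sections is
 * treated as kernel text here, so mappings that cover any of it keep
 * their executable protection.
 */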
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

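                                /*
                                 * address2 is the last byte covered by
                                 * this large page; if either end of the
                                 * page overlaps kernel text, the whole
                                 * page is mapped executable.
                                 */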
                                if (is_kernel_text(address) || is_kernel_text(address2))
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
                                        if (is_kernel_text(address))
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                                        else
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
                                }
                        }
                }
        }
}

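/*
 * Pfns 0x70000-0x7003f (physical 0x70000000-0x7003ffff) are unusable
 * as RAM on CPUs affected by the Pentium Pro erratum checked for by
 * ppro_with_ram_bug(); such pages are kept reserved.
 */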
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}

extern int is_available_memory(efi_memory_desc_t *);

int page_is_ram(unsigned long pagenr)
{
        int i;
        unsigned long addr, end;

        if (efi_enabled) {
                efi_memory_desc_t *md;
                void *p;

                for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                        md = p;
                        if (!is_available_memory(md))
                                continue;
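                        /*
                         * Round the region start up and the end down
                         * to page boundaries, so partially covered
                         * pages are never counted as RAM.
                         */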
                        addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                        end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

                        if ((pagenr >= addr) && (pagenr < end))
                                return 1;
                }
                return 0;
        }

        for (i = 0; i < e820.nr_map; i++) {

                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 * !!!FIXME!!! Some BIOSen report areas as RAM that
                 * are not. Notably the 640->1Mb area. We need a sanity
                 * check here.
                 */
                addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

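        /*
         * Cache the pte that maps the base of the pkmap area; the
         * highmem kmap code indexes pkmap_page_table directly.
         */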
        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                free_new_highpage(page);
        } else
                SetPageReserved(page);
}

static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
        free_new_highpage(page);
        totalram_pages++;
#ifdef CONFIG_FLATMEM
        max_mapnr = max(pfn, max_mapnr);
#endif
        num_physpages++;
        return 0;
}

/*
 * The NUMA case is not currently handled. We assume a single node
 * and that all dynamically added memory being onlined here is in
 * HIGHMEM.
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        add_one_highpage_hotplug(page, page_to_pfn(page));
}


#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
                add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

static void __init pagetable_init(void)
{
        unsigned long vaddr;
        pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
        int i;
        /* Init entries of the first-level page table to the zero page */
        for (i = 0; i < PTRS_PER_PGD; i++)
                set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

        /* Enable PSE if available */
        if (cpu_has_pse) {
                set_in_cr4(X86_CR4_PSE);
        }

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
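        /*
         * An end address of 0 wraps around, so the range below runs
         * from vaddr to the very top of the 32-bit address space,
         * which is where the fixmap lives.
         */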
        page_table_range_init(vaddr, 0, pgd_base);

        permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs it when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}

#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
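                /*
                 * With PAE, install a present entry pointing at the
                 * zero page instead of clearing the slot; the "1 +"
                 * sets _PAGE_PRESENT.
                 */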
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
void __init noexec_setup(const char *str)
{
        if (!strncmp(str, "on", 2) && cpu_has_nx) {
                __supported_pte_mask |= _PAGE_NX;
                disable_nx = 0;
        } else if (!strncmp(str, "off", 3)) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
}

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
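                /*
                 * EDX bit 20 of CPUID leaf 0x80000001 advertises NX
                 * (Execute Disable) support.
                 */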
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
        pte_t *pte;
        int ret = 1;

        if (!nx_enabled)
                goto out;

        pte = lookup_address(vaddr);
        BUG_ON(!pte);

        if (!pte_exec_kernel(*pte))
                ret = 0;

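        /*
         * The NX bit is bit 63 of the PAE pte, i.e. bit
         * (_PAGE_BIT_NX - 32) of the high word.
         */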
        if (enable)
                pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        else
                pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
        __flush_tlb_all();
out:
        return ret;
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

        load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk("Ok.\n");
        }
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
        num_physpages = highend_pfn;
#else
        num_physpages = max_low_pfn;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifdef CONFIG_FLATMEM
        if (!mem_map)
                BUG();
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                        PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
                );

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP does its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}


/*
 * This is for the non-NUMA, single-node case. On i386, hotplugged
 * memory is always added to highmem for now.
 */
#ifdef CONFIG_MEMORY_HOTPLUG
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = &contig_page_data;
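        /* MAX_NR_ZONES-1 selects the last zone: ZONE_HIGHMEM on i386. */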
        struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
#endif
#endif

kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;

void __init pgtable_cache_init(void)
{
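        /*
         * A separate pmd cache is only needed with PAE
         * (PTRS_PER_PMD > 1); with a folded pmd only the pgd is
         * cached, and it is the pgd that then needs a destructor.
         */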
        if (PTRS_PER_PMD > 1) {
                pmd_cache = kmem_cache_create("pmd",
                                        PTRS_PER_PMD*sizeof(pmd_t),
                                        PTRS_PER_PMD*sizeof(pmd_t),
                                        0,
                                        pmd_ctor,
                                        NULL);
                if (!pmd_cache)
                        panic("pgtable_cache_init(): cannot create pmd cache");
        }
        pgd_cache = kmem_cache_create("pgd",
                                PTRS_PER_PGD*sizeof(pgd_t),
                                PTRS_PER_PGD*sizeof(pgd_t),
                                0,
                                pgd_ctor,
                                PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
        if (!pgd_cache)
                panic("pgtable_cache_init(): Cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

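        /*
         * flag starts out as 1. The write at label 1 targets the
         * read-only FIX_WP_TEST page: if WP is honoured in supervisor
         * mode it faults, the exception table entry resumes at label
         * 2, the xorl is skipped and flag stays 1. If the write
         * silently succeeds, the xorl clears flag to 0.
         */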
        __asm__ __volatile__(
                "       movb %0,%1      \n"
                "1:     movb %1,%0      \n"
                "       xorl %2,%2      \n"
                "2:                     \n"
                ".section __ex_table,\"a\"\n"
                "       .align 4        \n"
                "       .long 1b,2b     \n"
                ".previous              \n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)&__start_rodata;

        for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
                change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
                (unsigned long)(&__end_rodata - &__start_rodata) >> 10);

        /*
         * change_page_attr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

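        /*
         * Hand each page back to the page allocator, first filling it
         * with POISON_FREE_INITMEM so that a late reference to freed
         * init code or data shows up as recognizable garbage.
         */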
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                (unsigned long)(&__init_begin),
                (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif