/*
 *  linux/arch/i386/kernel/head32.c -- prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2007 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/init.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/e820/api.h>
#include <asm/page.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>
#include <asm/tlbflush.h>
#include <asm/bootparam_utils.h>

static void __init i386_default_early_setup(void)
{
	/* Initialize 32bit specific setup functions */
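	/*
	 * These are the PC defaults; platforms with a dedicated
	 * hardware_subarch (Intel MID, CE4100) install their own hooks
	 * from the switch in i386_start_kernel() instead.
	 */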
	x86_init.resources.reserve_resources = i386_reserve_resources;
	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
}

asmlinkage __visible void __init i386_start_kernel(void)
{
	cr4_init_shadow();
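	/*
	 * Zero out the boot_params fields that a legacy bootloader may
	 * have left with stale data (detected via the sentinel byte), so
	 * later setup code does not consume garbage.
	 */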
	sanitize_boot_params(&boot_params);

	x86_early_init_platform_quirks();

	/* Call the subarch specific early setup function */
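	/*
	 * boot_params.hdr.hardware_subarch is filled in by the bootloader
	 * (see Documentation/x86/boot.txt); it is zero (X86_SUBARCH_PC) on
	 * standard PCs, which therefore take the default case below.
	 */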
	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	case X86_SUBARCH_CE4100:
		x86_ce4100_early_setup();
		break;
	default:
		i386_default_early_setup();
		break;
	}

	start_kernel();
}

/*
 * Initialize page tables. This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base. The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 *
 * In PAE mode initial_page_table is statically defined to contain
 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
 * entries). The identity mapping is handled by pointing two PGD entries
 * to the first kernel PMD. Note the upper half of each PMD or PTE are
 * always zero at this stage.
 */
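/*
 * Worked example (an illustration, assuming the default VMSPLIT_3G on a
 * non-PAE build): PAGE_OFFSET is 0xC0000000 and PGDIR_SHIFT is 22, so each
 * PDE maps 4 MiB and the kernel half of the page directory starts at index
 * 0xC0000000 >> 22 = 768. One loop pass below then installs the same page
 * table at, say, index 0 (identity) and index 768 (PAGE_OFFSET).
 */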
void __init mk_early_pgtbl_32(void)
{
#ifdef __pa
#undef __pa
#endif
#define __pa(x)  ((unsigned long)(x) - PAGE_OFFSET)
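	/*
	 * This function is called from head_32.S before paging is enabled,
	 * so every global must be reached through its physical address.
	 * The local __pa() above is a plain arithmetic form that is safe
	 * in this environment; the generic one may expand to an
	 * out-of-line call (e.g. with CONFIG_DEBUG_VIRTUAL).
	 */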
	pte_t pte, *ptep;
	int i;
	unsigned long *ptr;
	/* Enough space to fit pagetables for the low memory linear map */
	const unsigned long limit = __pa(_end) +
		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
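	/*
	 * The PAGE_TABLE_SIZE(LOWMEM_PAGES) term extends the mapping past
	 * _end: the brk area right after the image will hold the page
	 * tables for the full lowmem linear map, and those pages must
	 * themselves be covered by this early mapping.
	 */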
#ifdef CONFIG_X86_PAE
	pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
#define SET_PL2(pl2, val)	{ (pl2).pmd = (val); }
#else
	pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
#define SET_PL2(pl2, val)	{ (pl2).pgd = (val); }
#endif

	ptep = (pte_t *)__pa(__brk_base);
	pte.pte = PTE_IDENT_ATTR;

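	/*
	 * Each pass installs one page table and fills its PTRS_PER_PTE
	 * entries, mapping the next 4 MiB (non-PAE) or 2 MiB (PAE) of
	 * physical memory. Non-PAE writes the PDE twice, once for the
	 * identity mapping and once for the kernel mapping at PAGE_OFFSET;
	 * PAE gets its identity mapping from the static initial_page_table
	 * pointing two PGD entries at this PMD (see comment above).
	 */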
	while ((pte.pte & PTE_PFN_MASK) < limit) {

		SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
		*pl2p = pl2;
#ifndef CONFIG_X86_PAE
		/* Kernel PDE entry */
		*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
#endif
		for (i = 0; i < PTRS_PER_PTE; i++) {
			*ptep = pte;
			pte.pte += PAGE_SIZE;
			ptep++;
		}

		pl2p++;
	}

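	/*
	 * Publish the results; paging is still off, so max_pfn_mapped and
	 * _brk_end are updated through their physical addresses. 'pte'
	 * now holds the first unmapped page frame, and 'ptep' the first
	 * free brk byte (converted back to a virtual address below).
	 */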
	ptr = (unsigned long *)__pa(&max_pfn_mapped);
	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;

	ptr = (unsigned long *)__pa(&_brk_end);
	*ptr = (unsigned long)ptep + PAGE_OFFSET;
}