// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

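/* Per-CPU storage for the entry stack; mapped into the cpu_entry_area below. */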
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

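/*
 * Storage for the 64-bit IST exception stacks: the debug stack gets
 * DEBUG_STKSZ, the remaining stacks EXCEPTION_STKSZ each.
 */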
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif

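/*
 * Each CPU owns a CPU_ENTRY_AREA_SIZE slot starting at
 * CPU_ENTRY_AREA_PER_CPU; return the virtual address of @cpu's slot.
 */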
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

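/* Install a single PTE mapping @pa at the entry area address @cea_vaddr. */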
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;

	set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
}

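/*
 * Map @pages pages of the per-CPU object at @ptr into the entry area
 * starting at @cea_vaddr.
 */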
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

/* Setup the cpu_entry_area mappings only once per CPU */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	extern char _entry_trampoline[];

	/* On 64-bit systems, we use a read-only GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy. If the
	 * GDT is read-only, that will triple fault. The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);

	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif
}

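/*
 * On 32-bit, populate the kernel page tables covering the whole
 * CPU_ENTRY_AREA_MAP_SIZE range up front so that cea_set_pte() can
 * install PTEs there later; on 64-bit nothing needs to be done here.
 */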
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	for (; start < end; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

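/*
 * Populate the entry area page tables and set up the entry area
 * mappings for every possible CPU.
 */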
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);
}