]>
Commit | Line | Data |
---|---|---|
0fa11d2c TG |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | #ifndef _ASM_X86_CPU_ENTRY_AREA_H | |
4 | #define _ASM_X86_CPU_ENTRY_AREA_H | |
5 | ||
6 | #include <linux/percpu-defs.h> | |
7 | #include <asm/processor.h> | |
8 | ||
9 | /* | |
10 | * cpu_entry_area is a percpu region that contains things needed by the CPU | |
11 | * and early entry/exit code. Real types aren't used for all fields here | |
12 | * to avoid circular header dependencies. | |
13 | * | |
14 | * Every field is a virtual alias of some other allocated backing store. | |
15 | * There is no direct allocation of a struct cpu_entry_area. | |
16 | */ | |
struct cpu_entry_area {
	/*
	 * Per-CPU GDT page. One full page so the field below starts on a
	 * page boundary.
	 */
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page.
	 */
	struct entry_stack_page entry_stack_page;

	/*
	 * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

	/* Page holding the entry trampoline code mapped into every CPU's area. */
	char entry_trampoline[PAGE_SIZE];

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries.
	 *
	 * In the future, this should have a separate slot for each stack
	 * with guard pages between them.
	 *
	 * Sized as (N_EXCEPTION_STACKS - 1) regular-size stacks plus one
	 * debug-size stack, packed contiguously (no guard pages yet).
	 */
	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
#endif
};
44 | ||
/* Size of one CPU's entry area, and of the array of all of them. */
#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)

/* Per-CPU pointer to this CPU's entry area alias mapping. */
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);

extern void setup_cpu_entry_areas(void);

/* Install a PTE mapping @pa with @flags at the entry-area address @cea_vaddr. */
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

/*
 * Fixed virtual layout: the read-only IDT page sits at the base of the
 * entry-area region; the per-CPU areas follow one page after it.
 */
#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

/* Total span of the mapping: IDT page plus all per-CPU areas, from the base. */
#define CPU_ENTRY_AREA_MAP_SIZE			\
	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
62 | ||
63 | static inline struct entry_stack *cpu_entry_stack(int cpu) | |
64 | { | |
65 | return &get_cpu_entry_area(cpu)->entry_stack_page.stack; | |
66 | } | |
0fa11d2c TG |
67 | |
68 | #endif |