#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

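/*
 * CR4.PCE controls whether user space may execute RDPMC.  It is kept
 * per-mm: set the bit when RDPMC is globally allowed or when this mm
 * has been granted access to its perf counters, clear it otherwise.
 */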
#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_true(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

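/*
 * enter_lazy_tlb() is called when this CPU is about to run without
 * owning an mm (e.g. for a kernel thread).  Marking the tlbstate lazy
 * lets a later flush IPI switch us away via leave_mm() instead of
 * repeatedly flushing a TLB we are not actively using.
 */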
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

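/*
 * switch_mm() is called from context_switch() when scheduling between
 * tasks and, via activate_mm(), at exec time.  When the mm really
 * changes it reloads CR3, the per-mm CR4 bits and (if needed) the LDT,
 * and moves this CPU from prev's to next's mm_cpumask().  The
 * prev == next path handles SMP lazy-TLB mode: leave_mm() may have
 * dropped this CPU from mm_cpumask(), in which case CR3 must be
 * reloaded before user mappings are touched again.
 */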
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never free an LDT while the mm still exists.  That
		 * means that next->context.ldt != prev->context.ldt,
		 * because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure we do not use freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

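/*
 * activate_mm() installs a brand-new address space on the current CPU;
 * exec_mmap() uses it when execve() replaces the old mm.  It is simply
 * switch_mm() with no task pointer, preceded by the paravirt hook.
 */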
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

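/*
 * deactivate_mm() is the teardown counterpart of activate_mm(): clear
 * %gs on 32-bit, and the gs selector plus %fs on 64-bit, so stale user
 * segment state cannot leak past the outgoing mm.
 */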
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

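/*
 * arch_dup_mmap()/arch_exit_mmap() are the hooks run when an address
 * space is duplicated at fork time or torn down; on x86 they just
 * forward to the paravirt layer (a no-op on bare metal).
 */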
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

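/*
 * An mm is 64-bit unless IA-32 emulation is compiled in and the mm was
 * flagged as compat (ia32_compat == TIF_IA32) at exec time.  Without
 * CONFIG_X86_64 every mm is 32-bit by definition.
 */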
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

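/*
 * arch_bprm_mm_init() runs while a new executable's mm is being set up;
 * all it has to do here is reset the per-mm MPX bounds-directory state.
 */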
static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */