#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
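/*
 * Illustrative sketch only (not part of the original header; the helper
 * name is hypothetical and the block is not compiled): CR4.PCE is the bit
 * that allows user space to execute RDPMC directly, which is what
 * load_mm_cr4() above toggles per mm.  A user-space counter read would
 * look roughly like this:
 */
#if 0
static inline u64 example_user_rdpmc(u32 counter)
{
	u32 lo, hi;

	/* RDPMC takes the counter index in ECX and returns EDX:EAX. */
	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return lo | ((u64)hi << 32);
}
#endif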
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions. This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates. On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active. The LDT will not be freed until
	 * after the IPI is handled by all such CPUs. This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();
#else
	clear_LDT();
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}
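/*
 * Illustrative sketch of the write side that the lockless_dereference()
 * above pairs with.  The real code lives in arch/x86/kernel/ldt.c; the
 * helper names below are hypothetical and the block is not compiled.
 */
#if 0
static void example_install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	/* Publish the new table; pairs with lockless_dereference(). */
	smp_store_release(&mm->context.ldt, ldt);

	/* IPI every CPU running this mm so it reloads its LDT register. */
	on_each_cpu_mask(mm_cpumask(mm), example_flush_ldt_fn, mm, true);
}
#endif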
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
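/*
 * Note on enter_lazy_tlb() above: it runs when this CPU switches to a
 * kernel thread, which merely borrows the previous mm.  With the state
 * set to TLBSTATE_LAZY, the TLB-flush IPI path can call leave_mm(), drop
 * this CPU from mm_cpumask() and stop sending it flush IPIs for that mm
 * until it switches back to a real user mm.
 */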
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask. (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd. TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 *
		 */
		load_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register. This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists. That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}
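/*
 * Illustrative counterpart (hypothetical helper, not the real flush code,
 * not compiled) to the ordering comment inside switch_mm() above: the
 * flusher writes the PTE first and only then reads mm_cpumask() to decide
 * whether an IPI is needed, so each side must order its store before its
 * load.  switch_mm() gets that ordering from the serializing load_cr3().
 */
#if 0
static void example_flush_side(struct mm_struct *mm, pte_t *ptep)
{
	set_pte(ptep, __pte(0));	/* CPU 0: write the PTE */
	smp_mb();			/* order the PTE store before the cpumask load */
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
}
#endif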
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct. That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process. Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

static inline int vma_pkey(struct vm_area_struct *vma)
{
	u16 pkey = 0;
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;
	pkey = (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
#endif
	return pkey;
}
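/*
 * Worked example for vma_pkey() above (bit positions assumed for
 * illustration): the four VM_PKEY_BIT* flags are contiguous starting at
 * VM_PKEY_SHIFT, so a vma whose flags have VM_PKEY_BIT1 and VM_PKEY_BIT3
 * set yields pkey = 0b1010 = 10.
 */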
288 | ||
1965aae3 | 289 | #endif /* _ASM_X86_MMU_CONTEXT_H */ |