/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

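/*
 * CR4.PCE controls whether RDPMC is usable from user space.  Set it when
 * switching to an mm that has been granted access to the performance
 * counters (or when rdpmc is enabled system-wide), and clear it otherwise.
 */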
#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

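/*
 * Called when a new mm is created (e.g. on fork or exec): hand it a fresh
 * context id, start its TLB generation counter at zero, initialize the
 * protection-key bookkeeping when OSPKE is enabled, and set up LDT state.
 */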
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

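/*
 * deactivate_mm() resets the user segment registers: 32-bit only clears the
 * lazily-switched %gs, while 64-bit zeroes both %gs and %fs.
 */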
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

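/* Forward address-space duplication and teardown events to the paravirt layer. */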
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

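/*
 * An mm is considered 64-bit unless IA32 emulation is built in and the
 * process is marked as a 32-bit compat task (ia32_compat == TIF_IA32).
 */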
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif

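/*
 * Worked example for vma_pkey() above: the pkey is encoded in four
 * consecutive vm_flags bits starting at VM_PKEY_SHIFT, with VM_PKEY_BIT0 as
 * the least significant bit.  A VMA with VM_PKEY_BIT1 and VM_PKEY_BIT3 set
 * therefore decodes to pkey 0b1010 = 10.
 */
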
/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
 * bits.  This serves two purposes.  It prevents a nasty situation in
 * which PCID-unaware code saves CR3, loads some other value (with PCID
 * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
 * the saved ASID was nonzero.  It also means that any bugs involving
 * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
 * deterministically.
 */

static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		VM_WARN_ON_ONCE(asid > 4094);
		return __sme_pa(mm->pgd) | (asid + 1);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(mm->pgd);
	}
}

static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
{
	VM_WARN_ON_ONCE(asid > 4094);
	return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
}
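
/*
 * Illustrative example for build_cr3()/build_cr3_noflush() above (ignoring
 * the SME encryption mask added by __sme_pa()): with a page-aligned pgd at
 * physical 0x1234000 and ASID 5 on a PCID-capable CPU,
 *   build_cr3()         -> 0x1234000 | (5 + 1) = 0x1234006
 *   build_cr3_noflush() -> 0x1234006 with CR3_NOFLUSH (bit 63) also set,
 * which switches to PCID 6 without flushing that PCID's TLB entries.
 * ASIDs stop at 4094 so that asid + 1 still fits in the 12-bit PCID field.
 */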

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */