/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif  /* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);

static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_branch_unlikely(&rdpmc_always_available_key) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
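
/*
 * Note on the helper above: CR4.PCE ("performance-monitoring counter
 * enable") is what gates the user-space RDPMC instruction, so
 * load_mm_cr4() effectively decides whether the incoming mm may read
 * performance counters directly from user mode.  It is expected to be
 * called on the mm-switch path (switch_mm_irqs_off() below is the
 * natural caller) so that the CR4 policy always tracks the current mm.
 */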

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions.  This is
         * needed to prevent us from installing evil descriptors such as
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct      *entries;
        unsigned int            nr_entries;

        /*
         * If PTI is in use, then the entries array is not mapped while we're
         * in user mode.  The whole array will be aliased at the address
         * given by ldt_slot_va(slot).  We use two slots so that we can
         * allocate and map, and enable a new LDT without invalidating the
         * mapping of an older, still-in-use LDT.
         *
         * slot will be -1 if this LDT doesn't have an alias mapping.
         */
        int                     slot;
};

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
#ifdef CONFIG_X86_64
        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
#else
        BUG();
        return (void *)fix_to_virt(FIX_HOLE);
#endif
}
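
/*
 * Worked example for ldt_slot_va() (assuming the usual x86 values of
 * LDT_ENTRIES == 8192 and LDT_ENTRY_SIZE == 8):
 *
 *      LDT_SLOT_STRIDE == 64 KiB (a whole number of pages)
 *      ldt_slot_va(0)  == LDT_BASE_ADDR
 *      ldt_slot_va(1)  == LDT_BASE_ADDR + 64 KiB
 *
 * i.e. the two alias slots are laid out back to back above LDT_BASE_ADDR.
 */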

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
        mm->context.ldt = NULL;
        init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else   /* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;

        /* READ_ONCE synchronizes with smp_store_release */
        ldt = READ_ONCE(mm->context.ldt);

        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active.  The LDT will not be freed until
         * after the IPI is handled by all such CPUs.  This means that,
         * if the ldt_struct changes before we return, the values we see
         * will be safe, and the new values will be loaded before we run
         * any user code.
         *
         * NB: don't try to convert this to use RCU without extreme care.
         * We would still need IRQs off, because we don't want to change
         * the local LDT after an IPI loaded a newer value than the one
         * that we can see.
         */

        if (unlikely(ldt)) {
                if (static_cpu_has(X86_FEATURE_PTI)) {
                        if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
                                /*
                                 * Whoops -- either the new LDT isn't mapped
                                 * (if slot == -1) or is mapped into a bogus
                                 * slot (if slot > 1).
                                 */
                                clear_LDT();
                                return;
                        }

                        /*
                         * If page table isolation is enabled, ldt->entries
                         * will not be mapped in the userspace pagetables.
                         * Tell the CPU to access the LDT through the alias
                         * at ldt_slot_va(ldt->slot).
                         */
                        set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
                } else {
                        set_ldt(ldt->entries, ldt->nr_entries);
                }
        } else {
                clear_LDT();
        }
#else
        clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        /*
         * Load the LDT if either the old or new mm had an LDT.
         *
         * An mm will never go from having an LDT to not having an LDT.  Two
         * mms never share an LDT, so we don't gain anything by checking to
         * see whether the LDT changed.  There's also no guarantee that
         * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
         * then prev->context.ldt will also be non-NULL.
         *
         * If we really cared, we could optimize the case where prev == next
         * and we're exiting lazy mode.  Most of the time, if this happens,
         * we don't actually need to reload LDTR, but modify_ldt() is mostly
         * used by legacy code and emulators where we don't need this level of
         * performance.
         *
         * This uses | instead of || because it generates better code.
         */
        if (unlikely((unsigned long)prev->context.ldt |
                     (unsigned long)next->context.ldt))
                load_mm_ldt(next);
#endif

        DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mutex_init(&mm->context.lock);

        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and always allocated */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        init_new_context_ldt(mm);
        return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}
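
/*
 * Lifecycle note: init_new_context() and destroy_context() are the x86
 * hooks behind the generic mm setup/teardown paths, so every new mm
 * (fork/exec) starts out with a fresh ctx_id, a zeroed TLB generation
 * and no LDT, and the LDT is torn down again when the mm is freed.
 */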

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif
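
/*
 * Usage note: activate_mm() is what the exec path uses to install a
 * brand-new mm, while deactivate_mm() is expected to run when a task
 * lets go of its mm; it mainly resets the user segment bases (GS, plus
 * FS on 64-bit) so that no stale user TLS selectors survive.
 */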

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
        return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
        ldt_arch_exit_mmap(mm);
}
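
/*
 * These two are the arch hooks invoked by the generic mm code when an
 * address space is duplicated (fork) or torn down (the final exit_mmap).
 * That is how a forked child inherits a copy of its parent's LDT, and
 * how the PTI alias mapping for the LDT gets cleaned up on exit.
 */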

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return  !IS_ENABLED(CONFIG_IA32_EMULATION) ||
                !(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif
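
/*
 * In other words, on a 64-bit kernel an mm only reports false here when
 * IA32 emulation is built in and the mm was marked TIF_IA32 (a 32-bit
 * compat process).  The MPX code is one expected consumer, since the
 * bounds directory layout differs between 32-bit and 64-bit address
 * spaces.
 */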

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
        unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
                                      VM_PKEY_BIT2 | VM_PKEY_BIT3;

        return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
        return 0;
}
#endif
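
/*
 * Worked example for vma_pkey(): the protection key is stored as a 4-bit
 * field inside vma->vm_flags, with VM_PKEY_BIT0 as the lowest of those
 * bits.  A VMA tagged with pkey 5 therefore carries the bit pattern
 * 0b0101 in that field; masking with the four VM_PKEY_BIT* flags and
 * shifting right by VM_PKEY_SHIFT recovers the integer 5.
 */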

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
        if (!current->mm)
                return true;
        /*
         * Should PKRU be enforced on the access to this VMA?  If
         * the VMA is from another process, then PKRU has no
         * relevance and should not be enforced.
         */
        if (current->mm != vma->vm_mm)
                return true;

        return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;
        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}
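
/*
 * Put differently, the only accesses actually filtered by the pkey check
 * are ordinary data reads and writes made by the owning process itself.
 * A remote access (for example a debugger poking the target via ptrace,
 * which is expected to reach this code with foreign == true) bypasses
 * PKRU entirely, as does any instruction fetch.
 */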

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());

        VM_BUG_ON(cr3 != __read_cr3());
        return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */