#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

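/*
 * Keep CR4.PCE (which gates user-space RDPMC) in sync with the incoming
 * mm on context switch: enable it only if RDPMC is globally allowed or
 * perf has granted this mm self-monitoring access; otherwise clear it so
 * a user-space RDPMC faults.
 */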
#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}
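
/*
 * For reference, the update side that the lockless_dereference() above
 * pairs with looks roughly like the following (a sketch modeled on
 * install_ldt() in arch/x86/kernel/ldt.c; treat the exact names as
 * illustrative rather than authoritative):
 *
 *	// Publish the new table; synchronizes with lockless_dereference().
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *
 *	// IPI every CPU that has this mm active so it reloads LDTR before
 *	// the old table can be freed.
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 */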

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

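/*
 * Called when this CPU enters lazy TLB mode (e.g. when switching to a
 * kernel thread that keeps borrowing the outgoing task's mm): record the
 * lazy state so the TLB flush code knows this CPU does not need to keep
 * the mm's user mappings strictly up to date.
 */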
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
}

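/*
 * init_new_context() runs when a new mm_struct is created (fork/exec):
 * seed the default protection-key state when OSPKE is enabled and set up
 * the (initially empty) per-mm LDT context.  destroy_context() releases
 * the LDT side when the mm is torn down.
 */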
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	/* Propagate an LDT setup failure rather than silently ignoring it. */
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

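/*
 * Defining switch_mm_irqs_off advertises to generic code that x86 has a
 * dedicated interrupts-off variant, so callers that already run with IRQs
 * disabled use it instead of falling back to switch_mm().
 */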
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

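/*
 * mm duplication/teardown hooks: on x86 these only notify the paravirt
 * layer (effectively a no-op unless a paravirt hypervisor such as Xen
 * hooks them).
 */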
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

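/*
 * Report whether an mm belongs to a 64-bit task: always false on 32-bit
 * kernels, true on 64-bit kernels unless the task runs in ia32 compat
 * mode.
 */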
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif
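
/*
 * Worked example with hypothetical values: a VMA tagged with protection
 * key 5 (binary 0101) has VM_PKEY_BIT0 and VM_PKEY_BIT2 set in vm_flags,
 * so (vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT yields 5.  With pkeys
 * compiled out, the stub above always reports key 0.
 */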

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
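
/*
 * Illustrative use (a sketch, not lifted from a real caller): with
 * preemption disabled and outside NMI context, a KVM-like caller could do
 *
 *	unsigned long saved_cr3 = __get_current_cr3_fast();
 *	// ... temporarily run with a different CR3 ...
 *	write_cr3(saved_cr3);
 *
 * to save and later restore CR3 without paying for a __read_cr3().
 */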

#endif /* _ASM_X86_MMU_CONTEXT_H */