#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

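/*
 * Update CR4.PCE on the current CPU: user-space RDPMC is permitted
 * only when perf has enabled it for this mm (or when rdpmc is marked
 * always available).
 */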
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

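/*
 * Install @mm's LDT on the current CPU.  Must be called with
 * preemption disabled (see the DEBUG_LOCKS_WARN_ON below).
 */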
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();
#else
	clear_LDT();
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

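/*
 * Called when switching to a kernel thread: the old mm stays loaded,
 * but this CPU's TLB state is marked lazy so a later flush IPI can
 * drop the mm (leave_mm()) instead of flushing it again and again.
 */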
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

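/*
 * Arch hooks for mm creation and teardown; on x86 the only per-mm
 * context that needs explicit setup or cleanup here is the LDT.
 */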
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	init_new_context_ldt(tsk, mm);
	return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

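/*
 * switch_mm() switches the CPU over to @next's page tables and per-mm
 * state; switch_mm_irqs_off() is the variant for callers that have
 * already disabled interrupts.
 */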
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

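/*
 * activate_mm() runs the paravirt hook and then does a plain
 * switch_mm(); deactivate_mm() below just clears the user segment
 * registers (gs, and fs on 64-bit) for the outgoing mm.
 */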
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

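/*
 * Hooks called from dup_mmap() and exit_mmap(): a no-op on native
 * hardware, the paravirt calls let a hypervisor such as Xen manage
 * the mm's page tables (e.g. unpinning them on exit).
 */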
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

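/*
 * Does this mm belong to a 64-bit task?  Always false on 32-bit
 * kernels; on 64-bit kernels a compat (ia32) mm is detected via
 * mm->context.ia32_compat.
 */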
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

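/*
 * Called while the mm for a new executable is being set up; on x86
 * this just resets the MPX bounds-directory state for the fresh mm.
 */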
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

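/*
 * Extract the protection-key number encoded in the VMA's flags
 * (VM_PKEY_BIT0..3); always 0 when protection keys are not configured.
 */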
static inline int vma_pkey(struct vm_area_struct *vma)
{
	u16 pkey = 0;
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;
	pkey = (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
#endif
	return pkey;
}

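/*
 * Check the current thread's PKRU register: does it permit read
 * (and, if @write, also write) access to pages tagged with @pkey?
 */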
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

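/*
 * Called from the generic VM (e.g. get_user_pages()) to ask whether
 * protection keys permit this access to @vma.
 */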
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

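/*
 * Same check as above, but against the pkey stored in a PTE rather
 * than in a VMA.
 */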
static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	return __pkru_allows_pkey(pte_flags_pkey(pte_flags(pte)), write);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */