/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
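
/*
 * Each new mm is handed a unique ctx_id taken from this counter; see
 * init_new_context() below.
 */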
extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif  /* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;
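
/*
 * Set or clear CR4.PCE for this mm, depending on whether perf has made
 * user-space RDPMC available (either globally or for this mm).
 */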
static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 *
 * Xen requires page-aligned LDTs with special permissions.  This is
 * needed to prevent us from installing evil descriptors such as
 * call gates.  On native, we could merge the ldt_struct and LDT
 * allocations, but it's not worth trying to optimize.
 */
struct ldt_struct {
        struct desc_struct      *entries;
        unsigned int            nr_entries;

        /*
         * If PTI is in use, then the entries array is not mapped while we're
         * in user mode.  The whole array will be aliased at the address
         * given by ldt_slot_va(slot).  We use two slots so that we can allocate
         * and map, and enable a new LDT without invalidating the mapping
         * of an older, still-in-use LDT.
         *
         * slot will be -1 if this LDT doesn't have an alias mapping.
         */
        int                     slot;
};

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
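
/*
 * Return the alias address at which the given LDT slot is mapped when
 * PTI is in use.
 */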
static inline void *ldt_slot_va(int slot)
{
        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
        mm->context.ldt = NULL;
        init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else   /* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif
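
/*
 * Install this mm's LDT, or clear the LDT register if the mm has no LDT
 * (or if modify_ldt() support is compiled out).
 */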
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;

        /* READ_ONCE synchronizes with smp_store_release */
        ldt = READ_ONCE(mm->context.ldt);

        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active.  The LDT will not be freed until
         * after the IPI is handled by all such CPUs.  This means that,
         * if the ldt_struct changes before we return, the values we see
         * will be safe, and the new values will be loaded before we run
         * any user code.
         *
         * NB: don't try to convert this to use RCU without extreme care.
         * We would still need IRQs off, because we don't want to change
         * the local LDT after an IPI loaded a newer value than the one
         * that we can see.
         */

        if (unlikely(ldt)) {
                if (static_cpu_has(X86_FEATURE_PTI)) {
                        if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
                                /*
                                 * Whoops -- either the new LDT isn't mapped
                                 * (if slot == -1) or is mapped into a bogus
                                 * slot (if slot > 1).
                                 */
                                clear_LDT();
                                return;
                        }

                        /*
                         * If page table isolation is enabled, ldt->entries
                         * will not be mapped in the userspace pagetables.
                         * Tell the CPU to access the LDT through the alias
                         * at ldt_slot_va(ldt->slot).
                         */
                        set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
                } else {
                        set_ldt(ldt->entries, ldt->nr_entries);
                }
        } else {
                clear_LDT();
        }
#else
        clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        /*
         * Load the LDT if either the old or new mm had an LDT.
         *
         * An mm will never go from having an LDT to not having an LDT.  Two
         * mms never share an LDT, so we don't gain anything by checking to
         * see whether the LDT changed.  There's also no guarantee that
         * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
         * then prev->context.ldt will also be non-NULL.
         *
         * If we really cared, we could optimize the case where prev == next
         * and we're exiting lazy mode.  Most of the time, if this happens,
         * we don't actually need to reload LDTR, but modify_ldt() is mostly
         * used by legacy code and emulators where we don't need this level of
         * performance.
         *
         * This uses | instead of || because it generates better code.
         */
        if (unlikely((unsigned long)prev->context.ldt |
                     (unsigned long)next->context.ldt))
                load_mm_ldt(next);
#endif

        DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
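
/*
 * Init a new mm's context: used on mm copies at fork() and on brand-new
 * mms at execve().
 */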
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mutex_init(&mm->context.lock);

        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and allocated implicitly */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        init_new_context_ldt(mm);
        return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
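/*
 * Defining switch_mm_irqs_off to itself tells generic code that this
 * architecture provides its own switch_mm_irqs_off(), so the generic
 * fallback to plain switch_mm() is not used.
 */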
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif
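
/*
 * Duplicate the architecture-specific parts of an mm at fork(): notify
 * the paravirt layer and copy the LDT from the old mm.
 */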
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
        return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
        ldt_arch_exit_mmap(mm);
}
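
/*
 * True if this mm runs in 64-bit mode, i.e. it does not belong to an
 * IA-32 compat task.
 */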
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return  !IS_ENABLED(CONFIG_IA32_EMULATION) ||
                !(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif
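
/*
 * Called while a new mm is being set up for exec; on x86 this resets the
 * mm's MPX state.
 */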
static inline void arch_bprm_mm_init(struct mm_struct *mm,
                struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
        unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
                                      VM_PKEY_BIT2 | VM_PKEY_BIT3;

        return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
        return 0;
}
#endif

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
        if (!current->mm)
                return true;

        /*
         * Should PKRU be enforced on the access to this VMA?  If
         * the VMA is from another process, then PKRU has no
         * relevance and should not be enforced.
         */
        if (current->mm != vma->vm_mm)
                return true;

        return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;
        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());

        VM_BUG_ON(cr3 != __read_cr3());
        return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */