#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

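/* Monotonic source of mm context IDs; init_new_context() hands out the next value. */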
extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

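/*
 * CR4.PCE controls whether user space may execute RDPMC; it is managed
 * per-mm so only tasks explicitly allowed to read the counters see it set.
 */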
#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

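/*
 * Entering lazy TLB mode: drop this CPU from mm_cpumask(mm) so it no
 * longer receives TLB flush IPIs for an mm it is only borrowing.
 */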
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
}

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
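/* Tell generic code that this arch supplies its own switch_mm_irqs_off(). */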
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

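/*
 * deactivate_mm() clears the user FS/GS selectors when a task drops its
 * old mm (e.g. on exec), so stale selectors that may reference the old
 * LDT do not survive.
 */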
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

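/*
 * These simply forward to the paravirt mmu hooks; they are no-ops unless
 * a hypervisor (e.g. Xen) overrides them.
 */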
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
 * bits.  This serves two purposes.  It prevents a nasty situation in
 * which PCID-unaware code saves CR3, loads some other value (with PCID
 * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
 * the saved ASID was nonzero.  It also means that any bugs involving
 * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
 * deterministically.
 */

static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		VM_WARN_ON_ONCE(asid > 4094);
		return __sme_pa(mm->pgd) | (asid + 1);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(mm->pgd);
	}
}

static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
{
	VM_WARN_ON_ONCE(asid > 4094);
	return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */