#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
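/*
 * CR4.PCE gates user-space RDPMC.  Enable it for this mm only while
 * perf has an event mapped in it (context.perf_rdpmc_allowed counts
 * such mappings), so unrelated processes cannot read the performance
 * counters directly.
 */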
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
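
/*
 * Enter lazy TLB mode: the CPU keeps the outgoing mm's page tables
 * loaded, e.g. while a kernel thread runs.  Once the state is
 * TLBSTATE_LAZY, a flush IPI makes this CPU call leave_mm() and drop
 * out of the mm's cpumask instead of servicing flush after flush.
 */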
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
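
/*
 * Switch this CPU to a new address space.  The common path (prev !=
 * next) reloads CR3, the per-mm CR4 bits and, if it changed, the LDT.
 * The SMP-only else path handles a CPU that was running this mm in
 * lazy TLB mode and now needs to re-activate it.
 */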
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never free an LDT while the mm still exists.  That
		 * means that next->context.ldt != prev->context.ldt,
		 * because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm() disabled
			 * TLB flush IPI delivery.  We must reload CR3 to make
			 * sure we do not keep using freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
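
/*
 * Used on exec to install the new mm: notify the paravirt layer,
 * then perform a full switch_mm() into the new address space.
 */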
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
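
/*
 * Called on exec to reset the user segment registers: %gs on 32-bit,
 * %fs and %gs on 64-bit are loaded with null selectors so that stale
 * values from the old program do not leak into the new one.
 */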
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
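
/*
 * Called when an mm is duplicated on fork(); the paravirt layer may
 * need to copy per-mm state into the child.
 */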
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}
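
/*
 * Called when an mm is torn down; lets the paravirt layer release
 * whatever it set up for this address space.
 */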
static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}
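
/*
 * Called while a new mm is set up during exec (bprm = linux_binprm);
 * gives MPX a chance to initialize its per-mm state.
 */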
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}
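
/*
 * munmap() hook: if MPX is in use, mpx_notify_unmap() can release
 * bounds tables that covered the unmapped range.
 */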
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */