/* arch/x86/include/asm/tlbflush.h */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
                             unsigned long type)
{
        struct { u64 d[2]; } desc = { { pcid, addr } };

        /*
         * The memory clobber is because the whole point is to invalidate
         * stale TLB entries and, especially if we're flushing global
         * mappings, we don't want the compiler to reorder any subsequent
         * memory accesses before the TLB flush.
         *
         * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
         * invpcid (%rcx), %rax in long mode.
         */
        asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
                      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR         0
#define INVPCID_TYPE_SINGLE_CTXT        1
#define INVPCID_TYPE_ALL_INCL_GLOBAL    2
#define INVPCID_TYPE_ALL_NON_GLOBAL     3

/* Flush one mapping for the given PCID and address, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
                                     unsigned long addr)
{
        __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
        __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
        __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}

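/*
 * Illustrative note (not from the original header): these wrappers are only
 * usable when the CPU advertises INVPCID, so callers gate them on the
 * feature bit, as __native_flush_tlb_global() below does:
 *
 *      if (static_cpu_has(X86_FEATURE_INVPCID))
 *              invpcid_flush_all();
 */
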
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
        struct mm_struct *active_mm;
        int state;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

static inline void cr4_toggle_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        cr4 ^= mask;
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

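/*
 * Illustrative sketch (callers live elsewhere, e.g. in CPU setup code, not
 * in this header): features are enabled through these helpers rather than
 * by writing CR4 directly, so the per-cpu shadow stays coherent:
 *
 *      if (boot_cpu_has(X86_FEATURE_SMEP))
 *              cr4_set_bits(X86_CR4_SMEP);
 *
 * Later readers then use cr4_read_shadow() instead of a slow CR4 read.
 */
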
static inline void __native_flush_tlb(void)
{
        /*
         * If current->mm == NULL then we borrow a mm which may change
         * during a task switch and therefore we must not be preempted
         * while we write CR3 back:
         */
        preempt_disable();
        native_write_cr3(native_read_cr3());
        preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* clear PGE */
        native_write_cr4(cr4 & ~X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
        unsigned long flags;

        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts. (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        __native_flush_tlb_global_irq_disabled();

        raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
        if (boot_cpu_has(X86_FEATURE_PGE))
                __flush_tlb_global();
        else
                __flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL   -1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
};

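/*
 * Illustrative sketch (not part of the original header; this is roughly how
 * the mm code is expected to use the declarations below): a remote flush
 * builds a flush_tlb_info and hands it, together with the CPUs that may be
 * caching the mm, to flush_tlb_others():
 *
 *      struct flush_tlb_info info = {
 *              .mm     = mm,
 *              .start  = start,
 *              .end    = end,
 *      };
 *
 *      if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 *              flush_tlb_others(mm_cpumask(mm), &info);
 */
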
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)        flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)        \
                flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);

#define TLBSTATE_OK     1
#define TLBSTATE_LAZY   2

static inline void reset_lazy_tlbstate(void)
{
        this_cpu_write(cpu_tlbstate.state, 0);
        this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

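/*
 * Illustrative sketch (the real callers are in the mm reclaim code, not
 * here, and the "arch" field name below is an assumption): page reclaim
 * accumulates the CPUs of every mm it unmaps from and issues a single
 * flush before freeing the pages, roughly:
 *
 *      arch_tlbbatch_add_mm(&batch->arch, mm);         // per unmapped page
 *      ...
 *      arch_tlbbatch_flush(&batch->arch);              // once, at the end
 */
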
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)    \
        native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */