]>
Commit | Line | Data |
---|---|---|
26eef70c AK |
1 | #ifndef ARCH_X86_KVM_X86_H |
2 | #define ARCH_X86_KVM_X86_H | |
3 | ||
668fffa3 MT |
4 | #include <asm/processor.h> |
5 | #include <asm/mwait.h> | |
26eef70c | 6 | #include <linux/kvm_host.h> |
8d93c874 | 7 | #include <asm/pvclock.h> |
3eeb3288 | 8 | #include "kvm_cache_regs.h" |
26eef70c | 9 | |
74545705 RK |
10 | #define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL |
11 | ||
/* Drop any exception queued for injection into the guest. */
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}
16 | ||
66fd3f7f GN |
17 | static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector, |
18 | bool soft) | |
937a7eae AK |
19 | { |
20 | vcpu->arch.interrupt.pending = true; | |
66fd3f7f | 21 | vcpu->arch.interrupt.soft = soft; |
937a7eae AK |
22 | vcpu->arch.interrupt.nr = vector; |
23 | } | |
24 | ||
/* Drop any interrupt queued for injection into the guest. */
static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}
29 | ||
3298b75c GN |
30 | static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu) |
31 | { | |
32 | return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending || | |
33 | vcpu->arch.nmi_injected; | |
34 | } | |
66fd3f7f GN |
35 | |
36 | static inline bool kvm_exception_is_soft(unsigned int nr) | |
37 | { | |
38 | return (nr == BP_VECTOR) || (nr == OF_VECTOR); | |
39 | } | |
fc61b800 | 40 | |
/* True when the guest has protected mode enabled (CR0.PE set). */
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}
45 | ||
/* True when the guest is in IA-32e (long) mode, i.e. EFER.LMA is set. */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	/* A 32-bit host cannot run a long-mode guest. */
	return 0;
#endif
}
54 | ||
5777392e NA |
55 | static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) |
56 | { | |
57 | int cs_db, cs_l; | |
58 | ||
59 | if (!is_long_mode(vcpu)) | |
60 | return false; | |
61 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | |
62 | return cs_l; | |
63 | } | |
64 | ||
/* True when 5-level paging (57-bit linear addresses) is active in the guest. */
static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	/* LA57 only has effect when long mode (EFER.LMA) is active. */
	return (vcpu->arch.efer & EFER_LMA) &&
		 kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}
74 | ||
/* True when the vcpu is walking guest page tables via the nested MMU. */
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
79 | ||
/* True when the guest has Physical Address Extension enabled (CR4.PAE). */
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}
84 | ||
/* True when the guest has Page Size Extensions enabled (CR4.PSE). */
static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}
89 | ||
/* True when the guest has paging enabled (CR0.PG). */
static inline int is_paging(struct kvm_vcpu *vcpu)
{
	/* Paging is on in virtually all cases; hint the branch predictor. */
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}
94 | ||
24d1b15f JR |
95 | static inline u32 bit(int bitno) |
96 | { | |
97 | return 1 << (bitno & 31); | |
98 | } | |
99 | ||
/* Width of a guest virtual address: 57 bits with LA57 (5-level paging), else 48. */
static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}
104 | ||
/* Same as vcpu_virt_addr_bits(), but reading CR4 via the emulator context. */
static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}
109 | ||
110 | static inline u64 get_canonical(u64 la, u8 vaddr_bits) | |
111 | { | |
112 | return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits); | |
113 | } | |
114 | ||
/* True when @la is not in canonical form for the guest's address width. */
static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
	/* 32-bit addresses have no canonical-form requirement. */
	return false;
#endif
}
123 | ||
/* Emulator-context variant of is_noncanonical_address(). */
static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
	return false;
#endif
}
133 | ||
bebb106a XG |
134 | static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, |
135 | gva_t gva, gfn_t gfn, unsigned access) | |
136 | { | |
9034e6e8 PB |
137 | /* |
138 | * If this is a shadow nested page table, the "GVA" is | |
139 | * actually a nGPA. | |
140 | */ | |
141 | vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK; | |
bebb106a XG |
142 | vcpu->arch.access = access; |
143 | vcpu->arch.mmio_gfn = gfn; | |
56f17dd3 DM |
144 | vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation; |
145 | } | |
146 | ||
/* True when the cached MMIO info is still valid for the current memslots. */
static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}
151 | ||
152 | /* | |
56f17dd3 DM |
153 | * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we |
154 | * clear all mmio cache info. | |
bebb106a | 155 | */ |
56f17dd3 DM |
156 | #define MMIO_GVA_ANY (~(gva_t)0) |
157 | ||
bebb106a XG |
158 | static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva) |
159 | { | |
56f17dd3 | 160 | if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) |
bebb106a XG |
161 | return; |
162 | ||
163 | vcpu->arch.mmio_gva = 0; | |
164 | } | |
165 | ||
166 | static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva) | |
167 | { | |
56f17dd3 DM |
168 | if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva && |
169 | vcpu->arch.mmio_gva == (gva & PAGE_MASK)) | |
bebb106a XG |
170 | return true; |
171 | ||
172 | return false; | |
173 | } | |
174 | ||
175 | static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) | |
176 | { | |
56f17dd3 DM |
177 | if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn && |
178 | vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) | |
bebb106a XG |
179 | return true; |
180 | ||
181 | return false; | |
182 | } | |
183 | ||
5777392e NA |
184 | static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, |
185 | enum kvm_reg reg) | |
186 | { | |
187 | unsigned long val = kvm_register_read(vcpu, reg); | |
188 | ||
189 | return is_64_bit_mode(vcpu) ? val : (u32)val; | |
190 | } | |
191 | ||
27e6fb5d NA |
192 | static inline void kvm_register_writel(struct kvm_vcpu *vcpu, |
193 | enum kvm_reg reg, | |
194 | unsigned long val) | |
195 | { | |
196 | if (!is_64_bit_mode(vcpu)) | |
197 | val = (u32)val; | |
198 | return kvm_register_write(vcpu, reg, val); | |
199 | } | |
200 | ||
/* True when @quirk has NOT been disabled for this VM via KVM_CAP_DISABLE_QUIRKS. */
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}
205 | ||
ff9d07a0 ZY |
206 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); |
207 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); | |
bab5bb39 | 208 | void kvm_set_pending_timer(struct kvm_vcpu *vcpu); |
71f9833b | 209 | int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); |
ff9d07a0 | 210 | |
8fe8ab46 | 211 | void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); |
108b249c | 212 | u64 get_kvmclock_ns(struct kvm *kvm); |
99e3e30a | 213 | |
064aea77 NHE |
214 | int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, |
215 | gva_t addr, void *val, unsigned int bytes, | |
216 | struct x86_exception *exception); | |
217 | ||
6a4d7550 NHE |
218 | int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, |
219 | gva_t addr, void *val, unsigned int bytes, | |
220 | struct x86_exception *exception); | |
221 | ||
19efffa2 | 222 | void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu); |
ff53604b | 223 | u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); |
4566654b | 224 | bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data); |
ff53604b XG |
225 | int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data); |
226 | int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | |
6a39bbc5 XG |
227 | bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, |
228 | int page_num); | |
52004014 | 229 | bool kvm_vector_hashing_enabled(void); |
4566654b | 230 | |
d91cab78 DH |
231 | #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ |
232 | | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ | |
17a511f8 HH |
233 | | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \ |
234 | | XFEATURE_MASK_PKRU) | |
00b27a3e AK |
235 | extern u64 host_xcr0; |
236 | ||
4ff41732 PB |
237 | extern u64 kvm_supported_xcr0(void); |
238 | ||
9ed96e87 MT |
239 | extern unsigned int min_timer_period_us; |
240 | ||
d0659d94 MT |
241 | extern unsigned int lapic_timer_advance_ns; |
242 | ||
54e9818f | 243 | extern struct static_key kvm_no_apic_vcpu; |
b51012de | 244 | |
/* Convert @nsec nanoseconds to guest TSC cycles using the vcpu's virtual TSC rate. */
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}
250 | ||
/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 *
 * The DIVL instruction divides the 64-bit EDX:EAX pair: EDX is loaded
 * with n ("1" constraint) and EAX with 0 ("0" constraint), forming
 * (n << 32).  NOTE(review): as with hardware division generally, a
 * quotient that does not fit in 32 bits raises a divide error.
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
264 | ||
668fffa3 MT |
265 | static inline bool kvm_mwait_in_guest(void) |
266 | { | |
267 | unsigned int eax, ebx, ecx, edx; | |
268 | ||
269 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT)) | |
270 | return false; | |
271 | ||
272 | switch (boot_cpu_data.x86_vendor) { | |
273 | case X86_VENDOR_AMD: | |
274 | /* All AMD CPUs have a working MWAIT implementation */ | |
275 | return true; | |
276 | case X86_VENDOR_INTEL: | |
277 | /* Handle Intel below */ | |
278 | break; | |
279 | default: | |
280 | return false; | |
281 | } | |
282 | ||
283 | /* | |
284 | * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as | |
285 | * they would allow guest to stop the CPU completely by disabling | |
286 | * interrupts then invoking MWAIT. | |
287 | */ | |
288 | if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | |
289 | return false; | |
290 | ||
291 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); | |
292 | ||
293 | if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK)) | |
294 | return false; | |
295 | ||
296 | return true; | |
297 | } | |
298 | ||
26eef70c | 299 | #endif |