/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 1024
#define KVM_SOFT_MAX_VCPUS 710

/*
 * In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs
 * might be larger than the actual number of VCPUs because the
 * APIC ID encodes CPU topology information.
 *
 * In the worst case, we'll need less than one extra bit for the
 * Core ID, and less than one extra bit for the Package (Die) ID,
 * so a ratio of 4 should be enough.
 */
#define KVM_VCPU_ID_RATIO 4
#define KVM_MAX_VCPU_ID (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
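/*
 * Worked example (illustrative, not part of the upstream header): with the
 * values above, KVM_MAX_VCPU_ID = 1024 * 4 = 4096, i.e. a guest may present
 * APIC IDs up to 4095 even though at most 1024 vCPUs (710 under the soft
 * cap) can actually be created.
 */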

/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
        KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \
        KVM_BUS_LOCK_DETECTION_EXIT)

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_MMU_PGD KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
#define KVM_REQ_NMI KVM_ARCH_REQ(9)
#define KVM_REQ_PMU KVM_ARCH_REQ(10)
#define KVM_REQ_PMI KVM_ARCH_REQ(11)
#define KVM_REQ_SMI KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
        KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
        KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
        KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23)
#define KVM_REQ_GET_NESTED_STATE_PAGES KVM_ARCH_REQ(24)
#define KVM_REQ_APICV_UPDATE \
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
#define KVM_REQ_TLB_FLUSH_GUEST \
        KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
        KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define CR0_RESERVED_BITS \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
        | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
        | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
        | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
        | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
        | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
        | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)
#define INVALID_GPA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
#define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
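/*
 * Worked example (illustrative): for a 2M page, x = PG_LEVEL_2M = 2, so
 * KVM_HPAGE_GFN_SHIFT(2) = 9, KVM_HPAGE_SHIFT(2) = 12 + 9 = 21,
 * KVM_HPAGE_SIZE(2) = 2 MiB, and KVM_PAGES_PER_HPAGE(2) = 512 4K pages.
 */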

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 256
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
        VCPU_REGS_RAX = __VCPU_REGS_RAX,
        VCPU_REGS_RCX = __VCPU_REGS_RCX,
        VCPU_REGS_RDX = __VCPU_REGS_RDX,
        VCPU_REGS_RBX = __VCPU_REGS_RBX,
        VCPU_REGS_RSP = __VCPU_REGS_RSP,
        VCPU_REGS_RBP = __VCPU_REGS_RBP,
        VCPU_REGS_RSI = __VCPU_REGS_RSI,
        VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = __VCPU_REGS_R8,
        VCPU_REGS_R9 = __VCPU_REGS_R9,
        VCPU_REGS_R10 = __VCPU_REGS_R10,
        VCPU_REGS_R11 = __VCPU_REGS_R11,
        VCPU_REGS_R12 = __VCPU_REGS_R12,
        VCPU_REGS_R13 = __VCPU_REGS_R13,
        VCPU_REGS_R14 = __VCPU_REGS_R14,
        VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS,

        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
        VCPU_EXREG_CR0,
        VCPU_EXREG_CR3,
        VCPU_EXREG_CR4,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
        VCPU_EXREG_EXIT_INFO_1,
        VCPU_EXREG_EXIT_INFO_2,
};

enum {
        VCPU_SREG_ES,
        VCPU_SREG_CS,
        VCPU_SREG_SS,
        VCPU_SREG_DS,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

enum exit_fastpath_completion {
        EXIT_FASTPATH_NONE,
        EXIT_FASTPATH_REENTER_GUEST,
        EXIT_FASTPATH_EXIT_HANDLED,
};
typedef enum exit_fastpath_completion fastpath_t;

struct x86_emulate_ctxt;
struct x86_exception;
enum x86_intercept;
enum x86_intercept_stage;

#define KVM_NR_DB_REGS 4

#define DR6_BUS_LOCK (1 << 11)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
#define DR6_RTM (1 << 16)
/*
 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
 * they will never be 0 for now, but when they are defined
 * in the future it will require no code change.
 *
 * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
 */
#define DR6_ACTIVE_LOW 0xffff0ff0
#define DR6_VOLATILE 0x0001e80f
#define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE)
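/*
 * Illustrative sketch (not upstream code): because the fixed bits are
 * treated as active-low, converting between an active-high "payload" view
 * of #DB status bits and the architectural DR6 value is a single XOR,
 * roughly:
 *
 *	dr6 = payload ^ DR6_ACTIVE_LOW;
 *
 * With the values above, DR6_FIXED_1 works out to 0xfffe07f0, the
 * architecturally must-be-one bits.
 */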

#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE (1 << 9)
#define DR7_GD (1 << 13)
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff2bff

#define KVM_GUESTDBG_VALID_MASK \
        (KVM_GUESTDBG_ENABLE | \
        KVM_GUESTDBG_SINGLESTEP | \
        KVM_GUESTDBG_USE_HW_BP | \
        KVM_GUESTDBG_USE_SW_BP | \
        KVM_GUESTDBG_INJECT_BP | \
        KVM_GUESTDBG_INJECT_DB | \
        KVM_GUESTDBG_BLOCKIRQ)


#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
        PFERR_WRITE_MASK | \
        PFERR_PRESENT_MASK)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING 1

struct kvm_kernel_irq_routing_entry;

/*
 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
 * also includes TDP pages) to determine whether or not a page can be used in
 * the given MMU context. This is a subset of the overall kvm_mmu_role to
 * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows allocating
 * 2 bytes per gfn instead of 4 bytes per gfn.
 *
 * Indirect upper-level shadow pages are tracked for write-protection via
 * gfn_track. As above, gfn_track is a 16 bit counter, so KVM must not create
 * more than 2^16-1 upper-level shadow pages at a single gfn, otherwise
 * gfn_track will overflow and explosions will ensue.
 *
 * A unique shadow page (SP) for a gfn is created if and only if an existing SP
 * cannot be reused. The ability to reuse a SP is tracked by its role, which
 * incorporates various mode bits and properties of the SP. Roughly speaking,
 * the number of unique SPs that can theoretically be created is 2^n, where n
 * is the number of bits that are used to compute the role.
 *
 * But, even though there are 18 bits in the mask below, not all combinations
 * of modes and flags are possible. The maximum number of possible upper-level
 * shadow pages for a single gfn is in the neighborhood of 2^13.
 *
 * - invalid shadow pages are not accounted.
 * - level is effectively limited to four combinations, not 16 as the number
 *   of bits would imply, as 4k SPs are not tracked (allowed to go unsync).
 * - level is effectively unused for non-PAE paging because there is exactly
 *   one upper level (see 4k SP exception above).
 * - quadrant is used only for non-PAE paging and is exclusive with
 *   gpte_is_8_bytes.
 * - execonly and ad_disabled are used only for nested EPT, which makes it
 *   exclusive with quadrant.
 */
union kvm_mmu_page_role {
        u32 word;
        struct {
                unsigned level:4;
                unsigned gpte_is_8_bytes:1;
                unsigned quadrant:2;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
                unsigned efer_nx:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
                unsigned smap_andnot_wp:1;
                unsigned ad_disabled:1;
                unsigned guest_mode:1;
                unsigned :6;

                /*
                 * This is left at the top of the word so that
                 * kvm_memslots_for_spte_role can extract it with a
                 * simple shift. While there is room, give it a whole
                 * byte so it is also faster to load it from memory.
                 */
                unsigned smm:8;
        };
};
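
/*
 * Minimal sketch (illustrative; kvm_mmu_role_smm_byte() is not an upstream
 * helper): because smm occupies the top byte of the 32-bit word, the address
 * space a role selects can be recovered with the single shift that the
 * comment above alludes to.
 */
static inline unsigned int kvm_mmu_role_smm_byte(union kvm_mmu_page_role role)
{
        return role.word >> 24; /* 0 = normal, non-zero = SMM */
}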

/*
 * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
 * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
 * including on nested transitions, if nothing in the full role changes then
 * MMU re-configuration can be skipped. @valid bit is set on first usage so we
 * don't treat all-zero structure as valid data.
 *
 * The properties that are tracked in the extended role but not the page role
 * are for things that either (a) do not affect the validity of the shadow page
 * or (b) are indirectly reflected in the shadow page's role. For example,
 * CR4.PKE only affects permission checks for software walks of the guest page
 * tables (because KVM doesn't support Protection Keys with shadow paging), and
 * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
 *
 * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
 * SMAP, but the MMU's permission checks for software walks need to be SMEP and
 * SMAP aware regardless of CR0.WP.
 */
union kvm_mmu_extended_role {
        u32 word;
        struct {
                unsigned int valid:1;
                unsigned int execonly:1;
                unsigned int cr0_pg:1;
                unsigned int cr4_pae:1;
                unsigned int cr4_pse:1;
                unsigned int cr4_pke:1;
                unsigned int cr4_smap:1;
                unsigned int cr4_smep:1;
                unsigned int cr4_la57:1;
                unsigned int efer_lma:1;
        };
};

union kvm_mmu_role {
        u64 as_u64;
        struct {
                union kvm_mmu_page_role base;
                union kvm_mmu_extended_role ext;
        };
};

struct kvm_rmap_head {
        unsigned long val;
};

struct kvm_pio_request {
        unsigned long linear_rip;
        unsigned long count;
        int in;
        int port;
        int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
        u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
        u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
        gpa_t pgd;
        hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
        ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

#define KVM_HAVE_MMU_RWLOCK

struct kvm_mmu_page;

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
        unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
                            u32 access, struct x86_exception *exception);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
        hpa_t root_hpa;
        gpa_t root_pgd;
        union kvm_mmu_role mmu_role;
        u8 root_level;
        u8 shadow_root_level;
        u8 ept_ad;
        bool direct_map;
        struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

        /*
         * Bitmap; bit set = permission fault
         * Byte index: page fault error code [4:1]
         * Bit index: pte permissions in ACC_* format
         */
        u8 permissions[16];

        /*
         * The pkru_mask indicates if protection key checks are needed. It
         * consists of 16 domains indexed by page fault error code bits [4:1],
         * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
         * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
         */
        u32 pkru_mask;

        u64 *pae_root;
        u64 *pml4_root;
        u64 *pml5_root;

        /*
         * Check zero bits on shadow page table entries; these bits
         * include not only hardware-reserved bits but also bits that
         * SPTEs never use.
         */
        struct rsvd_bits_validate shadow_zero_check;

        struct rsvd_bits_validate guest_rsvd_check;

        u64 pdptrs[4]; /* pae */
};
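
/*
 * Minimal sketch (illustrative; not the kernel's exact permission_fault()
 * helper): page fault error code bits [4:1] select a byte of permissions[],
 * the PTE's ACC_* permissions select a bit, and a set bit means
 * "permission fault".
 */
static inline bool kvm_mmu_would_fault(struct kvm_mmu *mmu, u32 pfec, u32 acc)
{
        return (mmu->permissions[(pfec >> 1) & 15] >> acc) & 1;
}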

struct kvm_tlb_range {
        u64 start_gfn;
        u64 pages;
};

enum pmc_type {
        KVM_PMC_GP = 0,
        KVM_PMC_FIXED,
};

struct kvm_pmc {
        enum pmc_type type;
        u8 idx;
        u64 counter;
        u64 eventsel;
        struct perf_event *perf_event;
        struct kvm_vcpu *vcpu;
        /*
         * eventsel value for general purpose counters,
         * ctrl value for fixed counters.
         */
        u64 current_config;
        bool is_paused;
};

struct kvm_pmu {
        unsigned nr_arch_gp_counters;
        unsigned nr_arch_fixed_counters;
        unsigned available_event_types;
        u64 fixed_ctr_ctrl;
        u64 global_ctrl;
        u64 global_status;
        u64 global_ovf_ctrl;
        u64 counter_bitmask[2];
        u64 global_ctrl_mask;
        u64 global_ovf_ctrl_mask;
        u64 reserved_bits;
        u64 raw_event_mask;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
        DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
        DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);

        /*
         * Gate to release perf_events that are not marked in
         * pmc_in_use, at most once per vCPU time slice.
         */
        bool need_cleanup;

        /*
         * The total number of programmed perf_events; it helps to avoid
         * a redundant check before cleanup if the guest doesn't use the
         * vPMU at all.
         */
        u8 event_count;
};

struct kvm_pmu_ops;

enum {
        KVM_DEBUGREG_BP_ENABLED = 1,
        KVM_DEBUGREG_WONT_EXIT = 2,
};

struct kvm_mtrr_range {
        u64 base;
        u64 mask;
        struct list_head node;
};

struct kvm_mtrr {
        struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
        mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
        u64 deftype;

        struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
        struct hrtimer timer;
        int index;
        union hv_stimer_config config;
        u64 count;
        u64 exp_time;
        struct hv_message msg;
        bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
        u64 version;
        u64 control;
        u64 msg_page;
        u64 evt_page;
        atomic64_t sint[HV_SYNIC_SINT_COUNT];
        atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
        DECLARE_BITMAP(auto_eoi_bitmap, 256);
        DECLARE_BITMAP(vec_bitmap, 256);
        bool active;
        bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
        struct kvm_vcpu *vcpu;
        u32 vp_index;
        u64 hv_vapic;
        s64 runtime_offset;
        struct kvm_vcpu_hv_synic synic;
        struct kvm_hyperv_exit exit;
        struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
        DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        cpumask_t tlb_flush;
        bool enforce_cpuid;
        struct {
                u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
                u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
                u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
                u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
                u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
                u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
        } cpuid_cache;
};

/* Xen HVM per vcpu emulation context */
struct kvm_vcpu_xen {
        u64 hypercall_rip;
        u32 current_runstate;
        bool vcpu_info_set;
        bool vcpu_time_info_set;
        bool runstate_set;
        struct gfn_to_hva_cache vcpu_info_cache;
        struct gfn_to_hva_cache vcpu_time_info_cache;
        struct gfn_to_hva_cache runstate_cache;
        u64 last_steal;
        u64 runstate_entry_time;
        u64 runstate_times[4];
};

struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
         */
        unsigned long regs[NR_VCPU_REGS];
        u32 regs_avail;
        u32 regs_dirty;

        unsigned long cr0;
        unsigned long cr0_guest_owned_bits;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr4_guest_rsvd_bits;
        unsigned long cr8;
        u32 host_pkru;
        u32 pkru;
        u32 hflags;
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic; /* kernel irqchip context */
        bool apicv_active;
        bool load_eoi_exitmap_pending;
        DECLARE_BITMAP(ioapic_handled_vectors, 256);
        unsigned long apic_attention;
        int32_t apic_arb_prio;
        int mp_state;
        u64 ia32_misc_enable_msr;
        u64 smbase;
        u64 smi_count;
        bool tpr_access_reporting;
        bool xsaves_enabled;
        u64 ia32_xss;
        u64 microcode_version;
        u64 arch_capabilities;
        u64 perf_capabilities;

        /*
         * Paging state of the vcpu
         *
         * If the vcpu runs in guest mode with two level paging this still saves
         * the paging mode of the l1 guest. This context is always used to
         * handle faults.
         */
        struct kvm_mmu *mmu;

        /* Non-nested MMU for L1 */
        struct kvm_mmu root_mmu;

        /* L1 MMU when running nested */
        struct kvm_mmu guest_mmu;

        /*
         * Paging state of an L2 guest (used for nested npt)
         *
         * This context will save all necessary information to walk page tables
         * of an L2 guest. This context is only initialized for page table
         * walking and not for faulting since we never handle l2 page faults on
         * the host.
         */
        struct kvm_mmu nested_mmu;

        /*
         * Pointer to the mmu context currently used for
         * gva_to_gpa translations.
         */
        struct kvm_mmu *walk_mmu;

        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_shadow_page_cache;
        struct kvm_mmu_memory_cache mmu_gfn_array_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        /*
         * QEMU userspace and the guest each have their own FPU state.
         * In vcpu_run, we switch between the user and guest FPU contexts.
         * While running a VCPU, the VCPU thread will have the guest FPU
         * context.
         *
         * Note that while the PKRU state lives inside the fpu registers,
         * it is switched out separately at VMENTER and VMEXIT time. The
         * "guest_fpu" state here contains the guest FPU context, with the
         * host PKRU bits.
         */
        struct fpu_guest guest_fpu;

        u64 xcr0;
        u64 guest_supported_xcr0;

        struct kvm_pio_request pio;
        void *pio_data;
        void *sev_pio_data;
        unsigned sev_pio_count;

        u8 event_exit_inst_len;

        struct kvm_queued_exception {
                bool pending;
                bool injected;
                bool has_error_code;
                u8 nr;
                u32 error_code;
                unsigned long payload;
                bool has_payload;
                u8 nested_apf;
        } exception;

        struct kvm_queued_interrupt {
                bool injected;
                bool soft;
                u8 nr;
        } interrupt;

        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 *cpuid_entries;

        u64 reserved_gpa_bits;
        int maxphyaddr;

        /* emulate context */

        struct x86_emulate_ctxt *emulate_ctxt;
        bool emulate_regs_need_sync_to_vcpu;
        bool emulate_regs_need_sync_from_vcpu;
        int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
        struct gfn_to_hva_cache pv_time;
        bool pv_time_enabled;
        /* set guest stopped flag in pvclock flags field */
        bool pvclock_set_guest_stopped_request;

        struct {
                u8 preempted;
                u64 msr_val;
                u64 last_steal;
                struct gfn_to_hva_cache cache;
        } st;

        u64 l1_tsc_offset;
        u64 tsc_offset; /* current tsc offset */
        u64 last_guest_tsc;
        u64 last_host_tsc;
        u64 tsc_offset_adjustment;
        u64 this_tsc_nsec;
        u64 this_tsc_write;
        u64 this_tsc_generation;
        bool tsc_catchup;
        bool tsc_always_catchup;
        s8 virtual_tsc_shift;
        u32 virtual_tsc_mult;
        u32 virtual_tsc_khz;
        s64 ia32_tsc_adjust_msr;
        u64 msr_ia32_power_ctl;
        u64 l1_tsc_scaling_ratio;
        u64 tsc_scaling_ratio; /* current scaling ratio */

        atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
        unsigned nmi_pending; /* NMI queued after currently running handler */
        bool nmi_injected; /* Trying to inject an NMI this entry */
        bool smi_pending; /* SMI queued after currently running handler */

        struct kvm_mtrr mtrr_state;
        u64 pat;

        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];
        unsigned long guest_debug_dr7;
        u64 msr_platform_info;
        u64 msr_misc_features_enables;

        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
        u64 mcg_ext_ctl;
        u64 *mce_banks;

        /* Cache MMIO info */
        u64 mmio_gva;
        unsigned mmio_access;
        gfn_t mmio_gfn;
        u64 mmio_gen;

        struct kvm_pmu pmu;

        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;

        bool hyperv_enabled;
        struct kvm_vcpu_hv *hyperv;
        struct kvm_vcpu_xen xen;

        cpumask_var_t wbinvd_dirty_mask;

        unsigned long last_retry_eip;
        unsigned long last_retry_addr;

        struct {
                bool halted;
                gfn_t gfns[ASYNC_PF_PER_VCPU];
                struct gfn_to_hva_cache data;
                u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
                u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
                u16 vec;
                u32 id;
                bool send_user_only;
                u32 host_apf_flags;
                unsigned long nested_apf_token;
                bool delivery_as_pf_vmexit;
                bool pageready_pending;
        } apf;

        /* OSVW MSRs (AMD only) */
        struct {
                u64 length;
                u64 status;
        } osvw;

        struct {
                u64 msr_val;
                struct gfn_to_hva_cache data;
        } pv_eoi;

        u64 msr_kvm_poll_control;

        /*
         * Indicates the guest is trying to write a gfn that contains one or
         * more of the PTEs used to translate the write itself, i.e. the access
         * is changing its own translation in the guest page tables. KVM exits
         * to userspace if emulation of the faulting instruction fails and this
         * flag is set, as KVM cannot make forward progress.
         *
         * If emulation fails for a write to guest page tables, KVM unprotects
         * (zaps) the shadow page for the target gfn and resumes the guest to
         * retry the non-emulatable instruction (on hardware). Unprotecting the
         * gfn doesn't allow forward progress for a self-changing access because
         * doing so also zaps the translation for the gfn, i.e. retrying the
         * instruction will hit a !PRESENT fault, which results in a new shadow
         * page and sends KVM back to square one.
         */
        bool write_fault_to_shadow_pgtable;

        /* set at EPT violation at this point */
        unsigned long exit_qualification;

        /* pv related host specific info */
        struct {
                bool pv_unhalted;
        } pv;

        int pending_ioapic_eoi;
        int pending_external_vector;

        /* be preempted when it's in kernel-mode (cpl=0) */
        bool preempted_in_kernel;

        /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
        bool l1tf_flush_l1d;

        /* Host CPU on which VM-entry was most recently attempted */
        int last_vmentry_cpu;

        /* AMD MSRC001_0015 Hardware Configuration */
        u64 msr_hwcr;

        /* pv related cpuid info */
        struct {
                /*
                 * value of the eax register in the KVM_CPUID_FEATURES CPUID
                 * leaf.
                 */
                u32 features;

                /*
                 * indicates whether pv emulation should be disabled if features
                 * are not present in the guest's cpuid
                 */
                bool enforce;
        } pv_cpuid;

        /* Protected Guests */
        bool guest_state_protected;

        /*
         * Set when PDPTRs were loaded directly by userspace without
         * reading the guest memory.
         */
        bool pdptrs_from_userspace;

#if IS_ENABLED(CONFIG_HYPERV)
        hpa_t hv_root_tdp;
#endif
};

struct kvm_lpage_info {
        int disallow_lpage;
};

struct kvm_arch_memory_slot {
        struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID. It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER 4
#define KVM_APIC_MODE_XAPIC_FLAT 8
#define KVM_APIC_MODE_X2APIC 16
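/*
 * Worked example (illustrative): xAPIC cluster mode uses a 4-bit member
 * bitmap in the LDR (mode 4), flat xAPIC uses all 8 LDR bits (mode 8), and
 * x2APIC uses a 16-bit member bitmap (mode 16). Because 4, 8 and 16 are
 * distinct powers of two, OR-ing together the modes seen across all APICs
 * yields a power of two iff every APIC is configured the same way.
 */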

struct kvm_apic_map {
        struct rcu_head rcu;
        u8 mode;
        u32 max_apic_id;
        union {
                struct kvm_lapic *xapic_flat_map[8];
                struct kvm_lapic *xapic_cluster_map[16][4];
        };
        struct kvm_lapic *phys_map[];
};

/* Hyper-V synthetic debugger (SynDbg) */
struct kvm_hv_syndbg {
        struct {
                u64 control;
                u64 status;
                u64 send_page;
                u64 recv_page;
                u64 pending_page;
        } control;
        u64 options;
};

/* Current state of Hyper-V TSC page clocksource */
enum hv_tsc_page_status {
        /* TSC page was not set up or disabled */
        HV_TSC_PAGE_UNSET = 0,
        /* TSC page MSR was written by the guest, update pending */
        HV_TSC_PAGE_GUEST_CHANGED,
        /* TSC page MSR was written by KVM userspace, update pending */
        HV_TSC_PAGE_HOST_CHANGED,
        /* TSC page was properly set up and is currently active */
        HV_TSC_PAGE_SET,
        /* TSC page is currently being updated and therefore is inactive */
        HV_TSC_PAGE_UPDATING,
        /* TSC page was set up with an inaccessible GPA */
        HV_TSC_PAGE_BROKEN,
};

/* Hyper-V emulation context */
struct kvm_hv {
        struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;
        enum hv_tsc_page_status hv_tsc_page_status;

        /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
        u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
        u64 hv_crash_ctl;

        struct ms_hyperv_tsc_page tsc_ref;

        struct idr conn_to_evt;

        u64 hv_reenlightenment_control;
        u64 hv_tsc_emulation_control;
        u64 hv_tsc_emulation_status;

        /* How many vCPUs have VP index != vCPU index */
        atomic_t num_mismatched_vp_indexes;

        /*
         * How many SynICs use 'AutoEOI' feature
         * (protected by arch.apicv_update_lock)
         */
        unsigned int synic_auto_eoi_used;

        struct hv_partition_assist_pg *hv_pa_pg;
        struct kvm_hv_syndbg hv_syndbg;
};

struct msr_bitmap_range {
        u32 flags;
        u32 nmsrs;
        u32 base;
        unsigned long *bitmap;
};

/* Xen emulation context */
struct kvm_xen {
        bool long_mode;
        u8 upcall_vector;
        gfn_t shinfo_gfn;
};

enum kvm_irqchip_mode {
        KVM_IRQCHIP_NONE,
        KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */
        KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_x86_msr_filter {
        u8 count;
        bool default_allow:1;
        struct msr_bitmap_range ranges[16];
};

#define APICV_INHIBIT_REASON_DISABLE 0
#define APICV_INHIBIT_REASON_HYPERV 1
#define APICV_INHIBIT_REASON_NESTED 2
#define APICV_INHIBIT_REASON_IRQWIN 3
#define APICV_INHIBIT_REASON_PIT_REINJ 4
#define APICV_INHIBIT_REASON_X2APIC 5

struct kvm_arch {
        unsigned long n_used_mmu_pages;
        unsigned long n_requested_mmu_pages;
        unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        u8 mmu_valid_gen;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct list_head active_mmu_pages;
        struct list_head zapped_obsolete_pages;
        struct list_head lpage_disallowed_mmu_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
        /*
         * Protects marking pages unsync during page faults, as TDP MMU page
         * faults only take mmu_lock for read. For simplicity, the unsync
         * pages lock is always taken when marking pages unsync regardless of
         * whether mmu_lock is held for read or write.
         */
        spinlock_t mmu_unsync_pages_lock;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
        atomic_t assigned_device_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map __rcu *apic_map;
        atomic_t apic_map_dirty;

        /* Protects apic_access_memslot_enabled and apicv_inhibit_reasons */
        struct mutex apicv_update_lock;

        bool apic_access_memslot_enabled;
        unsigned long apicv_inhibit_reasons;

        gpa_t wall_clock;

        bool mwait_in_guest;
        bool hlt_in_guest;
        bool pause_in_guest;
        bool cstate_in_guest;

        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
        raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_write;
        u32 last_tsc_khz;
        u64 cur_tsc_nsec;
        u64 cur_tsc_write;
        u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;

        raw_spinlock_t pvclock_gtod_sync_lock;
        bool use_master_clock;
        u64 master_kernel_ns;
        u64 master_cycle_now;
        struct delayed_work kvmclock_update_work;
        struct delayed_work kvmclock_sync_work;

        struct kvm_xen_hvm_config xen_hvm_config;

        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;

        struct kvm_hv hyperv;
        struct kvm_xen xen;

#ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
#endif

        bool backwards_tsc_observed;
        bool boot_vcpu_runs_old_kvmclock;
        u32 bsp_vcpu_id;

        u64 disabled_quirks;
        int cpu_dirty_logging_count;

        enum kvm_irqchip_mode irqchip_mode;
        u8 nr_reserved_ioapic_pins;

        bool disabled_lapic_found;

        bool x2apic_format;
        bool x2apic_broadcast_quirk_disabled;

        bool guest_can_read_msr_platform_info;
        bool exception_payload_enabled;

        bool bus_lock_detection_enabled;
        /*
         * If exit_on_emulation_error is set, and the in-kernel instruction
         * emulator fails to emulate an instruction, allow userspace
         * the opportunity to look at it.
         */
        bool exit_on_emulation_error;

        /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
        u32 user_space_msr_mask;
        struct kvm_x86_msr_filter __rcu *msr_filter;

        u32 hypercall_exit_enabled;

        /* Guest can access the SGX PROVISIONKEY. */
        bool sgx_provisioning_allowed;

        struct kvm_pmu_event_filter __rcu *pmu_event_filter;
        struct task_struct *nx_lpage_recovery_thread;

#ifdef CONFIG_X86_64
        /*
         * Whether the TDP MMU is enabled for this VM. This contains a
         * snapshot of the TDP MMU module parameter from when the VM was
         * created and remains unchanged for the life of the VM. If this is
         * true, TDP MMU handler functions will run for various MMU
         * operations.
         */
        bool tdp_mmu_enabled;

        /*
         * List of struct kvm_mmu_pages being used as roots.
         * All struct kvm_mmu_pages in the list should have
         * tdp_mmu_page set.
         *
         * For reads, this list is protected by:
         *      the MMU lock in read mode + RCU or
         *      the MMU lock in write mode
         *
         * For writes, this list is protected by:
         *      the MMU lock in read mode + the tdp_mmu_pages_lock or
         *      the MMU lock in write mode
         *
         * Roots will remain in the list until their tdp_mmu_root_count
         * drops to zero, at which point the thread that decremented the
         * count to zero should remove the root from the list and clean
         * it up, freeing the root after an RCU grace period.
         */
        struct list_head tdp_mmu_roots;

        /*
         * List of struct kvm_mmu_pages not being used as roots.
         * All struct kvm_mmu_pages in the list should have
         * tdp_mmu_page set and a tdp_mmu_root_count of 0.
         */
        struct list_head tdp_mmu_pages;

        /*
         * Protects accesses to the following fields when the MMU lock
         * is held in read mode:
         *  - tdp_mmu_roots (above)
         *  - tdp_mmu_pages (above)
         *  - the link field of struct kvm_mmu_pages used by the TDP MMU
         *  - lpage_disallowed_mmu_pages
         *  - the lpage_disallowed_link field of struct kvm_mmu_pages used
         *    by the TDP MMU
         * It is acceptable, but not necessary, to acquire this lock when
         * the thread holds the MMU lock in write mode.
         */
        spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */

        /*
         * If set, rmaps have been allocated for all memslots and should be
         * allocated for any newly created or modified memslots.
         */
        bool memslots_have_rmaps;

#if IS_ENABLED(CONFIG_HYPERV)
        hpa_t hv_root_tdp;
        spinlock_t hv_root_tdp_lock;
#endif
};

struct kvm_vm_stat {
        struct kvm_vm_stat_generic generic;
        u64 mmu_shadow_zapped;
        u64 mmu_pte_write;
        u64 mmu_pde_zapped;
        u64 mmu_flooded;
        u64 mmu_recycled;
        u64 mmu_cache_miss;
        u64 mmu_unsync;
        union {
                struct {
                        atomic64_t pages_4k;
                        atomic64_t pages_2m;
                        atomic64_t pages_1g;
                };
                atomic64_t pages[KVM_NR_PAGE_SIZES];
        };
        u64 nx_lpage_splits;
        u64 max_mmu_page_hash_collisions;
        u64 max_mmu_rmap_size;
};

struct kvm_vcpu_stat {
        struct kvm_vcpu_stat_generic generic;
        u64 pf_fixed;
        u64 pf_guest;
        u64 tlb_flush;
        u64 invlpg;

        u64 exits;
        u64 io_exits;
        u64 mmio_exits;
        u64 signal_exits;
        u64 irq_window_exits;
        u64 nmi_window_exits;
        u64 l1d_flush;
        u64 halt_exits;
        u64 request_irq_exits;
        u64 irq_exits;
        u64 host_state_reload;
        u64 fpu_reload;
        u64 insn_emulation;
        u64 insn_emulation_fail;
        u64 hypercalls;
        u64 irq_injections;
        u64 nmi_injections;
        u64 req_event;
        u64 nested_run;
        u64 directed_yield_attempted;
        u64 directed_yield_successful;
        u64 guest_mode;
};

struct x86_instruction_info;

struct msr_data {
        bool host_initiated;
        u32 index;
        u64 data;
};

struct kvm_lapic_irq {
        u32 vector;
        u16 delivery_mode;
        u16 dest_mode;
        bool level;
        u16 trig_mode;
        u32 shorthand;
        u32 dest_id;
        bool msi_redir_hint;
};

static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
{
        return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}

struct kvm_x86_ops {
        int (*hardware_enable)(void);
        void (*hardware_disable)(void);
        void (*hardware_unsetup)(void);
        bool (*cpu_has_accelerated_tpr)(void);
        bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
        void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);

        unsigned int vm_size;
        int (*vm_init)(struct kvm *kvm);
        void (*vm_destroy)(struct kvm *kvm);

        /* Create, but do not attach this VCPU */
        int (*vcpu_create)(struct kvm_vcpu *vcpu);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
        bool (*get_if_flag)(struct kvm_vcpu *vcpu);

        void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
        void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
        int (*tlb_remote_flush)(struct kvm *kvm);
        int (*tlb_remote_flush_with_range)(struct kvm *kvm,
                                           struct kvm_tlb_range *range);

        /*
         * Flush any TLB entries associated with the given GVA.
         * Does not need to flush GPA->HPA mappings.
         * Can potentially get non-canonical addresses through INVLPGs, which
         * the implementation may choose to ignore if appropriate.
         */
        void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

        /*
         * Flush any TLB entries created by the guest. Like tlb_flush_gva(),
         * does not need to flush GPA->HPA mappings.
         */
        void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);

        enum exit_fastpath_completion (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu,
                           enum exit_fastpath_completion exit_fastpath);
        int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu);
        void (*cancel_injection)(struct kvm_vcpu *vcpu);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        bool (*check_apicv_inhibit_reasons)(ulong bit);
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
        bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
        int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
        int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

        void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
                             int root_level);

        bool (*has_wbinvd_exit)(void);

        u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
        u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
        void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);

        /*
         * Retrieve somewhat arbitrary exit information. Intended to be used
         * only from within tracepoints to avoid VMREADs when tracing is off.
         */
        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
                              u32 *exit_int_info, u32 *exit_int_info_err_code);

        int (*check_intercept)(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage,
                               struct x86_exception *exception);
        void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);

        void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

        void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);

        /*
         * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
         * value indicates CPU dirty logging is unsupported or disabled.
         */
        int cpu_dirty_log_size;
        void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);

        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;
        const struct kvm_x86_nested_ops *nested_ops;

        /*
         * Architecture specific hooks for vCPU blocking due to the
         * HLT instruction.
         * Returns for .pre_block():
         *    - 0 means continue to block the vCPU.
         *    - 1 means we cannot block the vCPU since some event
         *      happened during this period, such as the 'ON' bit in
         *      the posted-interrupts descriptor being set.
         */
        int (*pre_block)(struct kvm_vcpu *vcpu);
        void (*post_block)(struct kvm_vcpu *vcpu);
1449 | ||
1450 | void (*vcpu_blocking)(struct kvm_vcpu *vcpu); | |
1451 | void (*vcpu_unblocking)(struct kvm_vcpu *vcpu); | |
1452 | ||
1453 | int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq, | |
1454 | uint32_t guest_irq, bool set); | |
1455 | void (*start_assignment)(struct kvm *kvm); | |
1456 | void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu); | |
1457 | bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu); | |
1458 | ||
1459 | int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, | |
1460 | bool *expired); | |
1461 | void (*cancel_hv_timer)(struct kvm_vcpu *vcpu); | |
1462 | ||
1463 | void (*setup_mce)(struct kvm_vcpu *vcpu); | |
1464 | ||
1465 | int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection); | |
1466 | int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate); | |
1467 | int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate); | |
1468 | void (*enable_smi_window)(struct kvm_vcpu *vcpu); | |
1469 | ||
1470 | int (*mem_enc_op)(struct kvm *kvm, void __user *argp); | |
1471 | int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); | |
1472 | int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); | |
1473 | int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd); | |
1474 | ||
1475 | int (*get_msr_feature)(struct kvm_msr_entry *entry); | |
1476 | ||
1477 | bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, void *insn, int insn_len); | |
1478 | ||
1479 | bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu); | |
1480 | int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu); | |
1481 | ||
1482 | void (*migrate_timers)(struct kvm_vcpu *vcpu); | |
1483 | void (*msr_filter_changed)(struct kvm_vcpu *vcpu); | |
1484 | int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err); | |
1485 | ||
1486 | void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector); | |
1487 | }; | |
1488 | ||
1489 | struct kvm_x86_nested_ops { | |
1490 | void (*leave_nested)(struct kvm_vcpu *vcpu); | |
1491 | int (*check_events)(struct kvm_vcpu *vcpu); | |
1492 | bool (*hv_timer_pending)(struct kvm_vcpu *vcpu); | |
1493 | void (*triple_fault)(struct kvm_vcpu *vcpu); | |
1494 | int (*get_state)(struct kvm_vcpu *vcpu, | |
1495 | struct kvm_nested_state __user *user_kvm_nested_state, | |
1496 | unsigned user_data_size); | |
1497 | int (*set_state)(struct kvm_vcpu *vcpu, | |
1498 | struct kvm_nested_state __user *user_kvm_nested_state, | |
1499 | struct kvm_nested_state *kvm_state); | |
1500 | bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu); | |
1501 | int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa); | |
1502 | ||
1503 | int (*enable_evmcs)(struct kvm_vcpu *vcpu, | |
1504 | uint16_t *vmcs_version); | |
1505 | uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu); | |
1506 | }; | |
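/*
 * A minimal sketch of how a vendor module might populate this table
 * (hypothetical names, not the actual VMX/SVM instances):
 *
 *	static struct kvm_x86_nested_ops example_nested_ops = {
 *		.leave_nested	= example_leave_nested,
 *		.check_events	= example_check_events,
 *		.get_state	= example_get_nested_state,
 *		.set_state	= example_set_nested_state,
 *	};
 */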
1507 | ||
1508 | struct kvm_x86_init_ops { | |
1509 | int (*cpu_has_kvm_support)(void); | |
1510 | int (*disabled_by_bios)(void); | |
1511 | int (*check_processor_compatibility)(void); | |
1512 | int (*hardware_setup)(void); | |
1513 | bool (*intel_pt_intr_in_guest)(void); | |
1514 | ||
1515 | struct kvm_x86_ops *runtime_ops; | |
1516 | }; | |
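/*
 * Sketch (hypothetical vendor module): the init ops describe the
 * one-time setup path, while ->runtime_ops becomes kvm_x86_ops once
 * hardware setup succeeds:
 *
 *	static struct kvm_x86_init_ops example_init_ops = {
 *		.cpu_has_kvm_support		= example_has_support,
 *		.disabled_by_bios		= example_disabled_by_bios,
 *		.check_processor_compatibility	= example_check_compat,
 *		.hardware_setup			= example_hardware_setup,
 *		.runtime_ops			= &example_x86_ops,
 *	};
 */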
1517 | ||
1518 | struct kvm_arch_async_pf { | |
1519 | u32 token; | |
1520 | gfn_t gfn; | |
1521 | unsigned long cr3; | |
1522 | bool direct_map; | |
1523 | }; | |
1524 | ||
1525 | extern u32 __read_mostly kvm_nr_uret_msrs; | |
1526 | extern u64 __read_mostly host_efer; | |
1527 | extern bool __read_mostly allow_smaller_maxphyaddr; | |
1528 | extern bool __read_mostly enable_apicv; | |
1529 | extern struct kvm_x86_ops kvm_x86_ops; | |
1530 | ||
1531 | #define KVM_X86_OP(func) \ | |
1532 | DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func)); | |
1533 | #define KVM_X86_OP_NULL KVM_X86_OP | |
1534 | #include <asm/kvm-x86-ops.h> | |
1535 | ||
1536 | static inline void kvm_ops_static_call_update(void) | |
1537 | { | |
1538 | #define KVM_X86_OP(func) \ | |
1539 | static_call_update(kvm_x86_##func, kvm_x86_ops.func); | |
1540 | #define KVM_X86_OP_NULL KVM_X86_OP | |
1541 | #include <asm/kvm-x86-ops.h> | |
1542 | } | |
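/*
 * For reference, each KVM_X86_OP(func) above expands, for a
 * hypothetical op named "flush", roughly to:
 *
 *	DECLARE_STATIC_CALL(kvm_x86_flush,
 *			    *(((struct kvm_x86_ops *)0)->flush));
 *
 * and, inside kvm_ops_static_call_update():
 *
 *	static_call_update(kvm_x86_flush, kvm_x86_ops.flush);
 *
 * letting callers write static_call(kvm_x86_flush)(vcpu) instead of
 * taking an indirect branch through kvm_x86_ops.
 */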
1543 | ||
1544 | #define __KVM_HAVE_ARCH_VM_ALLOC | |
1545 | static inline struct kvm *kvm_arch_alloc_vm(void) | |
1546 | { | |
1547 | return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); | |
1548 | } | |
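/*
 * Note: kvm_x86_ops.vm_size is set by the vendor module to the size of
 * its containing VM structure (the vendor struct embeds struct kvm),
 * so this single allocation holds both common and vendor-private
 * state; GFP_KERNEL_ACCOUNT charges it to the creating cgroup.
 */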
1549 | void kvm_arch_free_vm(struct kvm *kvm); | |
1550 | ||
1551 | #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB | |
1552 | static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) | |
1553 | { | |
1554 | 	if (kvm_x86_ops.tlb_remote_flush && | |
1555 | 	    !static_call(kvm_x86_tlb_remote_flush)(kvm)) | |
1556 | 		return 0; | |
1557 | ||
1558 | 	return -ENOTSUPP; | |
1559 | } | |
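/*
 * Contract sketch (the fallback itself lives in generic KVM code): a
 * non-zero return tells the caller that no hardware-assisted remote
 * flush happened, so it must fall back to request-based flushing,
 * conceptually:
 *
 *	if (kvm_arch_flush_remote_tlb(kvm))
 *		kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 */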
1560 | ||
1561 | void kvm_mmu_x86_module_init(void); | |
1562 | int kvm_mmu_vendor_module_init(void); | |
1563 | void kvm_mmu_vendor_module_exit(void); | |
1564 | ||
1565 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu); | |
1566 | int kvm_mmu_create(struct kvm_vcpu *vcpu); | |
1567 | void kvm_mmu_init_vm(struct kvm *kvm); | |
1568 | void kvm_mmu_uninit_vm(struct kvm *kvm); | |
1569 | ||
1570 | void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu); | |
1571 | void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); | |
1572 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, | |
1573 | const struct kvm_memory_slot *memslot, | |
1574 | int start_level); | |
1575 | void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, | |
1576 | const struct kvm_memory_slot *memslot); | |
1577 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, | |
1578 | const struct kvm_memory_slot *memslot); | |
1579 | void kvm_mmu_zap_all(struct kvm *kvm); | |
1580 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); | |
1581 | unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); | |
1582 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages); | |
1583 | ||
1584 | int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); | |
1585 | ||
1586 | int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, | |
1587 | const void *val, int bytes); | |
1588 | ||
1589 | struct kvm_irq_mask_notifier { | |
1590 | void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); | |
1591 | int irq; | |
1592 | struct hlist_node link; | |
1593 | }; | |
1594 | ||
1595 | void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, | |
1596 | struct kvm_irq_mask_notifier *kimn); | |
1597 | void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, | |
1598 | struct kvm_irq_mask_notifier *kimn); | |
1599 | void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, | |
1600 | bool mask); | |
1601 | ||
1602 | extern bool tdp_enabled; | |
1603 | ||
1604 | u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu); | |
1605 | ||
1606 | /* control of guest tsc rate supported? */ | |
1607 | extern bool kvm_has_tsc_control; | |
1608 | /* maximum supported tsc_khz for guests */ | |
1609 | extern u32 kvm_max_guest_tsc_khz; | |
1610 | /* number of bits of the fractional part of the TSC scaling ratio */ | |
1611 | extern u8 kvm_tsc_scaling_ratio_frac_bits; | |
1612 | /* maximum allowed value of TSC scaling ratio */ | |
1613 | extern u64 kvm_max_tsc_scaling_ratio; | |
1614 | /* 1ull << kvm_tsc_scaling_ratio_frac_bits */ | |
1615 | extern u64 kvm_default_tsc_scaling_ratio; | |
1616 | /* bus lock detection supported? */ | |
1617 | extern bool kvm_has_bus_lock_exit; | |
1618 | ||
1619 | extern u64 kvm_mce_cap_supported; | |
1620 | ||
1621 | /* | |
1622 | * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing | |
1623 | * userspace I/O) to indicate that the emulation context | |
1624 | * should be reused as is, i.e. skip initialization of | |
1625 | * emulation context, instruction fetch and decode. | |
1626 | * | |
1627 | * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware. | |
1628 | * Indicates that only select instructions (tagged with | |
1629 | * EmulateOnUD) should be emulated (to minimize the emulator | |
1630 | * attack surface). See also EMULTYPE_TRAP_UD_FORCED. | |
1631 | * | |
1632 | * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to | |
1633 | * decode the instruction length. For use *only* by | |
1634 | * kvm_x86_ops.skip_emulated_instruction() implementations. | |
1635 | * | |
1636 | * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to | |
1637 |  * retry native execution under certain conditions. | |
1638 |  * Can only be set in conjunction with EMULTYPE_PF. | |
1639 | * | |
1640 | * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was | |
1641 | * triggered by KVM's magic "force emulation" prefix, | |
1642 | * which is opt in via module param (off by default). | |
1643 | * Bypasses EmulateOnUD restriction despite emulating | |
1644 | * due to an intercepted #UD (see EMULTYPE_TRAP_UD). | |
1645 | * Used to test the full emulator from userspace. | |
1646 | * | |
1647 | * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware | |
1648 | * backdoor emulation, which is opt in via module param. | |
1649 | * VMware backdoor emulation handles select instructions | |
1650 | * and reinjects the #GP for all other cases. | |
1651 | * | |
1652 | * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which | |
1653 |  * case the CR2/GPA value passed on the stack is valid. | |
1654 | */ | |
1655 | #define EMULTYPE_NO_DECODE (1 << 0) | |
1656 | #define EMULTYPE_TRAP_UD (1 << 1) | |
1657 | #define EMULTYPE_SKIP (1 << 2) | |
1658 | #define EMULTYPE_ALLOW_RETRY_PF (1 << 3) | |
1659 | #define EMULTYPE_TRAP_UD_FORCED (1 << 4) | |
1660 | #define EMULTYPE_VMWARE_GP (1 << 5) | |
1661 | #define EMULTYPE_PF (1 << 6) | |
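/*
 * The EMULTYPE_* values combine as a bitmask; e.g. the page-fault path
 * requests emulation that is allowed to bail out and retry the
 * faulting instruction natively (schematic):
 *
 *	return kvm_emulate_instruction(vcpu, EMULTYPE_PF |
 *					     EMULTYPE_ALLOW_RETRY_PF);
 */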
1662 | ||
1663 | int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); | |
1664 | int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, | |
1665 | void *insn, int insn_len); | |
1666 | ||
1667 | void kvm_enable_efer_bits(u64); | |
1668 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); | |
1669 | int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated); | |
1670 | int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data); | |
1671 | int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data); | |
1672 | int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu); | |
1673 | int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu); | |
1674 | int kvm_emulate_as_nop(struct kvm_vcpu *vcpu); | |
1675 | int kvm_emulate_invd(struct kvm_vcpu *vcpu); | |
1676 | int kvm_emulate_mwait(struct kvm_vcpu *vcpu); | |
1677 | int kvm_handle_invalid_op(struct kvm_vcpu *vcpu); | |
1678 | int kvm_emulate_monitor(struct kvm_vcpu *vcpu); | |
1679 | ||
1680 | int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in); | |
1681 | int kvm_emulate_cpuid(struct kvm_vcpu *vcpu); | |
1682 | int kvm_emulate_halt(struct kvm_vcpu *vcpu); | |
1683 | int kvm_vcpu_halt(struct kvm_vcpu *vcpu); | |
1684 | int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu); | |
1685 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); | |
1686 | ||
1687 | void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); | |
1688 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); | |
1689 | void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); | |
1690 | ||
1691 | int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, | |
1692 | int reason, bool has_error_code, u32 error_code); | |
1693 | ||
1694 | void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0); | |
1695 | void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4); | |
1696 | int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); | |
1697 | int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); | |
1698 | int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); | |
1699 | int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); | |
1700 | int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); | |
1701 | void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); | |
1702 | unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); | |
1703 | void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); | |
1704 | void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); | |
1705 | int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu); | |
1706 | ||
1707 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); | |
1708 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); | |
1709 | ||
1710 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); | |
1711 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | |
1712 | int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu); | |
1713 | ||
1714 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); | |
1715 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | |
1716 | void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload); | |
1717 | void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr); | |
1718 | void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | |
1719 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); | |
1720 | bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, | |
1721 | struct x86_exception *fault); | |
1722 | int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, | |
1723 | gfn_t gfn, void *data, int offset, int len, | |
1724 | u32 access); | |
1725 | bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); | |
1726 | bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr); | |
1727 | ||
1728 | static inline int __kvm_irq_line_state(unsigned long *irq_state, | |
1729 | int irq_source_id, int level) | |
1730 | { | |
1731 | 	/* Logical OR for level-triggered interrupts */ | |
1732 | if (level) | |
1733 | __set_bit(irq_source_id, irq_state); | |
1734 | else | |
1735 | __clear_bit(irq_source_id, irq_state); | |
1736 | ||
1737 | return !!(*irq_state); | |
1738 | } | |
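/*
 * Schematic usage: an irqchip keeps one bitmap of asserting source IDs
 * per pin, so a level-triggered line stays raised until every source
 * has dropped it, e.g. (mirroring the PIC's pattern):
 *
 *	level = __kvm_irq_line_state(&irq_states[irq],
 *				     irq_source_id, level);
 */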
1739 | ||
1740 | #define KVM_MMU_ROOT_CURRENT BIT(0) | |
1741 | #define KVM_MMU_ROOT_PREVIOUS(i) BIT(1 + (i)) | |
1742 | #define KVM_MMU_ROOTS_ALL (~0UL) | |
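/*
 * These build the roots_to_free mask consumed by kvm_mmu_free_roots()
 * below; e.g. to drop the active root plus the first cached previous
 * root:
 *
 *	kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
 *			   KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0));
 */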
1743 | ||
1744 | int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level); | |
1745 | void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id); | |
1746 | ||
1747 | void kvm_inject_nmi(struct kvm_vcpu *vcpu); | |
1748 | ||
1749 | void kvm_update_dr7(struct kvm_vcpu *vcpu); | |
1750 | ||
1751 | int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); | |
1752 | void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | |
1753 | void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, | |
1754 | ulong roots_to_free); | |
1755 | void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu); | |
1756 | gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, | |
1757 | struct x86_exception *exception); | |
1758 | gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, | |
1759 | struct x86_exception *exception); | |
1760 | gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, | |
1761 | struct x86_exception *exception); | |
1762 | gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, | |
1763 | struct x86_exception *exception); | |
1764 | gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, | |
1765 | struct x86_exception *exception); | |
1766 | ||
1767 | bool kvm_apicv_activated(struct kvm *kvm); | |
1768 | void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu); | |
1769 | void kvm_request_apicv_update(struct kvm *kvm, bool activate, | |
1770 | unsigned long bit); | |
1771 | ||
1772 | void __kvm_request_apicv_update(struct kvm *kvm, bool activate, | |
1773 | unsigned long bit); | |
1774 | ||
1775 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); | |
1776 | ||
1777 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, | |
1778 | void *insn, int insn_len); | |
1779 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); | |
1780 | void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, | |
1781 | gva_t gva, hpa_t root_hpa); | |
1782 | void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid); | |
1783 | void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd); | |
1784 | ||
1785 | void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, | |
1786 | int tdp_max_root_level, int tdp_huge_page_level); | |
1787 | ||
1788 | static inline u16 kvm_read_ldt(void) | |
1789 | { | |
1790 | u16 ldt; | |
1791 | asm("sldt %0" : "=g"(ldt)); | |
1792 | return ldt; | |
1793 | } | |
1794 | ||
1795 | static inline void kvm_load_ldt(u16 sel) | |
1796 | { | |
1797 | asm("lldt %0" : : "rm"(sel)); | |
1798 | } | |
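/*
 * Typical pairing (sketch): snapshot the host LDT selector around a
 * stretch of code that may clobber it, then restore it:
 *
 *	u16 host_ldt = kvm_read_ldt();
 *	// ...
 *	kvm_load_ldt(host_ldt);
 */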
1799 | ||
1800 | #ifdef CONFIG_X86_64 | |
1801 | static inline unsigned long read_msr(unsigned long msr) | |
1802 | { | |
1803 | u64 value; | |
1804 | ||
1805 | rdmsrl(msr, value); | |
1806 | return value; | |
1807 | } | |
1808 | #endif | |
1809 | ||
1810 | static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) | |
1811 | { | |
1812 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); | |
1813 | } | |
1814 | ||
1815 | #define TSS_IOPB_BASE_OFFSET 0x66 | |
1816 | #define TSS_BASE_SIZE 0x68 | |
1817 | #define TSS_IOPB_SIZE (65536 / 8) | |
1818 | #define TSS_REDIRECTION_SIZE (256 / 8) | |
1819 | #define RMODE_TSS_SIZE \ | |
1820 | (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) | |
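/*
 * I.e. 0x68 + 256/8 + 65536/8 + 1 = 8329 bytes; the final byte is the
 * architecturally required all-ones terminator that must follow the
 * I/O permission bitmap.
 */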
1821 | ||
1822 | enum { | |
1823 | TASK_SWITCH_CALL = 0, | |
1824 | TASK_SWITCH_IRET = 1, | |
1825 | TASK_SWITCH_JMP = 2, | |
1826 | TASK_SWITCH_GATE = 3, | |
1827 | }; | |
1828 | ||
1829 | #define HF_GIF_MASK (1 << 0) | |
1830 | #define HF_NMI_MASK (1 << 3) | |
1831 | #define HF_IRET_MASK (1 << 4) | |
1832 | #define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */ | |
1833 | #define HF_SMM_MASK (1 << 6) | |
1834 | #define HF_SMM_INSIDE_NMI_MASK (1 << 7) | |
1835 | ||
1836 | #define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE | |
1837 | #define KVM_ADDRESS_SPACE_NUM 2 | |
1838 | ||
1839 | #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0) | |
1840 | #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm) | |
1841 | ||
1842 | #define KVM_ARCH_WANT_MMU_NOTIFIER | |
1843 | ||
1844 | int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); | |
1845 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); | |
1846 | int kvm_cpu_has_extint(struct kvm_vcpu *v); | |
1847 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | |
1848 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); | |
1849 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); | |
1850 | void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); | |
1851 | ||
1852 | int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, | |
1853 | unsigned long ipi_bitmap_high, u32 min, | |
1854 | unsigned long icr, int op_64_bit); | |
1855 | ||
1856 | int kvm_add_user_return_msr(u32 msr); | |
1857 | int kvm_find_user_return_msr(u32 msr); | |
1858 | int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask); | |
1859 | ||
1860 | static inline bool kvm_is_supported_user_return_msr(u32 msr) | |
1861 | { | |
1862 | return kvm_find_user_return_msr(msr) >= 0; | |
1863 | } | |
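/*
 * Usage sketch: a vendor module registers an MSR once during setup and
 * later updates the value to load on return to userspace; the slot
 * index returned by kvm_add_user_return_msr()/kvm_find_user_return_msr()
 * is what kvm_set_user_return_msr() takes (mask -1ull updates all bits):
 *
 *	slot = kvm_add_user_return_msr(MSR_STAR);
 *	...
 *	kvm_set_user_return_msr(slot, guest_val, -1ull);
 */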
1864 | ||
1865 | u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio); | |
1866 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc); | |
1867 | u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier); | |
1868 | u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier); | |
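/*
 * The ratio arguments above are fixed-point numbers with
 * kvm_tsc_scaling_ratio_frac_bits fractional bits, so scaling is
 * conceptually (a 128-bit intermediate avoids overflow):
 *
 *	scaled = (tsc * ratio) >> kvm_tsc_scaling_ratio_frac_bits;
 *
 * and a ratio of kvm_default_tsc_scaling_ratio means "multiply by 1".
 */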
1869 | ||
1870 | unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu); | |
1871 | bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); | |
1872 | ||
1873 | void kvm_make_mclock_inprogress_request(struct kvm *kvm); | |
1874 | void kvm_make_scan_ioapic_request(struct kvm *kvm); | |
1875 | void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, | |
1876 | unsigned long *vcpu_bitmap); | |
1877 | ||
1878 | bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, | |
1879 | struct kvm_async_pf *work); | |
1880 | void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, | |
1881 | struct kvm_async_pf *work); | |
1882 | void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, | |
1883 | struct kvm_async_pf *work); | |
1884 | void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu); | |
1885 | bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu); | |
1886 | extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); | |
1887 | ||
1888 | int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); | |
1889 | int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); | |
1890 | void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu); | |
1891 | ||
1892 | int kvm_is_in_guest(void); | |
1893 | ||
1894 | void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, | |
1895 | u32 size); | |
1896 | bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); | |
1897 | bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); | |
1898 | ||
1899 | bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, | |
1900 | struct kvm_vcpu **dest_vcpu); | |
1901 | ||
1902 | void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, | |
1903 | struct kvm_lapic_irq *irq); | |
1904 | ||
1905 | static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) | |
1906 | { | |
1907 | /* We can only post Fixed and LowPrio IRQs */ | |
1908 | return (irq->delivery_mode == APIC_DM_FIXED || | |
1909 | irq->delivery_mode == APIC_DM_LOWEST); | |
1910 | } | |
1911 | ||
1912 | static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) | |
1913 | { | |
1914 | static_call_cond(kvm_x86_vcpu_blocking)(vcpu); | |
1915 | } | |
1916 | ||
1917 | static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) | |
1918 | { | |
1919 | static_call_cond(kvm_x86_vcpu_unblocking)(vcpu); | |
1920 | } | |
1921 | ||
1922 | static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} | |
1923 | ||
1924 | static inline int kvm_cpu_get_apicid(int mps_cpu) | |
1925 | { | |
1926 | #ifdef CONFIG_X86_LOCAL_APIC | |
1927 | return default_cpu_present_to_apicid(mps_cpu); | |
1928 | #else | |
1929 | WARN_ON_ONCE(1); | |
1930 | return BAD_APICID; | |
1931 | #endif | |
1932 | } | |
1933 | ||
1934 | #define put_smstate(type, buf, offset, val) \ | |
1935 | 	*(type *)((buf) + (offset) - 0x7e00) = (val) | |
1936 | ||
1937 | #define GET_SMSTATE(type, buf, offset) \ | |
1938 | (*(type *)((buf) + (offset) - 0x7e00)) | |
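/*
 * The 0x7e00 bias exists because only the top 512 bytes of SMRAM state
 * (0x7e00..0x7fff relative to the save area) are buffered, while
 * callers index with the architectural offsets; e.g. the 64-bit RIP
 * save slot at offset 0x7f78:
 *
 *	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
 *	rip = GET_SMSTATE(u64, buf, 0x7f78);
 */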
1939 | ||
1940 | int kvm_cpu_dirty_log_size(void); | |
1941 | ||
1942 | int alloc_all_memslots_rmaps(struct kvm *kvm); | |
1943 | ||
1944 | #endif /* _ASM_X86_KVM_HOST_H */ |