/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS		288
#define KVM_SOFT_MAX_VCPUS	240
#define KVM_MAX_VCPU_ID		1023
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	3

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS	KVM_IOAPIC_NUM_PINS

#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_BUS_LOCK_DETECTION_VALID_MODE	(KVM_BUS_LOCK_DETECTION_OFF | \
						 KVM_BUS_LOCK_DETECTION_EXIT)

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_MMU_PGD		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_NESTED_STATE_PAGES	KVM_ARCH_REQ(24)
#define KVM_REQ_APICV_UPDATE \
	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT	KVM_ARCH_REQ(26)
#define KVM_REQ_TLB_FLUSH_GUEST \
	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY		KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED	KVM_ARCH_REQ(29)
#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
	KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
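
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): arch requests are raised and consumed through the generic
 * request API, e.g.:
 *
 *	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu))
 *		handle_triple_fault(vcpu);	// hypothetical handler
 */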

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)
#define INVALID_GPA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_MAX_HUGEPAGE_LEVEL	PG_LEVEL_1G
#define KVM_NR_PAGE_SIZES	(KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
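
/*
 * Illustrative sketch (editorial addition): for a 2MiB page,
 * KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) == 9, so KVM_HPAGE_SIZE(PG_LEVEL_2M)
 * is 1UL << (PAGE_SHIFT + 9) == 2MiB, and gfn_to_index() yields the
 * 2MiB-granule index of a gfn within its memslot, roughly as used by
 * lpage_info_slot():
 *
 *	idx = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_2M);
 *	linfo = &slot->arch.lpage_info[PG_LEVEL_2M - 2][idx];
 */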

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 256
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8  = __VCPU_REGS_R8,
	VCPU_REGS_R9  = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS,

	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR0,
	VCPU_EXREG_CR3,
	VCPU_EXREG_CR4,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
	VCPU_EXREG_EXIT_INFO_1,
	VCPU_EXREG_EXIT_INFO_2,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_REENTER_GUEST,
	EXIT_FASTPATH_EXIT_HANDLED,
};
typedef enum exit_fastpath_completion fastpath_t;

struct x86_emulate_ctxt;
struct x86_exception;
enum x86_intercept;
enum x86_intercept_stage;

#define KVM_NR_DB_REGS	4

#define DR6_BUS_LOCK	(1 << 11)
#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
/*
 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
 * We can regard all the bits in DR6_FIXED_1 as active-low bits;
 * they will never be 0 for now, but when they are defined
 * in the future it will require no code change.
 *
 * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
 */
#define DR6_ACTIVE_LOW	0xffff0ff0
#define DR6_VOLATILE	0x0001e80f
#define DR6_FIXED_1	(DR6_ACTIVE_LOW & ~DR6_VOLATILE)
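
/*
 * Illustrative sketch (editorial addition): exception delivery merges a
 * #DB payload into the guest-visible DR6 with the active-low convention,
 * roughly:
 *
 *	vcpu->arch.dr6 |= DR6_ACTIVE_LOW;	// init/reset value
 *	vcpu->arch.dr6 |= payload;
 *	vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW;	// a 1 in the payload
 *							// clears an active-low bit
 */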

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define KVM_GUESTDBG_VALID_MASK \
	(KVM_GUESTDBG_ENABLE | \
	KVM_GUESTDBG_SINGLESTEP | \
	KVM_GUESTDBG_USE_HW_BP | \
	KVM_GUESTDBG_USE_SW_BP | \
	KVM_GUESTDBG_INJECT_BP | \
	KVM_GUESTDBG_INJECT_DB)


#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)
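
/*
 * Illustrative sketch (editorial addition): with direct (TDP) paging, a
 * write fault taken while the guest was modifying its own nested page
 * tables matches PFERR_NESTED_GUEST_PAGE, roughly:
 *
 *	if (vcpu->arch.mmu->direct_map &&
 *	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE)
 *		// unprotect the gfn and retry instead of emulating
 */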

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
 * also includes TDP pages) to determine whether or not a page can be used in
 * the given MMU context.  This is a subset of the overall kvm_mmu_role to
 * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows allocating
 * 2 bytes per gfn instead of 4 bytes per gfn.
 *
 * Indirect upper-level shadow pages are tracked for write-protection via
 * gfn_track.  As above, gfn_track is a 16 bit counter, so KVM must not create
 * more than 2^16-1 upper-level shadow pages at a single gfn, otherwise
 * gfn_track will overflow and explosions will ensue.
 *
 * A unique shadow page (SP) for a gfn is created if and only if an existing SP
 * cannot be reused.  The ability to reuse a SP is tracked by its role, which
 * incorporates various mode bits and properties of the SP.  Roughly speaking,
 * the number of unique SPs that can theoretically be created is 2^n, where n
 * is the number of bits that are used to compute the role.
 *
 * But, even though there are 18 bits in the mask below, not all combinations
 * of modes and flags are possible.  The maximum number of possible upper-level
 * shadow pages for a single gfn is in the neighborhood of 2^13.
 *
 *  - invalid shadow pages are not accounted.
 *  - level is effectively limited to four combinations, not 16 as the number
 *    of bits would imply, as 4k SPs are not tracked (allowed to go unsync).
 *  - level is effectively unused for non-PAE paging because there is exactly
 *    one upper level (see 4k SP exception above).
 *  - quadrant is used only for non-PAE paging and is exclusive with
 *    gpte_is_8_bytes.
 *  - execonly and ad_disabled are used only for nested EPT, which makes it
 *    exclusive with quadrant.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned gpte_is_8_bytes:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned efer_nx:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned :6;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};
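
/*
 * Because the role is a union over a u32, checking whether an existing
 * shadow page can be reused is a single integer compare, e.g.
 * (illustrative sketch, editorial addition):
 *
 *	if (sp->role.word == role.word)
 *		// reuse this shadow page
 */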

/*
 * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
 * relevant to the current MMU configuration.  When loading CR0, CR4, or EFER,
 * including on nested transitions, if nothing in the full role changes then
 * MMU re-configuration can be skipped.  @valid bit is set on first usage so we
 * don't treat an all-zero structure as valid data.
 *
 * The properties that are tracked in the extended role but not the page role
 * are for things that either (a) do not affect the validity of the shadow page
 * or (b) are indirectly reflected in the shadow page's role.  For example,
 * CR4.PKE only affects permission checks for software walks of the guest page
 * tables (because KVM doesn't support Protection Keys with shadow paging), and
 * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
 *
 * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
 * SMAP, but the MMU's permission checks for software walks need to be SMEP and
 * SMAP aware regardless of CR0.WP.
 */
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr0_pg:1;
		unsigned int cr4_pae:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
	};
};

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};
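
/*
 * Illustrative sketch (editorial addition): on CR0/CR4/EFER loads the
 * new role is computed and compared against the current one; a match
 * means MMU re-configuration can be skipped:
 *
 *	if (new_role.as_u64 == context->mmu_role.as_u64)
 *		return;		// nothing relevant changed
 */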

struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t pgd;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

#define KVM_HAVE_MMU_RWLOCK

struct kvm_mmu_page;

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
			    u32 access, struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	hpa_t root_hpa;
	gpa_t root_pgd;
	union kvm_mmu_role mmu_role;
	u8 root_level;
	u8 shadow_root_level;
	u8 ept_ad;
	bool direct_map;
	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 * (see the sketch after this struct)
	 */
	u8 permissions[16];

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;

	u64 *pae_root;
	u64 *pml4_root;

	/*
	 * check zero bits on shadow page table entries, these
	 * bits include not only hardware reserved bits but also
	 * the bits spte never used.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	u64 pdptrs[4]; /* pae */
};
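
/*
 * Illustrative sketch (editorial addition): with the permissions bitmap
 * above, the permission-fault check reduces to a table lookup, roughly:
 *
 *	index = (pfec >> 1) & 0xf;	// page fault error code bits [4:1]
 *	fault = (mmu->permissions[index] >> pte_access) & 1;
 */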

struct kvm_tlb_range {
	u64 start_gfn;
	u64 pages;
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
	/*
	 * eventsel value for general purpose counters,
	 * ctrl value for fixed counters.
	 */
	u64 current_config;
	bool is_paused;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);

	/*
	 * The gate to release perf_events not marked in
	 * pmc_in_use only once in a vcpu time slice.
	 */
	bool need_cleanup;

	/*
	 * The total number of programmed perf_events; it helps to avoid
	 * redundant checks before cleanup if the guest doesn't use the
	 * vPMU at all.
	 */
	u8 event_count;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
};
XG
528struct kvm_mtrr_range {
529 u64 base;
530 u64 mask;
19efffa2 531 struct list_head node;
86fd5270
XG
532};
533
70109e7d 534struct kvm_mtrr {
86fd5270 535 struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
70109e7d 536 mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
10fac2dc 537 u64 deftype;
19efffa2
XG
538
539 struct list_head head;
70109e7d
XG
540};
541
1f4b34f8
AS
542/* Hyper-V SynIC timer */
543struct kvm_vcpu_hv_stimer {
544 struct hrtimer timer;
545 int index;
6a058a1e 546 union hv_stimer_config config;
1f4b34f8
AS
547 u64 count;
548 u64 exp_time;
549 struct hv_message msg;
550 bool msg_pending;
551};
552
5c919412
AS
553/* Hyper-V synthetic interrupt controller (SynIC)*/
554struct kvm_vcpu_hv_synic {
555 u64 version;
556 u64 control;
557 u64 msg_page;
558 u64 evt_page;
559 atomic64_t sint[HV_SYNIC_SINT_COUNT];
560 atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
561 DECLARE_BITMAP(auto_eoi_bitmap, 256);
562 DECLARE_BITMAP(vec_bitmap, 256);
563 bool active;
efc479e6 564 bool dont_zero_synic_pages;
5c919412
AS
565};
566
e83d5887
AS
567/* Hyper-V per vcpu emulation context */
568struct kvm_vcpu_hv {
4592b7ea 569 struct kvm_vcpu *vcpu;
d3457c87 570 u32 vp_index;
e83d5887 571 u64 hv_vapic;
9eec50b8 572 s64 runtime_offset;
5c919412 573 struct kvm_vcpu_hv_synic synic;
db397571 574 struct kvm_hyperv_exit exit;
1f4b34f8
AS
575 struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
576 DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
e6b6c483 577 cpumask_t tlb_flush;
644f7067 578 bool enforce_cpuid;
10d7bf1e
VK
579 struct {
580 u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
581 u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
582 u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
583 u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
584 u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
585 u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
586 } cpuid_cache;
e83d5887
AS
587};
588
23200b7a
JM
589/* Xen HVM per vcpu emulation context */
590struct kvm_vcpu_xen {
591 u64 hypercall_rip;
30b5c851 592 u32 current_runstate;
73e69a86 593 bool vcpu_info_set;
f2340cd9 594 bool vcpu_time_info_set;
30b5c851 595 bool runstate_set;
73e69a86 596 struct gfn_to_hva_cache vcpu_info_cache;
f2340cd9 597 struct gfn_to_hva_cache vcpu_time_info_cache;
30b5c851
DW
598 struct gfn_to_hva_cache runstate_cache;
599 u64 last_steal;
600 u64 runstate_entry_time;
601 u64 runstate_times[4];
23200b7a
JM
602};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr4_guest_rsvd_bits;
	unsigned long cr8;
	u32 host_pkru;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool tpr_access_reporting;
	bool xsaves_enabled;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;
	u64 perf_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest.  This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;

	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest.  This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
	struct kvm_mmu_memory_cache mmu_gfn_array_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time.  The
	 * "guest_fpu" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu *user_fpu;
	struct fpu *guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;

	struct kvm_pio_request pio;
	void *pio_data;
	void *guest_ins_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool injected;
		bool has_error_code;
		u8 nr;
		u32 error_code;
		unsigned long payload;
		bool has_payload;
		u8 nested_apf;
	} exception;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 *cpuid_entries;

	u64 reserved_gpa_bits;
	int maxphyaddr;
	int max_tdp_level;

	/* emulate context */

	struct x86_emulate_ctxt *emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u8 preempted;
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_pfn_cache cache;
	} st;

	u64 l1_tsc_offset;
	u64 tsc_offset; /* current tsc offset */
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 l1_tsc_scaling_ratio;
	u64 tsc_scaling_ratio; /* current scaling ratio */

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;     /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	bool hyperv_enabled;
	struct kvm_vcpu_hv *hyperv;
	struct kvm_vcpu_xen xen;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[ASYNC_PF_PER_VCPU];
		struct gfn_to_hva_cache data;
		u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
		u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
		u16 vec;
		u32 id;
		bool send_user_only;
		u32 host_apf_flags;
		unsigned long nested_apf_token;
		bool delivery_as_pf_vmexit;
		bool pageready_pending;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.  KVM exits
	 * to userspace if emulation of the faulting instruction fails and this
	 * flag is set, as KVM cannot make forward progress.
	 *
	 * If emulation fails for a write to guest page tables, KVM unprotects
	 * (zaps) the shadow page for the target gfn and resumes the guest to
	 * retry the non-emulatable instruction (on hardware).  Unprotecting the
	 * gfn doesn't allow forward progress for a self-changing access because
	 * doing so also zaps the translation for the gfn, i.e. retrying the
	 * instruction will hit a !PRESENT fault, which results in a new shadow
	 * page and sends KVM back to square one.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* be preempted when it's in kernel mode (cpl=0) */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* Host CPU on which VM-entry was most recently attempted */
	int last_vmentry_cpu;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;

	/* pv related cpuid info */
	struct {
		/*
		 * value of the eax register in the KVM_CPUID_FEATURES CPUID
		 * leaf.
		 */
		u32 features;

		/*
		 * indicates whether pv emulation should be disabled if features
		 * are not present in the guest's cpuid
		 */
		bool enforce;
	} pv_cpuid;

	/* Protected Guests */
	bool guest_state_protected;

	/*
	 * Set when PDPTEs were loaded directly by userspace without
	 * reading the guest memory
	 */
	bool pdptrs_from_userspace;

#if IS_ENABLED(CONFIG_HYPERV)
	hpa_t hv_root_tdp;
#endif
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * As the mode, we use the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};
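
/*
 * Illustrative sketch (editorial addition): in xAPIC cluster mode the
 * 8-bit logical ID splits into a 4-bit cluster number and a 4-bit
 * in-cluster bitmap, so a logical-destination lookup is roughly:
 *
 *	struct kvm_lapic **cluster = map->xapic_cluster_map[ldr >> 4];
 *	u16 mask = ldr & 0xf;	// each set bit selects one of the
 *				// 4 CPUs within the cluster
 */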

/* Hyper-V synthetic debugger (SynDbg) */
struct kvm_hv_syndbg {
	struct {
		u64 control;
		u64 status;
		u64 send_page;
		u64 recv_page;
		u64 pending_page;
	} control;
	u64 options;
};

/* Current state of Hyper-V TSC page clocksource */
enum hv_tsc_page_status {
	/* TSC page was not set up or disabled */
	HV_TSC_PAGE_UNSET = 0,
	/* TSC page MSR was written by the guest, update pending */
	HV_TSC_PAGE_GUEST_CHANGED,
	/* TSC page MSR was written by KVM userspace, update pending */
	HV_TSC_PAGE_HOST_CHANGED,
	/* TSC page was properly set up and is currently active */
	HV_TSC_PAGE_SET,
	/* TSC page is currently being updated and therefore is inactive */
	HV_TSC_PAGE_UPDATING,
	/* TSC page was set up with an inaccessible GPA */
	HV_TSC_PAGE_BROKEN,
};

/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;
	enum hv_tsc_page_status hv_tsc_page_status;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	struct ms_hyperv_tsc_page tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	struct hv_partition_assist_pg *hv_pa_pg;
	struct kvm_hv_syndbg hv_syndbg;
};

struct msr_bitmap_range {
	u32 flags;
	u32 nmsrs;
	u32 base;
	unsigned long *bitmap;
};

/* Xen emulation context */
struct kvm_xen {
	bool long_mode;
	u8 upcall_vector;
	gfn_t shinfo_gfn;
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_x86_msr_filter {
	u8 count;
	bool default_allow:1;
	struct msr_bitmap_range ranges[16];
};

#define APICV_INHIBIT_REASON_DISABLE    0
#define APICV_INHIBIT_REASON_HYPERV     1
#define APICV_INHIBIT_REASON_NESTED     2
#define APICV_INHIBIT_REASON_IRQWIN     3
#define APICV_INHIBIT_REASON_PIT_REINJ  4
#define APICV_INHIBIT_REASON_X2APIC     5
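
/*
 * Illustrative sketch (editorial addition): an inhibit reason is
 * (de)asserted via kvm_request_apicv_update(), which updates
 * apicv_inhibit_reasons and toggles APICv on all vCPUs, e.g.:
 *
 *	kvm_request_apicv_update(kvm, false, APICV_INHIBIT_REASON_PIT_REINJ);
 */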

struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct list_head lpage_disallowed_mmu_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;
	/*
	 * Protects marking pages unsync during page faults, as TDP MMU page
	 * faults only take mmu_lock for read.  For simplicity, the unsync
	 * pages lock is always taken when marking pages unsync regardless of
	 * whether mmu_lock is held for read or write.
	 */
	spinlock_t mmu_unsync_pages_lock;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map __rcu *apic_map;
	atomic_t apic_map_dirty;

	bool apic_access_memslot_enabled;
	unsigned long apicv_inhibit_reasons;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;
	struct kvm_xen xen;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;
	int cpu_dirty_logging_count;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	bool bus_lock_detection_enabled;
	/*
	 * If exit_on_emulation_error is set, and the in-kernel instruction
	 * emulator fails to emulate an instruction, allow userspace
	 * the opportunity to look at it.
	 */
	bool exit_on_emulation_error;

	/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
	u32 user_space_msr_mask;
	struct kvm_x86_msr_filter __rcu *msr_filter;

	u32 hypercall_exit_enabled;

	/* Guest can access the SGX PROVISIONKEY. */
	bool sgx_provisioning_allowed;

	struct kvm_pmu_event_filter __rcu *pmu_event_filter;
	struct task_struct *nx_lpage_recovery_thread;

#ifdef CONFIG_X86_64
	/*
	 * Whether the TDP MMU is enabled for this VM.  This contains a
	 * snapshot of the TDP MMU module parameter from when the VM was
	 * created and remains unchanged for the life of the VM.  If this is
	 * true, TDP MMU handler functions will run for various MMU
	 * operations.
	 */
	bool tdp_mmu_enabled;

	/*
	 * List of struct kvm_mmu_pages being used as roots.
	 * All struct kvm_mmu_pages in the list should have
	 * tdp_mmu_page set.
	 *
	 * For reads, this list is protected by:
	 *	the MMU lock in read mode + RCU or
	 *	the MMU lock in write mode
	 *
	 * For writes, this list is protected by:
	 *	the MMU lock in read mode + the tdp_mmu_pages_lock or
	 *	the MMU lock in write mode
	 *
	 * Roots will remain in the list until their tdp_mmu_root_count
	 * drops to zero, at which point the thread that decremented the
	 * count to zero should remove the root from the list and clean
	 * it up, freeing the root after an RCU grace period.
	 */
	struct list_head tdp_mmu_roots;

	/*
	 * List of struct kvm_mmu_pages not being used as roots.
	 * All struct kvm_mmu_pages in the list should have
	 * tdp_mmu_page set and a tdp_mmu_root_count of 0.
	 */
	struct list_head tdp_mmu_pages;

	/*
	 * Protects accesses to the following fields when the MMU lock
	 * is held in read mode:
	 *  - tdp_mmu_roots (above)
	 *  - tdp_mmu_pages (above)
	 *  - the link field of struct kvm_mmu_pages used by the TDP MMU
	 *  - lpage_disallowed_mmu_pages
	 *  - the lpage_disallowed_link field of struct kvm_mmu_pages used
	 *    by the TDP MMU
	 * It is acceptable, but not necessary, to acquire this lock when
	 * the thread holds the MMU lock in write mode.
	 */
	spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */

	/*
	 * If set, rmaps have been allocated for all memslots and should be
	 * allocated for any newly created or modified memslots.
	 */
	bool memslots_have_rmaps;

#if IS_ENABLED(CONFIG_HYPERV)
	hpa_t hv_root_tdp;
	spinlock_t hv_root_tdp_lock;
#endif
};

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 mmu_shadow_zapped;
	u64 mmu_pte_write;
	u64 mmu_pde_zapped;
	u64 mmu_flooded;
	u64 mmu_recycled;
	u64 mmu_cache_miss;
	u64 mmu_unsync;
	u64 lpages;
	u64 nx_lpage_splits;
	u64 max_mmu_page_hash_collisions;
	u64 max_mmu_rmap_size;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
	u64 nested_run;
	u64 directed_yield_attempted;
	u64 directed_yield_successful;
	u64 guest_mode;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
{
	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}

struct kvm_x86_ops {
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	void (*hardware_unsetup)(void);
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);

	unsigned int vm_size;
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
	void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
	int  (*tlb_remote_flush)(struct kvm *kvm);
	int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
			struct kvm_tlb_range *range);

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	/*
	 * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
	 * does not need to flush GPA->HPA mappings.
	 */
	void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);

	enum exit_fastpath_completion (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion exit_fastpath);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*check_apicv_inhibit_reasons)(ulong bit);
	void (*pre_update_apicv_exec_ctrl)(struct kvm *kvm, bool activate);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
	int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level);

	bool (*has_wbinvd_exit)(void);

	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);

	/*
	 * Retrieve somewhat arbitrary exit information.  Intended to be used
	 * only from within tracepoints to avoid VMREADs when tracing is off.
	 */
	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
			      u32 *exit_int_info, u32 *exit_int_info_err_code);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);

	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer.  A zero
	 * value indicates CPU dirty logging is unsupported or disabled.
	 */
	int cpu_dirty_log_size;
	void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;
	const struct kvm_x86_nested_ops *nested_ops;

	/*
	 * Architecture specific hooks for vCPU blocking due to
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means we cannot block the vCPU since some event
	 *        happened during this period, such as the 'ON' bit in
	 *        the posted-interrupts descriptor being set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*start_assignment)(struct kvm *kvm);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
	void (*enable_smi_window)(struct kvm_vcpu *vcpu);

	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, void *insn, int insn_len);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);

	void (*migrate_timers)(struct kvm_vcpu *vcpu);
	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);

	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
};

struct kvm_x86_nested_ops {
	int (*check_events)(struct kvm_vcpu *vcpu);
	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
	void (*triple_fault)(struct kvm_vcpu *vcpu);
	int (*get_state)(struct kvm_vcpu *vcpu,
			 struct kvm_nested_state __user *user_kvm_nested_state,
			 unsigned user_data_size);
	int (*set_state)(struct kvm_vcpu *vcpu,
			 struct kvm_nested_state __user *user_kvm_nested_state,
			 struct kvm_nested_state *kvm_state);
	bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

	int (*enable_evmcs)(struct kvm_vcpu *vcpu,
			    uint16_t *vmcs_version);
	uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
};

struct kvm_x86_init_ops {
	int (*cpu_has_kvm_support)(void);
	int (*disabled_by_bios)(void);
	int (*check_processor_compatibility)(void);
	int (*hardware_setup)(void);

	struct kvm_x86_ops *runtime_ops;
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr;
extern bool __read_mostly enable_apicv;
extern struct kvm_x86_ops kvm_x86_ops;

#define KVM_X86_OP(func) \
	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_NULL KVM_X86_OP
#include <asm/kvm-x86-ops.h>

static inline void kvm_ops_static_call_update(void)
{
#define KVM_X86_OP(func) \
	static_call_update(kvm_x86_##func, kvm_x86_ops.func);
#define KVM_X86_OP_NULL KVM_X86_OP
#include <asm/kvm-x86-ops.h>
}
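
/*
 * Illustrative sketch (editorial addition): once
 * kvm_ops_static_call_update() has run, vendor callbacks are reached
 * through static calls rather than retpoline-hit indirect branches,
 * e.g.:
 *
 *	static_call(kvm_x86_tlb_flush_all)(vcpu);
 */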

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}
void kvm_arch_free_vm(struct kvm *kvm);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops.tlb_remote_flush &&
	    !static_call(kvm_x86_tlb_remote_flush)(kvm))
		return 0;
	else
		return -ENOTSUPP;
}
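
/*
 * Illustrative sketch (editorial addition): the generic
 * kvm_flush_remote_tlbs() tries this hook first and only falls back to
 * kicking every vCPU when it returns an error, roughly:
 *
 *	if (kvm_arch_flush_remote_tlb(kvm))
 *		kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 */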
1534
54f1585a
ZX
1535int kvm_mmu_module_init(void);
1536void kvm_mmu_module_exit(void);
1537
1538void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
1539int kvm_mmu_create(struct kvm_vcpu *vcpu);
13d268ca
XG
1540void kvm_mmu_init_vm(struct kvm *kvm);
1541void kvm_mmu_uninit_vm(struct kvm *kvm);
54f1585a 1542
49c6f875 1543void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
8a3c1a33 1544void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
1c91cad4 1545void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
269e9552 1546 const struct kvm_memory_slot *memslot,
3c9bd400 1547 int start_level);
3ea3b7fa 1548void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
f36f3f28 1549 const struct kvm_memory_slot *memslot);
f4b4b180 1550void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
269e9552 1551 const struct kvm_memory_slot *memslot);
54f1585a 1552void kvm_mmu_zap_all(struct kvm *kvm);
15248258 1553void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
bc8a3d89
BG
1554unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
1555void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
54f1585a 1556
ff03a073 1557int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
cc4b6871 1558
3200f405 1559int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
9f811285 1560 const void *val, int bytes);
2f333bcb 1561
6ef768fa
PB
1562struct kvm_irq_mask_notifier {
1563 void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
1564 int irq;
1565 struct hlist_node link;
1566};
1567
1568void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
1569 struct kvm_irq_mask_notifier *kimn);
1570void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
1571 struct kvm_irq_mask_notifier *kimn);
1572void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
1573 bool mask);
1574
2f333bcb 1575extern bool tdp_enabled;
9f811285 1576
a3e06bbe
LJ
1577u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
1578
92a1f12d
JR
1579/* control of guest TSC rate supported? */
1580extern bool kvm_has_tsc_control;
92a1f12d
JR
1581/* maximum supported tsc_khz for guests */
1582extern u32 kvm_max_guest_tsc_khz;
bc9b961b
HZ
1583/* number of bits of the fractional part of the TSC scaling ratio */
1584extern u8 kvm_tsc_scaling_ratio_frac_bits;
1585/* maximum allowed value of TSC scaling ratio */
1586extern u64 kvm_max_tsc_scaling_ratio;
64672c95
YJ
1587/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
1588extern u64 kvm_default_tsc_scaling_ratio;
fe6b6bc8
CQ
1589/* bus lock detection supported? */
1590extern bool kvm_has_bus_lock_exit;
92a1f12d 1591
c45dcc71 1592extern u64 kvm_mce_cap_supported;
92a1f12d 1593
41577ab8
SC
1594/*
1595 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
1596 * userspace I/O) to indicate that the emulation context
d9f6e12f 1597 * should be reused as is, i.e. skip initialization of
41577ab8
SC
1598 * emulation context, instruction fetch and decode.
1599 *
1600 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
1601 * Indicates that only select instructions (tagged with
1602 * EmulateOnUD) should be emulated (to minimize the emulator
1603 * attack surface). See also EMULTYPE_TRAP_UD_FORCED.
1604 *
1605 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
1606 * decode the instruction length. For use *only* by
afaf0b2f 1607 * kvm_x86_ops.skip_emulated_instruction() implementations.
41577ab8 1608 *
92daa48b
SC
1609 * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
1610 * retry native execution under certain conditions.
1611 * Can only be set in conjunction with EMULTYPE_PF.
41577ab8
SC
1612 *
1613 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
1614 * triggered by KVM's magic "force emulation" prefix,
1615 * which is opt-in via module param (off by default).
1616 * Bypasses EmulateOnUD restriction despite emulating
1617 * due to an intercepted #UD (see EMULTYPE_TRAP_UD).
1618 * Used to test the full emulator from userspace.
1619 *
1620 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
1621 * backdoor emulation, which is opt-in via module param.
d9f6e12f 1622 * VMware backdoor emulation handles select instructions
41577ab8 1623 * and reinjects the #GP for all other cases.
92daa48b
SC
1624 *
1625 * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
1626 * case the CR2/GPA value passed on the stack is valid.
41577ab8 1627 */
571008da
SY
1628#define EMULTYPE_NO_DECODE (1 << 0)
1629#define EMULTYPE_TRAP_UD (1 << 1)
ba8afb6b 1630#define EMULTYPE_SKIP (1 << 2)
92daa48b 1631#define EMULTYPE_ALLOW_RETRY_PF (1 << 3)
b4000606 1632#define EMULTYPE_TRAP_UD_FORCED (1 << 4)
42cbf068 1633#define EMULTYPE_VMWARE_GP (1 << 5)
92daa48b
SC
1634#define EMULTYPE_PF (1 << 6)
1635
c60658d1
SC
1636int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
1637int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
1638 void *insn, int insn_len);
35be0ade 1639
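As an illustration of how the EMULTYPE flags combine, a hedged sketch of an exit handler that re-enters the emulator after an intercepted #PF; kvm_emulate_instruction() is the declaration just above, the handler itself is hypothetical:

	/* Hypothetical MMIO #PF path: emulate the faulting instruction, but
	 * allow KVM to unprotect the page and retry native execution. */
	static int example_handle_mmio_pf(struct kvm_vcpu *vcpu)
	{
		return kvm_emulate_instruction(vcpu,
					       EMULTYPE_PF | EMULTYPE_ALLOW_RETRY_PF);
	}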
f2b4b7dd 1640void kvm_enable_efer_bits(u64);
384bb783 1641bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
edef5c36 1642int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
f20935d8
SC
1643int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
1644int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
1edce0a9
SC
1645int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
1646int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
5ff3a351
SC
1647int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
1648int kvm_emulate_invd(struct kvm_vcpu *vcpu);
1649int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
1650int kvm_handle_invalid_op(struct kvm_vcpu *vcpu);
1651int kvm_emulate_monitor(struct kvm_vcpu *vcpu);
54f1585a 1652
dca7f128 1653int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
6a908b62 1654int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
54f1585a 1655int kvm_emulate_halt(struct kvm_vcpu *vcpu);
5cb56059 1656int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
647daca2 1657int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
f5f48ee1 1658int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
54f1585a 1659
3e6e0aab 1660void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
c697518a 1661int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
2b4a273b 1662void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
3e6e0aab 1663
7f3d35fd
KW
1664int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
1665 int reason, bool has_error_code, u32 error_code);
37817f29 1666
ed02b213
TL
1667void kvm_free_guest_fpu(struct kvm_vcpu *vcpu);
1668
f27ad38a 1669void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
5b51cb13 1670void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4);
49a9b07e 1671int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
2390218b 1672int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
a83b29c6 1673int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
eea1cff9 1674int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
020df079 1675int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
29d6ca41 1676void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
2d3ad1f4
AK
1677unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
1678void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
54f1585a 1679void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
92f9895c 1680int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
54f1585a 1681
609e36d3 1682int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
8fe8ab46 1683int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
54f1585a 1684
91586a3b
JK
1685unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
1686void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
c483c454 1687int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
91586a3b 1688
298101da
AK
1689void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1690void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
4d5523cf 1691void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
ce7ddec4
JR
1692void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1693void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
6389ee94 1694void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
53b3d8e9
SC
1695bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
1696 struct x86_exception *fault);
ec92fe44
JR
1697int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1698 gfn_t gfn, void *data, int offset, int len,
1699 u32 access);
0a79b009 1700bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
16f8a6f9 1701bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
298101da 1702
1a577b72
MT
1703static inline int __kvm_irq_line_state(unsigned long *irq_state,
1704 int irq_source_id, int level)
1705{
1706 /* Logical OR for level-triggered interrupts */
1707 if (level)
1708 __set_bit(irq_source_id, irq_state);
1709 else
1710 __clear_bit(irq_source_id, irq_state);
1711
1712 return !!(*irq_state);
1713}
1714
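A hedged usage sketch: each interrupt source owns one bit of *irq_state, so a shared level-triggered line reads as asserted until every source has cleared its bit. The wrapper and array name below are assumptions modeled on the PIC code:

	/* Hypothetical wrapper: returns the resulting ORed line level. */
	static int example_set_line(unsigned long *irq_states, int irq,
				    int irq_source_id, int level)
	{
		return __kvm_irq_line_state(&irq_states[irq],
					    irq_source_id, level);
	}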
b94742c9
JS
1715#define KVM_MMU_ROOT_CURRENT BIT(0)
1716#define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i)
1717#define KVM_MMU_ROOTS_ALL (~0UL)
08fb59d8 1718
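A brief, hedged example of how these masks feed kvm_mmu_free_roots(), declared further down: dropping the active root plus the first cached previous root. The call site itself is hypothetical:

	kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
			   KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0));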
1a577b72
MT
1719int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
1720void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
3de42dc0 1721
3419ffc8
SY
1722void kvm_inject_nmi(struct kvm_vcpu *vcpu);
1723
7c86663b
PB
1724void kvm_update_dr7(struct kvm_vcpu *vcpu);
1725
1cb3f3ae 1726int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
54f1585a 1727void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
6a82cd1c
VK
1728void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1729 ulong roots_to_free);
25b62c62 1730void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu);
54987b7a
PB
1731gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1732 struct x86_exception *exception);
ab9ae313
AK
1733gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
1734 struct x86_exception *exception);
1735gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
1736 struct x86_exception *exception);
1737gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
1738 struct x86_exception *exception);
1739gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
1740 struct x86_exception *exception);
54f1585a 1741
4e19c36f 1742bool kvm_apicv_activated(struct kvm *kvm);
8df14af4
SS
1743void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
1744void kvm_request_apicv_update(struct kvm *kvm, bool activate,
1745 unsigned long bit);
d62caabb 1746
54f1585a
ZX
1747int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
1748
736c291c 1749int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
dc25e89e 1750 void *insn, int insn_len);
a7052897 1751void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
5efac074
PB
1752void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1753 gva_t gva, hpa_t root_hpa);
eb4b248e 1754void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
b5129100 1755void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
34c16eec 1756
83013059
SC
1757void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
1758 int tdp_huge_page_level);
18552672 1759
d6e88aec 1760static inline u16 kvm_read_ldt(void)
ec6d273d
ZX
1761{
1762 u16 ldt;
1763 asm("sldt %0" : "=g"(ldt));
1764 return ldt;
1765}
1766
d6e88aec 1767static inline void kvm_load_ldt(u16 sel)
ec6d273d
ZX
1768{
1769 asm("lldt %0" : : "rm"(sel));
1770}
ec6d273d 1771
ec6d273d
ZX
1772#ifdef CONFIG_X86_64
1773static inline unsigned long read_msr(unsigned long msr)
1774{
1775 u64 value;
1776
1777 rdmsrl(msr, value);
1778 return value;
1779}
1780#endif
1781
c1a5d4f9
AK
1782static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
1783{
1784 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1785}
1786
ec6d273d
ZX
1787#define TSS_IOPB_BASE_OFFSET 0x66
1788#define TSS_BASE_SIZE 0x68
1789#define TSS_IOPB_SIZE (65536 / 8)
1790#define TSS_REDIRECTION_SIZE (256 / 8)
7d76b4d3
JP
1791#define RMODE_TSS_SIZE \
1792 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
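(Worked out: 0x68 = 104 bytes of base TSS, plus a 32-byte VM86 interrupt-redirection bitmap, plus an 8192-byte I/O permission bitmap, plus the one terminator byte the CPU expects after the IOPB, i.e. 8329 bytes in total.)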
53e0aa7b 1793
37817f29
IE
1794enum {
1795 TASK_SWITCH_CALL = 0,
1796 TASK_SWITCH_IRET = 1,
1797 TASK_SWITCH_JMP = 2,
1798 TASK_SWITCH_GATE = 3,
1799};
1800
1371d904 1801#define HF_GIF_MASK (1 << 0)
95ba8273 1802#define HF_NMI_MASK (1 << 3)
44c11430 1803#define HF_IRET_MASK (1 << 4)
ec9e60b2 1804#define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
f077825a
PB
1805#define HF_SMM_MASK (1 << 6)
1806#define HF_SMM_INSIDE_NMI_MASK (1 << 7)
1371d904 1807
699023e2
PB
1808#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
1809#define KVM_ADDRESS_SPACE_NUM 2
1810
1811#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
1812#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
1371d904 1813
4b526de5 1814asmlinkage void kvm_spurious_fault(void);
3901336e 1815
4ecac3fd
AK
1816/*
1817 * Hardware virtualization extension instructions may fault if a
1818 * reboot turns off virtualization while processes are running.
3901336e
JP
1819 * Usually after catching the fault we just panic; during a reboot
1820 * the instruction is ignored instead.
4ecac3fd 1821 */
98cd382d 1822#define __kvm_handle_fault_on_reboot(insn) \
3901336e
JP
1823 "666: \n\t" \
1824 insn "\n\t" \
1825 "jmp 668f \n\t" \
1826 "667: \n\t" \
3ebccdf3
TG
1827 "1: \n\t" \
1828 ".pushsection .discard.instr_begin \n\t" \
1829 ".long 1b - . \n\t" \
1830 ".popsection \n\t" \
3901336e 1831 "call kvm_spurious_fault \n\t" \
3ebccdf3
TG
1832 "1: \n\t" \
1833 ".pushsection .discard.instr_end \n\t" \
1834 ".long 1b - . \n\t" \
1835 ".popsection \n\t" \
3901336e 1836 "668: \n\t" \
f209a26d 1837 _ASM_EXTABLE(666b, 667b)
4ecac3fd 1838
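A hedged usage sketch: virtualization instructions are wrapped so that a fault is funneled to kvm_spurious_fault() via the exception table; as the comment above notes, that is tolerated during reboot. The __ex() alias mirrors the style of older VMX code but is an assumption here:

	/* Hypothetical alias in the style of the old VMX __ex() helper. */
	#define __ex(insn) __kvm_handle_fault_on_reboot(insn)

	static inline void example_vmxoff(void)
	{
		asm volatile(__ex("vmxoff"));
	}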
e930bffe 1839#define KVM_ARCH_WANT_MMU_NOTIFIER
5f7c292b 1840
c7c9c56c 1841int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
a1b37100 1842int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
71cc849b 1843int kvm_cpu_has_extint(struct kvm_vcpu *v);
a1b37100 1844int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
0b71785d 1845int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
d28bc9dd 1846void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
4256f43f 1847void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
e930bffe 1848
4180bf1b 1849int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
bdf7ffc8 1850 unsigned long ipi_bitmap_high, u32 min,
4180bf1b
WL
1851 unsigned long icr, int op_64_bit);
1852
e5fda4bb 1853int kvm_add_user_return_msr(u32 msr);
8ea8b8d6 1854int kvm_find_user_return_msr(u32 msr);
7e34fbd0 1855int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
18863bdd 1856
61a05d44
SC
1857static inline bool kvm_is_supported_user_return_msr(u32 msr)
1858{
1859 return kvm_find_user_return_msr(msr) >= 0;
1860}
1861
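A hedged sketch of the user-return MSR flow using the declarations above: vendor setup code registers the MSR once and remembers the slot, and the vCPU-run path writes the guest value. MSR_TSC_AUX is only an example index; the slot bookkeeping shown is an assumption:

	/* Hypothetical setup path: a negative return means not registered. */
	int slot = kvm_add_user_return_msr(MSR_TSC_AUX);

	/* Hypothetical pre-entry path: load the guest value with a full mask. */
	if (slot >= 0)
		kvm_set_user_return_msr(slot, guest_tsc_aux, -1ull);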
fe3eb504 1862u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio);
4ba76538 1863u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
83150f29
IS
1864u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
1865u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
35181e86 1866
82b32774 1867unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
f92653ee
JK
1868bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
1869
2860c4b1
PB
1870void kvm_make_mclock_inprogress_request(struct kvm *kvm);
1871void kvm_make_scan_ioapic_request(struct kvm *kvm);
7ee30bc1
NNL
1872void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
1873 unsigned long *vcpu_bitmap);
2860c4b1 1874
2a18b7e7 1875bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
af585b92
GN
1876 struct kvm_async_pf *work);
1877void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1878 struct kvm_async_pf *work);
56028d08
GN
1879void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1880 struct kvm_async_pf *work);
557a961a 1881void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
7c0ade6c 1882bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
af585b92
GN
1883extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1884
6affcbed
KH
1885int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
1886int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
d264ee0c 1887void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
db8fcefa 1888
f5132b01
GN
1889int kvm_is_in_guest(void);
1890
ff5a983c
PX
1891void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
1892 u32 size);
d71ba788
PB
1893bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
1894bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
f5132b01 1895
8feb4a04
FW
1896bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
1897 struct kvm_vcpu **dest_vcpu);
1898
37131313 1899void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
d84f1e07 1900 struct kvm_lapic_irq *irq);
197a4f4b 1901
fdcf7562
AG
1902static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
1903{
1904 /* We can only post Fixed and LowPrio IRQs */
637543a8
SS
1905 return (irq->delivery_mode == APIC_DM_FIXED ||
1906 irq->delivery_mode == APIC_DM_LOWEST);
fdcf7562
AG
1907}
1908
d1ed092f
SS
1909static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
1910{
b3646477 1911 static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
d1ed092f
SS
1912}
1913
1914static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
1915{
b3646477 1916 static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
d1ed092f
SS
1917}
1918
3491caf2 1919static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
3217f7c2 1920
7d669f50
SS
1921static inline int kvm_cpu_get_apicid(int mps_cpu)
1922{
1923#ifdef CONFIG_X86_LOCAL_APIC
64063505 1924 return default_cpu_present_to_apicid(mps_cpu);
7d669f50
SS
1925#else
1926 WARN_ON_ONCE(1);
1927 return BAD_APICID;
1928#endif
1929}
1930
05cade71
LP
1931#define put_smstate(type, buf, offset, val) \
1932 *(type *)((buf) + (offset) - 0x7e00) = val
1933
ed19321f
SC
1934#define GET_SMSTATE(type, buf, offset) \
1935 (*(type *)((buf) + (offset) - 0x7e00))
1936
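A hedged sketch of how these macros are used: SMM state-save fields live at architectural offsets 0x7e00-0x7fff, and the buffer passed in is expected to cover exactly that range, hence the -0x7e00 bias. The 0x7ef8 offset below is illustrative only:

	/* Hypothetical round-trip of a 32-bit save-area field. */
	u32 val = GET_SMSTATE(u32, buf, 0x7ef8);
	put_smstate(u32, buf, 0x7ef8, val);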
fb04a1ed
PX
1937int kvm_cpu_dirty_log_size(void);
1938
d501f747
BG
1939int alloc_all_memslots_rmaps(struct kvm *kvm);
1940
1965aae3 1941#endif /* _ASM_X86_KVM_HOST_H */