/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)

#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD		 BIT_64(63)
#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
			  | X86_CR4_PKE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)


#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
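
/*
 * Illustrative example: with 4KB base pages, level 2 ("2MB pages") gives
 * KVM_HPAGE_GFN_SHIFT(2) == 9, so
 * gfn_to_index(0x7ff, 0x400, 2) == (0x7ff >> 9) - (0x400 >> 9) == 3 - 2 == 1,
 * i.e. the gfn falls in the second 2MB-aligned chunk of a slot whose base
 * gfn is 0x400.
 */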

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_USER_MASK |		\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)

/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting
 * with the SVE bit in EPT PTEs.
 */
#define SPTE_SPECIAL_MASK (1ULL << 62)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * The pages used as guest page tables on the soft mmu are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page cannot be more than 15 bits.
 *
 * Currently, we use 14 bits: @level, @cr4_pae, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned :8;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	bool unsync;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */

	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
	unsigned long mmu_valid_gen;

	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][4];
	u64 bad_mt_xwr;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	union kvm_mmu_page_role base_role;
	u8 root_level;
	u8 shadow_root_level;
	u8 ept_ad;
	bool direct_map;

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];
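
	/*
	 * Worked example of the indexing described above: a user-mode write
	 * fault has an error code of PFERR_USER_MASK | PFERR_WRITE_MASK
	 * (0x6), so bits [4:1] of the error code select byte 3 of this
	 * array; the pte's ACC_* permissions then pick the bit that decides
	 * whether the access is a permission fault.
	 */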

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * Check zero bits on shadow page table entries; these bits include
	 * not only hardware reserved bits but also bits the spte never uses.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	u64 config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	bool tpr_access_reporting;
	u64 ia32_xss;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	struct fpu guest_fpu;
	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;     /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	/*
	 * Indicates whether the access faulted on its page table in the
	 * guest; this is set while fixing a page fault and is used to detect
	 * unhandleable instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* GPA available (AMD only) */
	bool gpa_available;
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16
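
/*
 * Illustration of the modes above: xAPIC flat mode allocates 8 LDR bits
 * (one per possible logical destination, matching xapic_flat_map[8] below),
 * xAPIC cluster mode allocates 4 bits for the member ID within one of 16
 * clusters (matching xapic_cluster_map[16][4]), and x2APIC allocates 16
 * bits for the member ID within a cluster.
 */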

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};

/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	HV_REFERENCE_TSC_PAGE tsc_ref;
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	unsigned long mmu_valid_gen;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	unsigned int tss_addr;
	bool apic_access_page_done;

	gpa_t wall_clock;

	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
#endif

	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	u32 ldr_mode;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;
};

struct kvm_vm_stat {
	ulong mmu_shadow_zapped;
	ulong mmu_pte_write;
	ulong mmu_pte_updated;
	ulong mmu_pde_zapped;
	ulong mmu_flooded;
	ulong mmu_recycled;
	ulong mmu_cache_miss;
	ulong mmu_unsync;
	ulong remote_tlb_flush;
	ulong lpages;
	ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 halt_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 efer_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*cpu_has_high_real_mode_segbase)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	u32 (*get_pkru)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*get_enable_apicv)(void);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Arch-specific dirty logging hooks. These hooks are only supposed to
	 * be valid if the specific arch has a hardware-accelerated dirty
	 * logging mechanism. Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu);

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;

	/*
	 * Architecture specific hooks for vCPU blocking due to
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means we cannot block the vCPU since some event happened
	 *        during this period, such as the 'ON' bit in the
	 *        posted-interrupts descriptor being set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8 kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64 kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64 kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
	EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_RETRY		    (1 << 3)
#define EMULTYPE_NO_REEXECUTE	    (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
			int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}
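
/*
 * Usage note (illustrative): each caller that can assert a level-triggered
 * line owns one irq_source_id bit in *irq_state, so the line reads as
 * asserted for as long as at least one source still has its bit set.
 */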

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
	return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
	return get_canonical(la) != la;
#else
	return false;
#endif
}
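
/*
 * Example of the check above (48-bit virtual addresses): sign-extending
 * bit 47 of 0x0000800000000000 yields 0xffff800000000000, which differs
 * from the input, so that address is non-canonical; 0xffff800000000000
 * itself sign-extends to itself and is therefore canonical.
 */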

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
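/*
 * Spelled out: 0x68 + 32 + 8192 + 1 == 8329 bytes, i.e. the base TSS plus
 * the VM86 interrupt-redirection bitmap, the full I/O permission bitmap,
 * and the trailing all-ones terminator byte required after the I/O bitmap.
 */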

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
	"666: " insn "\n\t" \
	"668: \n\t" \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t" \
	"cmpb $0, kvm_rebooting \n\t" \
	"jne 668b \n\t" \
	__ASM_SIZE(push) " $666b \n\t" \
	"call kvm_spurious_fault \n\t" \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)		\
	____kvm_handle_fault_on_reboot(insn, "")
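
/*
 * Typical use (illustrative): wrap a virtualization instruction string in
 * __kvm_handle_fault_on_reboot() inside an asm statement.  If the
 * instruction faults because virtualization was already torn down for a
 * reboot, the fixup code checks kvm_rebooting and either resumes at label
 * 668 or reports the problem via kvm_spurious_fault().
 */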

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
					   unsigned long address);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return __default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#endif /* _ASM_X86_KVM_HOST_H */