/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
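
/*
 * CR3 bits that must be zero when the guest loads CR3, for PAE
 * paging, non-PAE paging and long mode respectively; a guest CR3
 * with any of these bits set is rejected as invalid.
 */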
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
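
/*
 * CR0/CR4 bits whose guest values the hypervisor traps and shadows
 * (the *_MASK definitions), and bits forced on in the real control
 * registers while the guest runs (the *_ALWAYS_ON definitions),
 * e.g. X86_CR4_VMXE, which must stay set while in VMX operation.
 */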
#define KVM_GUEST_CR0_MASK \
        (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
         | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
        (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
         | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
        (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
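
/*
 * Fields of an x86 segment selector: TI selects GDT vs. LDT, RPL is
 * the requested privilege level.
 */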
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
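
/* Global list of all VMs on the host, protected by kvm_lock. */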
extern spinlock_t kvm_lock;
extern struct list_head vm_list;
#include "x86_emulate.h"
struct kvm_vcpu_arch {
        int interrupt_window_open;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
        DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
        unsigned long rip;      /* needs vcpu_load_rsp_rip() */

        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
        u64 shadow_efer;
        struct kvm_lapic *apic; /* kernel irqchip context */
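        /*
         * Multiprocessor bring-up states, mirroring the local APIC
         * INIT/SIPI protocol: secondary vcpus stay uninitialized
         * until they receive INIT followed by a startup IPI.
         */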
#define VCPU_MP_STATE_RUNNABLE 0
#define VCPU_MP_STATE_UNINITIALIZED 1
#define VCPU_MP_STATE_INIT_RECEIVED 2
#define VCPU_MP_STATE_SIPI_RECEIVED 3
#define VCPU_MP_STATE_HALTED 4
        int mp_state;
        u64 ia32_misc_enable_msr;

        struct kvm_mmu mmu;

        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        gfn_t last_pt_write_gfn;
        int last_pt_write_count;
        u64 *last_pte_updated;

        struct i387_fxsave_struct host_fx_image;
        struct i387_fxsave_struct guest_fx_image;

        gva_t mmio_fault_cr2;
        struct kvm_pio_request pio;
        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
                u8 nr;
                u32 error_code;
        } exception;
        struct kvm_save_segment {
                u16 selector;
                unsigned long base;
                u32 limit;
                u32 ar;
        } tr, es, ds, fs, gs;
        int halt_request; /* real mode on Intel only */

        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;
};

struct kvm_vcpu {
        struct kvm_vcpu_arch arch;
};
struct descriptor_table {
        u16 limit;
        unsigned long base;
} __attribute__((packed));
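
/*
 * Table of hardware-virtualization operations. Exactly one backend
 * (Intel VMX or AMD SVM) fills this in and registers it when its
 * module loads; the arch-independent code calls through kvm_x86_ops.
 */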
struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        void (*hardware_enable)(void *dummy);      /* __init */
        void (*hardware_disable)(void *dummy);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*vcpu_reset)(struct kvm_vcpu *vcpu);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*vcpu_decache)(struct kvm_vcpu *vcpu);
        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
        void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        void (*decache_regs)(struct kvm_vcpu *vcpu);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*tlb_flush)(struct kvm_vcpu *vcpu);

        void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        int (*get_irq)(struct kvm_vcpu *vcpu);
        void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code);
        bool (*exception_injected)(struct kvm_vcpu *vcpu);
        void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
        void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run);

        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
};

extern struct kvm_x86_ops *kvm_x86_ops;
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
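
/* Result of an attempt to emulate a guest instruction. */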
enum emulation_result {
        EMULATE_DONE,       /* no further processing */
        EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
        EMULATE_FAIL,       /* can't emulate this instruction */
};
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code, int no_decode);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;
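
/*
 * Port I/O emulation: the pending request is described in
 * vcpu->arch.pio and, when it cannot be completed in the kernel, is
 * handed to userspace via kvm_run; complete_pio() finishes it when
 * userspace resumes the vcpu.
 */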
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                           int size, unsigned long count, int down,
                           gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
                           u32 error_code);
void fx_init(struct kvm_vcpu *vcpu);
int emulator_read_std(unsigned long addr,
                      void *val,
                      unsigned int bytes,
                      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
                            const void *val,
                            unsigned int bytes,
                            struct kvm_vcpu *vcpu);
unsigned long segment_base(u16 selector);
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
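
/*
 * Shadow MMU housekeeping: free pages when the per-VM pool runs low,
 * and (re)load the shadow root before entering the guest.
 */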
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                __kvm_mmu_free_some_pages(vcpu);
}
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}
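
/* Guest CPU-mode predicates, derived from the shadowed control registers. */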
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.shadow_efer & EFER_LME;
#else
        return 0;
#endif
}
static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr0 & X86_CR0_PG;
}
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}
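
/*
 * Raw accessors for host segment, descriptor-table and FPU state,
 * used when saving and restoring host context around guest runs.
 */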
static inline u16 read_fs(void)
{
        u16 seg;
        asm("mov %%fs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_gs(void)
{
        u16 seg;
        asm("mov %%gs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}
static inline void load_fs(u16 sel)
{
        asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
        asm("mov %0, %%gs" : : "rm"(sel));
}

static inline void load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}
static inline void get_idt(struct descriptor_table *table)
{
        asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
        asm("sgdt %0" : "=m"(*table));
}
static inline unsigned long read_tr_base(void)
{
        u16 tr;
        asm("str %0" : "=g"(tr));
        return segment_base(tr);
}
#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif
static inline void fx_save(struct i387_fxsave_struct *image)
{
        asm("fxsave (%0)" : : "r" (image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
        asm("fxrstor (%0)" : : "r" (image));
}
static inline void fpu_init(void)
{
        asm("finit");
}
static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}
static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}
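
/*
 * VMX instructions encoded as raw opcode bytes, so this header builds
 * with assemblers that do not know the VMX mnemonics.
 */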
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
#define MSR_IA32_TIME_STAMP_COUNTER 0x010
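
/*
 * Layout of the TSS KVM sets up for real-mode emulation: the base
 * TSS, the interrupt-redirection bitmap, the I/O permission bitmap
 * and a final terminator byte.
 */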
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
static inline int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
               || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
}