/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
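
/*
 * Worked example (informational note, not from the original header): with
 * 4K pages, CR3_NONPAE_RESERVED_BITS evaluates to
 * (0xfff & ~(X86_CR3_PWT | X86_CR3_PCD)) = 0xfe7, i.e. the low twelve bits
 * of CR3 except the PWT (bit 3) and PCD (bit 4) flags.
 * CR3_L_MODE_RESERVED_BITS additionally treats bits 63:40 as reserved,
 * assuming a 40-bit maximum guest physical address.
 */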
#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
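
/*
 * Informational note: in a segment selector, bit 2 (SELECTOR_TI_MASK)
 * selects the LDT when set and the GDT when clear, and the low two bits
 * (SELECTOR_RPL_MASK) hold the requested privilege level, e.g.
 * (selector & SELECTOR_TI_MASK) and (selector & SELECTOR_RPL_MASK).
 */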
extern spinlock_t kvm_lock;
extern struct list_head vm_list;
#include "x86_emulate.h"
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;	/* needs vcpu_load_rsp_rip() */
	u64 pdptrs[4]; /* pae */
	struct kvm_lapic *apic;    /* kernel irqchip context */
#define VCPU_MP_STATE_RUNNABLE          0
#define VCPU_MP_STATE_UNINITIALIZED     1
#define VCPU_MP_STATE_INIT_RECEIVED     2
#define VCPU_MP_STATE_SIPI_RECEIVED     3
#define VCPU_MP_STATE_HALTED            4
	u64 ia32_misc_enable_msr;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;

	struct kvm_save_segment {
	} tr, es, ds, fs, gs;

	int halt_request; /* real mode on Intel only */

	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);

	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);

extern struct kvm_x86_ops *kvm_x86_ops;
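
/*
 * Illustrative sketch (not part of this header): a hardware backend such as
 * VMX or SVM provides a filled-in ops table, and common code dispatches
 * every architecture-specific operation through the kvm_x86_ops pointer.
 * The names below are hypothetical:
 *
 *	static struct kvm_x86_ops my_backend_ops = {
 *		.cpu_has_kvm_support = my_cpu_has_support,
 *		.hardware_enable     = my_hardware_enable,
 *		.vcpu_create         = my_vcpu_create,
 *		.run                 = my_vcpu_run,
 *		...
 *	};
 *
 *	kvm_x86_ops = &my_backend_ops;		(set during module init)
 *	kvm_x86_ops->vcpu_load(vcpu, cpu);	(dispatch from common code)
 */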
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
enum emulation_result {
	EMULATE_DONE,       /* no further processing */
	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
	EMULATE_FAIL,       /* can't emulate this instruction */
};
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int no_decode);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
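
/*
 * Usage sketch (an assumption about the caller, not defined in this file):
 * exit handlers typically act on the emulation_result value, returning 1 to
 * resume the guest and 0 to drop back to userspace:
 *
 *	switch (emulate_instruction(vcpu, kvm_run, cr2, error_code, 0)) {
 *	case EMULATE_DONE:
 *		return 1;
 *	case EMULATE_DO_MMIO:
 *		return 0;	(kvm_run now carries the mmio request)
 *	case EMULATE_FAIL:
 *		kvm_report_emulation_failure(vcpu, "example context");
 *		return 0;
 *	}
 */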
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);

int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);

static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
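
/*
 * Call-site sketch (illustrative, assumed placement): the vcpu run path
 * makes sure shadow page table roots exist before entering the guest; the
 * check is cheap when mmu.root_hpa is already valid:
 *
 *	r = kvm_mmu_reload(vcpu);
 *	if (unlikely(r))
 *		goto out;	(could not allocate shadow roots)
 */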
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->shadow_efer & EFER_LME;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_PG;
}

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

static inline void load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

static inline void get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

static inline unsigned long read_msr(unsigned long msr)
static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)":: "r" (image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)":: "r" (image));
}

static inline void fpu_init(void)
static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
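
/*
 * The .byte strings above hand-encode VMX instructions so the code still
 * assembles with toolchains that lack the VMX mnemonics; the operand
 * registers (RAX/RDX/RSP) are fixed by the encoding.  Illustrative use
 * (a sketch, with vmxon_region as a hypothetical per-cpu buffer):
 *
 *	u64 phys_addr = __pa(vmxon_region);
 *	asm volatile (ASM_VMX_VMXON_RAX
 *		      : : "a"(&phys_addr), "m"(phys_addr)
 *		      : "memory", "cc");
 */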
#define MSR_IA32_TIME_STAMP_COUNTER		0x010
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
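
/*
 * Size note (arithmetic only): RMODE_TSS_SIZE is 0x68 bytes of base TSS,
 * a 256-bit (32-byte) interrupt redirection bitmap, a 64K-bit (8192-byte)
 * I/O permission bitmap, and one extra terminator byte:
 * 0x68 + 32 + 8192 + 1 = 8329 bytes.
 */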