#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/signal.h>

#include "vmx.h"
#include <linux/kvm.h>
#include <linux/kvm_para.h>

#define CR0_PE_MASK (1ULL << 0)
#define CR0_MP_MASK (1ULL << 1)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

#define CR3_WPT_MASK (1ULL << 3)
#define CR3_PCD_MASK (1ULL << 4)

#define CR3_RESEVED_BITS 0x07ULL
#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
#define CR3_FLAGS_MASK ((1ULL << 5) - 1)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

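/*
 * On vmx, bits set in the "guest mask" below are owned by the host: guest
 * reads of them come from the read shadow and guest writes are intercepted,
 * while the ALWAYS_ON sets are forced on in the hardware register whenever
 * the guest is running.
 */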
#define KVM_GUEST_CR0_MASK \
	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
	 | CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
	 | CR0_MP_MASK)
#define KVM_GUEST_CR4_MASK \
	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

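/*
 * fxsave/fxrstor operate on a 512-byte image that must be 16-byte aligned;
 * one buffer holds both the host and the guest image plus alignment slack.
 */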
#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)

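/* x86 hardware exception vector numbers */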
#define DE_VECTOR 0
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

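/*
 * Page offset, within a vcpu fd's mmap area, of the page backing
 * vcpu->pio_data (offset 0 maps the kvm_run structure).
 */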
#define KVM_PIO_PAGE_OFFSET 1

/*
 * vcpu->requests bit members
 */
#define KVM_TLB_FLUSH 0

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64           gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64           hpa_t;
typedef unsigned long hfn_t;

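/*
 * A shadow page is reverse-mapped to the shadow ptes pointing at it; once
 * it has more than one parent pte, the parents are kept in a hash list of
 * these chains (see the multimapped union in struct kvm_mmu_page below).
 */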
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};
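/*
 * The quadrant arises because a 2-level guest uses 1024-entry page tables
 * while the shadow pages hold 512 entries, so one guest page is shadowed
 * by two pages at level 1 (and four at level 2); the quadrant selects
 * which slice of the guest page this shadow page maps.
 */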

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;         /* More than one parent_pte? */
	int root_count;          /* Currently serving as active root */
	union {
		u64 *parent_pte;               /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};

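/*
 * Hardware-defined head of the vmcs region: revision identifier and
 * vmx-abort indicator.  The layout of the remainder is cpu-specific, so
 * it is only sized, never interpreted, by software.
 */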
struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level long mode, 3-level PAE, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

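	/*
	 * pae_root holds the four pdpte-level entries for pae shadow
	 * paging; it is kept in the low 4GB because cr3 is only 32 bits
	 * wide when a pae guest is running.
	 */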
	u64 *pae_root;
};

#define KVM_NR_MEM_OBJS 20

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

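/*
 * An in-flight port i/o operation.  Data is staged through the vcpu's
 * pio_data page; for string instructions the guest pages being accessed
 * are pinned in guest_pages[] (two entries, in case the buffer crosses
 * a page boundary).
 */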
struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	int in;
	int port;
	int size;
	int string;
	int down;
	int rep;
};

struct kvm_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 light_exits;
	u32 efer_reload;
};

struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}

/*
 * It would be nice to use something smarter than a linear search, TBD.
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);

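/*
 * Typical registration (a sketch; the dummy_* callbacks are hypothetical,
 * in-tree devices fill the fields in at setup time):
 *
 *	static struct kvm_io_device dummy_dev = {
 *		.read     = dummy_read,
 *		.write    = dummy_write,
 *		.in_range = dummy_in_range,
 *	};
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &dummy_dev);
 */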
struct kvm_vcpu {
	struct kvm *kvm;
	int vcpu_id;
	union {
		struct vmcs *vmcs;
		struct vcpu_svm *svm;
	};
	struct mutex mutex;
	int cpu;
	int launched;
	u64 host_tsc;
	struct kvm_run *run;
	int interrupt_window_open;
	int guest_mode;
	unsigned long requests;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
	unsigned long irq_pending[NR_IRQ_WORDS];
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip; /* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	gpa_t para_state_gpa;
	struct page *para_state_page;
	gpa_t hypercall_gpa;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	u64 ia32_misc_enable_msr;
	int nmsrs;
	int save_nmsrs;
	int msr_offset_efer;
#ifdef CONFIG_X86_64
	int msr_offset_kernel_gs_base;
#endif
	struct vmx_msr_entry *guest_msrs;
	struct vmx_msr_entry *host_msrs;

	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;

	struct kvm_guest_debug guest_debug;

	char fx_buf[FX_BUF_SIZE];
	char *host_fx_image;
	char *guest_fx_image;
	int fpu_active;
	int guest_fpu_loaded;
	struct vmx_host_state {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
		int fs_gs_ldt_reload_needed;
	} vmx_host_state;

	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	int sigset_active;
	sigset_t sigset;

	struct kvm_stat stat;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
};

struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	struct page **phys_mem;
	unsigned long *dirty_bitmap;
};

struct kvm {
	spinlock_t lock; /* protects everything except vcpus */
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
	struct list_head active_mmu_pages;
	int n_free_mmu_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	int nvcpus;
	struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
	int memory_config_version;
	int busy;
	unsigned long rmap_overflow;
	struct list_head vm_list;
	struct file *filp;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
};

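/*
 * Memory image of an sgdt/sidt/lgdt/lidt operand: a 16-bit limit
 * immediately followed by the base address, hence the packed attribute.
 */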
struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

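/*
 * Entry points into the hardware-specific backend (vmx.c or svm.c),
 * registered via kvm_init_arch() when the backend module loads.
 */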
struct kvm_arch_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */

	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);

	void (*vcpu_load)(struct kvm_vcpu *vcpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
};

extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);

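/*
 * Translation failures are signalled in-band: the most significant bit of
 * the returned hpa is the error flag, tested with is_error_hpa() below.
 */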
542 | hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa); | |
543 | #define HPA_MSB ((sizeof(hpa_t) * 8) - 1) | |
544 | #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) | |
545 | static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } | |
546 | hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva); | |
039576c0 | 547 | struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); |
6aa8b732 AK |
548 | |
549 | void kvm_emulator_want_group7_invlpg(void); | |
550 | ||
551 | extern hpa_t bad_page_address; | |
552 | ||
954bbbc2 | 553 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
6aa8b732 AK |
554 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
555 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); | |
556 | ||
enum emulation_result {
	EMULATE_DONE,    /* no further processing */
	EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
	EMULATE_FAIL,    /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int string, int down,
		  gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);

int kvm_read_guest(struct kvm_vcpu *vcpu,
		   gva_t addr,
		   unsigned long size,
		   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
		    gva_t addr,
		    unsigned long size,
		    void *data);

unsigned long segment_base(u16 selector);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *old, const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);

static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				     u32 error_code)
{
	return vcpu->mmu.page_fault(vcpu, gva, error_code);
}

static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->shadow_efer & EFER_LME;
#else
	return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_PG_MASK;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

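/*
 * The mmu stores a shadow page's struct kvm_mmu_page pointer in
 * page->private (via set_page_private()) when the page is allocated.
 */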
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
	u16 seg;
	asm ("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm ("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm ("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm ("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm ("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(void *image)
{
	asm ("fxsave (%0)":: "r" (image));
}

static inline void fx_restore(void *image)
{
	asm ("fxrstor (%0)":: "r" (image));
}

static inline void fpu_init(void)
{
	asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

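/*
 * VMX instructions hand-assembled as raw opcode bytes, for assemblers
 * that do not know the vmx mnemonics; the register operands (rax, rdx,
 * rsp) are fixed by the modrm bytes of the encodings.
 */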
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

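/*
 * Layout of the tss used to run real-mode guest code under vm86: the
 * base tss, the software interrupt redirection bitmap, and the i/o
 * permission bitmap, plus the trailing all-ones byte the architecture
 * requires after the i/o bitmap.
 */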
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif