/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
#define KVM_PIO_PAGE_OFFSET 1
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0
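/*
 * Illustrative sketch, not part of the original header: requests is an
 * ordinary unsigned long bitmask (see KVM_VCPU_COMM below), so a request
 * is raised and consumed with the generic atomic bit helpers, roughly:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_guest_tlb(vcpu);	(hypothetical helper)
 */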
#define NR_PTE_CHAIN_ENTRIES 5
struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned quadrant:2;
	unsigned pad_for_nice_hex_output:6;
	unsigned metaphysical:1;
};
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;	       /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};
extern struct kmem_cache *kvm_vcpu_cache;
/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	int shadow_root_level;
};
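/*
 * Illustrative sketch, not part of the original header: callers are
 * expected to dispatch through the hooks above, which are filled in
 * for the current paging mode when the mmu is (re)initialized.
 * Assuming the vcpu embeds this structure as, say, vcpu->mmu, a page
 * fault would be routed roughly as ("fault_gva" and "error_code" are
 * hypothetical local names):
 *
 *	r = vcpu->mmu.page_fault(vcpu, fault_gva, error_code);
 *	gpa = vcpu->mmu.gva_to_gpa(vcpu, fault_gva);
 */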
#define KVM_NR_MEM_OBJS 40
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
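/*
 * Illustrative sketch, not part of the original header, of the intended
 * usage pattern: top the cache up from a sleepable context, then pop
 * objects infallibly on the fault path.  Helper names are hypothetical;
 * assumes <linux/slab.h> for kzalloc().
 */
static inline int mmu_memory_cache_topup_sketch(struct kvm_mmu_memory_cache *mc,
						size_t size, int min)
{
	while (mc->nobjs < min) {
		void *obj = kzalloc(size, GFP_KERNEL);

		if (!obj)
			return -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

static inline void *mmu_memory_cache_alloc_sketch(struct kvm_mmu_memory_cache *mc)
{
	/* A prior successful top-up guarantees the cache is non-empty. */
	return mc->objects[--mc->nobjs];
}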
struct kvm_guest_debug {
};
struct kvm_pio_request {
	struct page *guest_pages[2];
	unsigned guest_page_offset;
};
struct kvm_vcpu_stat {
	u32 irq_window_exits;
	u32 request_irq_exits;
	u32 host_state_reload;
	u32 insn_emulation_fail;
};
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
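/*
 * Illustrative sketch, not part of the original header: given the
 * comment above, kvm_io_bus_find_dev() amounts to a plain linear scan,
 * roughly ("claims_addr" is a hypothetical per-device predicate, since
 * struct kvm_io_device is opaque in this header):
 *
 *	for (i = 0; i < bus->dev_count; i++)
 *		if (claims_addr(bus->devs[i], addr))
 *			return bus->devs[i];
 *	return NULL;
 */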
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
#ifdef CONFIG_HAS_IOMEM
#define KVM_VCPU_MMIO			\
	int mmio_read_completed;	\
	unsigned char mmio_data[8];	\
	gpa_t mmio_phys_addr;

#else
#define KVM_VCPU_MMIO
#endif
#define KVM_VCPU_COMM					\
	struct preempt_notifier preempt_notifier;	\
	struct mutex mutex;				\
	struct kvm_run *run;				\
	unsigned long requests;				\
	struct kvm_guest_debug guest_debug;		\
	int guest_fpu_loaded;				\
	wait_queue_head_t wq;				\
	struct kvm_vcpu_stat stat;			\
	KVM_VCPU_MMIO
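/*
 * Illustrative sketch, not part of the original header: an architecture's
 * vcpu structure is expected to pull these common members in by expanding
 * the macro at the top of its own definition, e.g. (hypothetical):
 *
 *	struct kvm_vcpu {
 *		KVM_VCPU_COMM;
 *		... arch-specific state ...
 *	};
 */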
struct kvm_mem_alias {
	unsigned long npages;
};
struct kvm_memory_slot {
	unsigned long npages;
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
};
struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 remote_tlb_flush;
};
struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
	struct kvm_vm_stat stat;
};
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__);	\
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
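/*
 * Illustrative usage, not part of the original header: a typical
 * (hypothetical) call site reporting an emulation hole would be
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 *
 * which is rate-limited, so a misbehaving guest cannot flood the
 * host log.
 */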
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

void decache_vcpus_on_cpu(int cpu);
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
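/*
 * Illustrative note, not in the original header: errors are encoded in
 * the most significant bit of an hpa_t, so any value with HPA_ERR_MASK
 * set tests as an error while real host physical addresses do not:
 *
 *	is_error_hpa(0x1000)			== 0
 *	is_error_hpa(0x1000 | HPA_ERR_MASK)	!= 0
 */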
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_dev_ioctl_check_extension(long ext);
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);
struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}
static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}
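/*
 * Illustrative note, not in the original header: memslots is a flat
 * array embedded in struct kvm, so pointer subtraction yields the slot
 * index directly, e.g. memslot_id(kvm, &kvm->memslots[3]) == 3.
 */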
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
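/*
 * Illustrative note, not in the original header: a guest frame number
 * becomes a guest physical address by shifting in the page size.  With
 * 4K pages (PAGE_SHIFT == 12), gfn_to_gpa(0x100) == 0x100000.
 */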
struct kvm_stats_debugfs_item {
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
#if defined(CONFIG_X86)