/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_KVM_H
#define QEMU_KVM_H

#include <errno.h>
#include "config-host.h"
#include "qemu-queue.h"

#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif

extern int kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_irqfds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;

#if defined CONFIG_KVM || !defined NEED_CPU_H
#define kvm_enabled()           (kvm_allowed)
/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if the user asked us to create an in-kernel
 * irqchip via the "kernel_irqchip=on" machine option.
 * What this actually means is architecture and machine model
 * specific: on PC, for instance, it means that the LAPIC,
 * IOAPIC and PIT are all in kernel. This function should never
 * be used from generic target-independent code: use one of the
 * following functions or some other specific check instead.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
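
/*
 * Illustrative sketch (not part of the original header): how machine or
 * target specific code might act on kvm_irqchip_in_kernel().  The helper
 * names below are hypothetical; on a real PC machine the equivalent
 * decision is made in the board/APIC setup code.
 *
 *     static void example_pc_create_irqchip(void)
 *     {
 *         if (kvm_irqchip_in_kernel()) {
 *             // LAPIC, IOAPIC and PIT are emulated by the kernel; only
 *             // thin wrapper devices are needed on the QEMU side.
 *             example_create_kvm_apic();
 *         } else {
 *             // Fall back to the userspace interrupt controller models.
 *             example_create_userspace_apic();
 *         }
 *     }
 */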

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM
 * asynchronously (ie by ioctl from any thread at any time)
 * rather than having to do interrupt delivery synchronously
 * (where the vcpu must be stopped at a suitable point first).
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)
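
/*
 * Illustrative sketch (an assumption about typical usage, not taken from
 * the original header): when asynchronous delivery is not available, a
 * target's kvm_arch_pre_run() hook is the natural place to inject a
 * pending interrupt, because the vcpu is stopped there.  The example_*()
 * helpers are hypothetical.
 *
 *     void example_arch_pre_run(CPUArchState *env, struct kvm_run *run)
 *     {
 *         if (!kvm_async_interrupts_enabled() && example_irq_pending(env)) {
 *             // Synchronous model: inject now, at a point where the
 *             // vcpu thread is known to be out of KVM_RUN.
 *             example_inject_irq(env, run);
 *         }
 *     }
 */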

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into
 * a KVM CPU (ie the kernel supports irqfds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_irqfds_enabled() (kvm_irqfds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd. This requires that the kernel supports
 * this and that we're running in a configuration that permits it.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)
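
/*
 * Illustrative sketch (hypothetical device code): the fast path for MSI
 * delivery is typically guarded on this check, falling back to ordinary
 * userspace MSI emulation otherwise.
 *
 *     if (kvm_msi_via_irqfd_enabled()) {
 *         virq = kvm_irqchip_add_msi_route(kvm_state, msg);
 *         // ... then bind an EventNotifier to virq with
 *         // kvm_irqchip_add_irqfd_notifier(), declared below ...
 *     }
 */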

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (ie the kernel supports
 * it and we're running in a configuration that permits it).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)
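
/*
 * Illustrative sketch (an assumption about typical usage): code that
 * wants to program irqchip routes, e.g. through
 * kvm_irqchip_add_irq_route() or kvm_irqchip_add_msi_route() declared
 * below, should only attempt to do so when this returns true:
 *
 *     if (kvm_gsi_routing_enabled()) {
 *         kvm_irqchip_add_irq_route(kvm_state, gsi, irqchip, pin);
 *     }
 */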

#else
#define kvm_enabled()           (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_enabled() (false)
#endif

struct kvm_run;
struct kvm_lapic_state;

typedef struct KVMCapabilityInfo {
    const char *name;
    int value;
} KVMCapabilityInfo;

#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
#define KVM_CAP_LAST_INFO { NULL, 0 }
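
/*
 * Illustrative sketch: an architecture backend lists the capabilities it
 * cannot work without as a KVM_CAP_LAST_INFO-terminated array (see
 * kvm_arch_required_capabilities below), and kvm_init() checks the list
 * against the running kernel.  The capability names here are only an
 * example of the pattern.
 *
 *     const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 *         KVM_CAP_INFO(USER_MEMORY),
 *         KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
 *         KVM_CAP_LAST_INFO
 *     };
 */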

struct KVMState;
typedef struct KVMState KVMState;
extern KVMState *kvm_state;

/* external API */

int kvm_init(void);

int kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_has_robust_singlestep(void);
int kvm_has_debugregs(void);
int kvm_has_xsave(void);
int kvm_has_xcrs(void);
int kvm_has_pit_state2(void);
int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
int kvm_has_intx_set_mask(void);

#ifdef NEED_CPU_H
int kvm_init_vcpu(CPUArchState *env);

int kvm_cpu_exec(CPUArchState *env);

#if !defined(CONFIG_USER_ONLY)
void *kvm_vmalloc(ram_addr_t size);
void *kvm_arch_vmalloc(ram_addr_t size);
void kvm_setup_guest_memory(void *start, size_t size);

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
void kvm_flush_coalesced_mmio_buffer(void);
#endif

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type);
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type);
void kvm_remove_all_breakpoints(CPUArchState *current_env);
int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap);
#ifndef _WIN32
int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset);
#endif

int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
int kvm_on_sigbus(int code, void *addr);

/* internal API */

int kvm_ioctl(KVMState *s, int type, ...);

int kvm_vm_ioctl(KVMState *s, int type, ...);

int kvm_vcpu_ioctl(CPUArchState *env, int type, ...);

/* Arch specific hooks */

extern const KVMCapabilityInfo kvm_arch_required_capabilities[];

void kvm_arch_pre_run(CPUArchState *env, struct kvm_run *run);
void kvm_arch_post_run(CPUArchState *env, struct kvm_run *run);

int kvm_arch_handle_exit(CPUArchState *env, struct kvm_run *run);

int kvm_arch_process_async_events(CPUArchState *env);

int kvm_arch_get_registers(CPUArchState *env);

/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE   1
/* state subset modified during VCPU reset */
#define KVM_PUT_RESET_STATE     2
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE      3

int kvm_arch_put_registers(CPUArchState *env, int level);

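/*
 * Illustrative sketch: 'level' tells the arch code how much state needs
 * to be written back.  The generic layer uses KVM_PUT_RESET_STATE after
 * a vcpu reset and KVM_PUT_FULL_STATE after init/vmload (see the
 * cpu_synchronize_post_*() helpers below), so an implementation commonly
 * filters like this (the example_put_*() helpers are hypothetical):
 *
 *     int kvm_arch_put_registers(CPUArchState *env, int level)
 *     {
 *         example_put_runtime_regs(env);           // always
 *         if (level >= KVM_PUT_RESET_STATE) {
 *             example_put_reset_only_state(env);   // reset and above
 *         }
 *         if (level == KVM_PUT_FULL_STATE) {
 *             example_put_full_state(env);         // init/vmload only
 *         }
 *         return 0;
 *     }
 */
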
int kvm_arch_init(KVMState *s);

int kvm_arch_init_vcpu(CPUArchState *env);

void kvm_arch_reset_vcpu(CPUArchState *env);

int kvm_arch_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
int kvm_arch_on_sigbus(int code, void *addr);

void kvm_arch_init_irq_routing(KVMState *s);

int kvm_set_irq(KVMState *s, int irq, int level);
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);

void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);

void kvm_put_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);

struct kvm_guest_debug;
struct kvm_debug_exit_arch;

struct kvm_sw_breakpoint {
    target_ulong pc;
    target_ulong saved_insn;
    int use_count;
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
};

QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
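
/*
 * Illustrative sketch: software breakpoints live on a QTAILQ, so a lookup
 * such as kvm_find_sw_breakpoint() is just a linear scan.  Assuming the
 * list head is reachable from the KVMState (the field name here is
 * illustrative):
 *
 *     struct kvm_sw_breakpoint *bp;
 *
 *     QTAILQ_FOREACH(bp, &s->kvm_sw_breakpoints, entry) {
 *         if (bp->pc == pc) {
 *             return bp;
 *         }
 *     }
 *     return NULL;
 */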

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
                                                 target_ulong pc);

int kvm_sw_breakpoints_active(CPUArchState *env);

int kvm_arch_insert_sw_breakpoint(CPUArchState *current_env,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUArchState *current_env,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);

void kvm_arch_update_guest_debug(CPUArchState *env, struct kvm_guest_debug *dbg);

bool kvm_arch_stop_on_emulation_error(CPUArchState *env);

int kvm_check_extension(KVMState *s, unsigned int extension);

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg);
void kvm_cpu_synchronize_state(CPUArchState *env);
void kvm_cpu_synchronize_post_reset(CPUArchState *env);
void kvm_cpu_synchronize_post_init(CPUArchState *env);

/* generic hooks - to be moved/refactored once there are more users */

static inline void cpu_synchronize_state(CPUArchState *env)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_state(env);
    }
}

static inline void cpu_synchronize_post_reset(CPUArchState *env)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_reset(env);
    }
}

static inline void cpu_synchronize_post_init(CPUArchState *env)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_init(env);
    }
}
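
/*
 * Illustrative sketch: code that inspects or modifies guest registers
 * outside of vcpu execution (the gdb stub, monitor commands, ...) is
 * expected to call cpu_synchronize_state() first, so that the in-kernel
 * register copy is pulled into CPUArchState; under TCG the call is a
 * no-op.
 *
 *     static void example_dump_pc(CPUArchState *env)
 *     {
 *         cpu_synchronize_state(env);
 *         // ... env now holds up-to-date register values ...
 *     }
 */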

#if !defined(CONFIG_USER_ONLY)
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
                                       target_phys_addr_t *phys_addr);
#endif

#endif /* NEED_CPU_H */
int kvm_set_ioeventfd_mmio(int fd, uint32_t adr, uint32_t val, bool assign,
                           uint32_t size);

int kvm_set_ioeventfd_pio_word(int fd, uint16_t adr, uint16_t val, bool assign);

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg);
void kvm_irqchip_release_virq(KVMState *s, int virq);

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
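
/*
 * Illustrative sketch of the usual call sequence for MSI-over-irqfd
 * (error handling omitted; 'n' is an EventNotifier owned by the caller
 * and 'msg' the MSIMessage to route):
 *
 *     int virq = kvm_irqchip_add_msi_route(kvm_state, msg);
 *     if (virq >= 0) {
 *         kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq);
 *     }
 *
 *     // ... and in reverse order on teardown:
 *     kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
 *     kvm_irqchip_release_virq(kvm_state, virq);
 */
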
#endif /* QEMU_KVM_H */