Commit | Line | Data |
---|---|---|
05330448 AL |
1 | /* |
2 | * QEMU KVM support | |
3 | * | |
4 | * Copyright (C) 2006-2008 Qumranet Technologies | |
5 | * Copyright IBM, Corp. 2008 | |
6 | * | |
7 | * Authors: | |
8 | * Anthony Liguori <aliguori@us.ibm.com> | |
9 | * | |
10 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
11 | * See the COPYING file in the top-level directory. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include <sys/types.h> | |
16 | #include <sys/ioctl.h> | |
17 | #include <sys/mman.h> | |
18 | ||
19 | #include <linux/kvm.h> | |
20 | ||
21 | #include "qemu-common.h" | |
22 | #include "sysemu.h" | |
23 | #include "kvm.h" | |
24 | #include "cpu.h" | |
e22a25c9 | 25 | #include "gdbstub.h" |
0e607a80 | 26 | #include "host-utils.h" |
4c5b10b7 | 27 | #include "hw/pc.h" |
408392b3 | 28 | #include "hw/apic.h" |
35bed8ee | 29 | #include "ioport.h" |
e7701825 | 30 | #include "kvm_x86.h" |
05330448 | 31 | |
bb0300dc GN |
32 | #ifdef CONFIG_KVM_PARA |
33 | #include <linux/kvm_para.h> | |
34 | #endif | |
35 | // | |
05330448 AL |
36 | //#define DEBUG_KVM |
37 | ||
38 | #ifdef DEBUG_KVM | |
8c0d577e | 39 | #define DPRINTF(fmt, ...) \ |
05330448 AL |
40 | do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) |
41 | #else | |
8c0d577e | 42 | #define DPRINTF(fmt, ...) \ |
05330448 AL |
43 | do { } while (0) |
44 | #endif | |
45 | ||
1a03675d GC |
46 | #define MSR_KVM_WALL_CLOCK 0x11 |
47 | #define MSR_KVM_SYSTEM_TIME 0x12 | |
48 | ||
c0532a76 MT |
49 | #ifndef BUS_MCEERR_AR |
50 | #define BUS_MCEERR_AR 4 | |
51 | #endif | |
52 | #ifndef BUS_MCEERR_AO | |
53 | #define BUS_MCEERR_AO 5 | |
54 | #endif | |
55 | ||
b827df58 AK |
56 | #ifdef KVM_CAP_EXT_CPUID |
57 | ||
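/* Ask the kernel for its supported-CPUID table, sized for 'max' entries.
 * Returns NULL when the buffer was too small (-E2BIG) so the caller can
 * retry with a larger allocation. */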
58 | static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max) | |
59 | { | |
60 | struct kvm_cpuid2 *cpuid; | |
61 | int r, size; | |
62 | ||
63 | size = sizeof(*cpuid) + max * sizeof(*cpuid->entries); | |
64 | cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size); | |
65 | cpuid->nent = max; | |
66 | r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid); | |
76ae317f MM |
67 | if (r == 0 && cpuid->nent >= max) { |
68 | r = -E2BIG; | |
69 | } | |
b827df58 AK |
70 | if (r < 0) { |
71 | if (r == -E2BIG) { | |
72 | qemu_free(cpuid); | |
73 | return NULL; | |
74 | } else { | |
75 | fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n", | |
76 | strerror(-r)); | |
77 | exit(1); | |
78 | } | |
79 | } | |
80 | return cpuid; | |
81 | } | |
82 | ||
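/* Look up one register (R_EAX..R_EDX) of a CPUID leaf/index as reported by
 * KVM_GET_SUPPORTED_CPUID, applying fixups for bits that older kernels
 * misreport. */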
c958a8bd SY |
83 | uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, |
84 | uint32_t index, int reg) | |
b827df58 AK |
85 | { |
86 | struct kvm_cpuid2 *cpuid; | |
87 | int i, max; | |
88 | uint32_t ret = 0; | |
89 | uint32_t cpuid_1_edx; | |
90 | ||
91 | if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) { | |
92 | return -1U; | |
93 | } | |
94 | ||
95 | max = 1; | |
96 | while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) { | |
97 | max *= 2; | |
98 | } | |
99 | ||
100 | for (i = 0; i < cpuid->nent; ++i) { | |
c958a8bd SY |
101 | if (cpuid->entries[i].function == function && |
102 | cpuid->entries[i].index == index) { | |
b827df58 AK |
103 | switch (reg) { |
104 | case R_EAX: | |
105 | ret = cpuid->entries[i].eax; | |
106 | break; | |
107 | case R_EBX: | |
108 | ret = cpuid->entries[i].ebx; | |
109 | break; | |
110 | case R_ECX: | |
111 | ret = cpuid->entries[i].ecx; | |
112 | break; | |
113 | case R_EDX: | |
114 | ret = cpuid->entries[i].edx; | |
19ccb8ea JK |
115 | switch (function) { |
116 | case 1: | |
117 | /* KVM before 2.6.30 misreports the following features */ | |
118 | ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA; | |
119 | break; | |
120 | case 0x80000001: | |
b827df58 AK |
121 | /* On Intel, kvm returns cpuid according to the Intel spec, |
122 | * so add missing bits according to the AMD spec: | |
123 | */ | |
c958a8bd | 124 | cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX); |
c1667e40 | 125 | ret |= cpuid_1_edx & 0x183f7ff; |
19ccb8ea | 126 | break; |
b827df58 AK |
127 | } |
128 | break; | |
129 | } | |
130 | } | |
131 | } | |
132 | ||
133 | qemu_free(cpuid); | |
134 | ||
135 | return ret; | |
136 | } | |
137 | ||
138 | #else | |
139 | ||
c958a8bd SY |
140 | uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, |
141 | uint32_t index, int reg) | |
b827df58 AK |
142 | { |
143 | return -1U; | |
144 | } | |
145 | ||
146 | #endif | |
147 | ||
bb0300dc GN |
148 | #ifdef CONFIG_KVM_PARA |
149 | struct kvm_para_features { | |
150 | int cap; | |
151 | int feature; | |
152 | } para_features[] = { | |
153 | #ifdef KVM_CAP_CLOCKSOURCE | |
154 | { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE }, | |
155 | #endif | |
156 | #ifdef KVM_CAP_NOP_IO_DELAY | |
157 | { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY }, | |
158 | #endif | |
159 | #ifdef KVM_CAP_PV_MMU | |
160 | { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP }, | |
bb0300dc GN |
161 | #endif |
162 | { -1, -1 } | |
163 | }; | |
164 | ||
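/* Build the paravirtual feature mask by probing each optional capability
 * listed in para_features[] against the running kernel. */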
165 | static int get_para_features(CPUState *env) | |
166 | { | |
167 | int i, features = 0; | |
168 | ||
169 | for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) { | |
170 | if (kvm_check_extension(env->kvm_state, para_features[i].cap)) | |
171 | features |= (1 << para_features[i].feature); | |
172 | } | |
173 | ||
174 | return features; | |
175 | } | |
176 | #endif | |
177 | ||
e7701825 MT |
178 | #ifdef KVM_CAP_MCE |
179 | static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap, | |
180 | int *max_banks) | |
181 | { | |
182 | int r; | |
183 | ||
184 | r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_MCE); | |
185 | if (r > 0) { | |
186 | *max_banks = r; | |
187 | return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap); | |
188 | } | |
189 | return -ENOSYS; | |
190 | } | |
191 | ||
192 | static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap) | |
193 | { | |
194 | return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap); | |
195 | } | |
196 | ||
197 | static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m) | |
198 | { | |
199 | return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m); | |
200 | } | |
201 | ||
c0532a76 MT |
202 | static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n) |
203 | { | |
204 | struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs); | |
205 | int r; | |
206 | ||
207 | kmsrs->nmsrs = n; | |
208 | memcpy(kmsrs->entries, msrs, n * sizeof *msrs); | |
209 | r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs); | |
210 | memcpy(msrs, kmsrs->entries, n * sizeof *msrs); | |
211 | free(kmsrs); | |
212 | return r; | |
213 | } | |
214 | ||
215 | /* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */ | |
216 | static int kvm_mce_in_exception(CPUState *env) | |
217 | { | |
218 | struct kvm_msr_entry msr_mcg_status = { | |
219 | .index = MSR_MCG_STATUS, | |
220 | }; | |
221 | int r; | |
222 | ||
223 | r = kvm_get_msr(env, &msr_mcg_status, 1); | |
224 | if (r == -1 || r == 0) { | |
225 | return -1; | |
226 | } | |
227 | return !!(msr_mcg_status.data & MCG_STATUS_MCIP); | |
228 | } | |
229 | ||
e7701825 MT |
230 | struct kvm_x86_mce_data |
231 | { | |
232 | CPUState *env; | |
233 | struct kvm_x86_mce *mce; | |
c0532a76 | 234 | int abort_on_error; |
e7701825 MT |
235 | }; |
236 | ||
237 | static void kvm_do_inject_x86_mce(void *_data) | |
238 | { | |
239 | struct kvm_x86_mce_data *data = _data; | |
240 | int r; | |
241 | ||
c0532a76 MT |
242 | /* If there is an MCE exception being processed, ignore this SRAO MCE */ |
243 | r = kvm_mce_in_exception(data->env); | |
244 | if (r == -1) | |
245 | fprintf(stderr, "Failed to get MCE status\n"); | |
246 | else if (r && !(data->mce->status & MCI_STATUS_AR)) | |
247 | return; | |
248 | ||
e7701825 | 249 | r = kvm_set_mce(data->env, data->mce); |
c0532a76 | 250 | if (r < 0) { |
e7701825 | 251 | perror("kvm_set_mce FAILED"); |
c0532a76 MT |
252 | if (data->abort_on_error) { |
253 | abort(); | |
254 | } | |
255 | } | |
e7701825 MT |
256 | } |
257 | #endif | |
258 | ||
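/* Inject a machine-check event into 'cenv'; run_on_cpu() is used so the
 * KVM_X86_SET_MCE ioctl executes on that vCPU's own thread. */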
259 | void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status, | |
c0532a76 MT |
260 | uint64_t mcg_status, uint64_t addr, uint64_t misc, |
261 | int abort_on_error) | |
e7701825 MT |
262 | { |
263 | #ifdef KVM_CAP_MCE | |
264 | struct kvm_x86_mce mce = { | |
265 | .bank = bank, | |
266 | .status = status, | |
267 | .mcg_status = mcg_status, | |
268 | .addr = addr, | |
269 | .misc = misc, | |
270 | }; | |
271 | struct kvm_x86_mce_data data = { | |
272 | .env = cenv, | |
273 | .mce = &mce, | |
274 | }; | |
275 | ||
c0532a76 MT |
276 | if (!cenv->mcg_cap) { |
277 | fprintf(stderr, "MCE support is not enabled!\n"); | |
278 | return; | |
279 | } | |
280 | ||
e7701825 | 281 | run_on_cpu(cenv, kvm_do_inject_x86_mce, &data); |
c0532a76 MT |
282 | #else |
283 | if (abort_on_error) | |
284 | abort(); | |
e7701825 MT |
285 | #endif |
286 | } | |
287 | ||
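/* Build the guest CPUID table (including the KVM paravirt signature leaves),
 * mask guest feature bits down to what the host kernel supports, enable MCE
 * reporting when available, and hand the result to KVM_SET_CPUID2. */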
05330448 AL |
288 | int kvm_arch_init_vcpu(CPUState *env) |
289 | { | |
290 | struct { | |
486bd5a2 AL |
291 | struct kvm_cpuid2 cpuid; |
292 | struct kvm_cpuid_entry2 entries[100]; | |
05330448 | 293 | } __attribute__((packed)) cpuid_data; |
486bd5a2 | 294 | uint32_t limit, i, j, cpuid_i; |
a33609ca | 295 | uint32_t unused; |
bb0300dc GN |
296 | struct kvm_cpuid_entry2 *c; |
297 | #ifdef KVM_CPUID_SIGNATURE | |
298 | uint32_t signature[3]; | |
299 | #endif | |
05330448 | 300 | |
f8d926e9 JK |
301 | env->mp_state = KVM_MP_STATE_RUNNABLE; |
302 | ||
c958a8bd | 303 | env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX); |
6c0d7ee8 AP |
304 | |
305 | i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR; | |
c958a8bd | 306 | env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX); |
6c0d7ee8 AP |
307 | env->cpuid_ext_features |= i; |
308 | ||
457dfed6 | 309 | env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001, |
c958a8bd | 310 | 0, R_EDX); |
457dfed6 | 311 | env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001, |
c958a8bd | 312 | 0, R_ECX); |
296acb64 JR |
313 | env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A, |
314 | 0, R_EDX); | |
315 | ||
6c1f42fe | 316 | |
05330448 AL |
317 | cpuid_i = 0; |
318 | ||
bb0300dc GN |
319 | #ifdef CONFIG_KVM_PARA |
320 | /* Paravirtualization CPUIDs */ | |
321 | memcpy(signature, "KVMKVMKVM\0\0\0", 12); | |
322 | c = &cpuid_data.entries[cpuid_i++]; | |
323 | memset(c, 0, sizeof(*c)); | |
324 | c->function = KVM_CPUID_SIGNATURE; | |
325 | c->eax = 0; | |
326 | c->ebx = signature[0]; | |
327 | c->ecx = signature[1]; | |
328 | c->edx = signature[2]; | |
329 | ||
330 | c = &cpuid_data.entries[cpuid_i++]; | |
331 | memset(c, 0, sizeof(*c)); | |
332 | c->function = KVM_CPUID_FEATURES; | |
333 | c->eax = env->cpuid_kvm_features & get_para_features(env); | |
334 | #endif | |
335 | ||
a33609ca | 336 | cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused); |
05330448 AL |
337 | |
338 | for (i = 0; i <= limit; i++) { | |
bb0300dc | 339 | c = &cpuid_data.entries[cpuid_i++]; |
486bd5a2 AL |
340 | |
341 | switch (i) { | |
a36b1029 AL |
342 | case 2: { |
343 | /* Keep reading function 2 till all the input is received */ | |
344 | int times; | |
345 | ||
a36b1029 | 346 | c->function = i; |
a33609ca AL |
347 | c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | |
348 | KVM_CPUID_FLAG_STATE_READ_NEXT; | |
349 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); | |
350 | times = c->eax & 0xff; | |
a36b1029 AL |
351 | |
352 | for (j = 1; j < times; ++j) { | |
a33609ca | 353 | c = &cpuid_data.entries[cpuid_i++]; |
a36b1029 | 354 | c->function = i; |
a33609ca AL |
355 | c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC; |
356 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); | |
a36b1029 AL |
357 | } |
358 | break; | |
359 | } | |
486bd5a2 AL |
360 | case 4: |
361 | case 0xb: | |
362 | case 0xd: | |
363 | for (j = 0; ; j++) { | |
486bd5a2 AL |
364 | c->function = i; |
365 | c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; | |
366 | c->index = j; | |
a33609ca | 367 | cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); |
486bd5a2 | 368 | |
a33609ca | 369 | if (i == 4 && c->eax == 0) |
486bd5a2 | 370 | break; |
a33609ca | 371 | if (i == 0xb && !(c->ecx & 0xff00)) |
486bd5a2 | 372 | break; |
a33609ca | 373 | if (i == 0xd && c->eax == 0) |
486bd5a2 | 374 | break; |
a33609ca AL |
375 | |
376 | c = &cpuid_data.entries[cpuid_i++]; | |
486bd5a2 AL |
377 | } |
378 | break; | |
379 | default: | |
486bd5a2 | 380 | c->function = i; |
a33609ca AL |
381 | c->flags = 0; |
382 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); | |
486bd5a2 AL |
383 | break; |
384 | } | |
05330448 | 385 | } |
a33609ca | 386 | cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused); |
05330448 AL |
387 | |
388 | for (i = 0x80000000; i <= limit; i++) { | |
bb0300dc | 389 | c = &cpuid_data.entries[cpuid_i++]; |
05330448 | 390 | |
05330448 | 391 | c->function = i; |
a33609ca AL |
392 | c->flags = 0; |
393 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); | |
05330448 AL |
394 | } |
395 | ||
396 | cpuid_data.cpuid.nent = cpuid_i; | |
397 | ||
e7701825 MT |
398 | #ifdef KVM_CAP_MCE |
399 | if (((env->cpuid_version >> 8)&0xF) >= 6 | |
400 | && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA) | |
401 | && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) { | |
402 | uint64_t mcg_cap; | |
403 | int banks; | |
404 | ||
405 | if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks)) | |
406 | perror("kvm_get_mce_cap_supported FAILED"); | |
407 | else { | |
408 | if (banks > MCE_BANKS_DEF) | |
409 | banks = MCE_BANKS_DEF; | |
410 | mcg_cap &= MCE_CAP_DEF; | |
411 | mcg_cap |= banks; | |
412 | if (kvm_setup_mce(env, &mcg_cap)) | |
413 | perror("kvm_setup_mce FAILED"); | |
414 | else | |
415 | env->mcg_cap = mcg_cap; | |
416 | } | |
417 | } | |
418 | #endif | |
419 | ||
486bd5a2 | 420 | return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data); |
05330448 AL |
421 | } |
422 | ||
caa5af0f JK |
423 | void kvm_arch_reset_vcpu(CPUState *env) |
424 | { | |
e73223a5 | 425 | env->exception_injected = -1; |
0e607a80 | 426 | env->interrupt_injected = -1; |
a0fb002c JK |
427 | env->nmi_injected = 0; |
428 | env->nmi_pending = 0; | |
ddced198 MT |
429 | if (kvm_irqchip_in_kernel()) { |
430 | env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE : | |
431 | KVM_MP_STATE_UNINITIALIZED; | |
432 | } else { | |
433 | env->mp_state = KVM_MP_STATE_RUNNABLE; | |
434 | } | |
caa5af0f JK |
435 | } |
436 | ||
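/* Cached check for whether the kernel's MSR save/restore list contains
 * MSR_STAR; it is not available on all hosts. */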
05330448 AL |
437 | static int kvm_has_msr_star(CPUState *env) |
438 | { | |
439 | static int has_msr_star; | |
440 | int ret; | |
441 | ||
442 | /* first time */ | |
443 | if (has_msr_star == 0) { | |
444 | struct kvm_msr_list msr_list, *kvm_msr_list; | |
445 | ||
446 | has_msr_star = -1; | |
447 | ||
448 | /* Obtain MSR list from KVM. These are the MSRs that we must | |
449 | * save/restore */ | |
4c9f7372 | 450 | msr_list.nmsrs = 0; |
05330448 | 451 | ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list); |
6fb6d245 | 452 | if (ret < 0 && ret != -E2BIG) { |
05330448 | 453 | return 0; |
6fb6d245 | 454 | } |
d9db889f JK |
455 | /* Old kernel modules had a bug and could write beyond the provided |
456 | memory. Allocate at least a safe amount of 1K. */ | |
457 | kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) + | |
458 | msr_list.nmsrs * | |
459 | sizeof(msr_list.indices[0]))); | |
05330448 | 460 | |
55308450 | 461 | kvm_msr_list->nmsrs = msr_list.nmsrs; |
05330448 AL |
462 | ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); |
463 | if (ret >= 0) { | |
464 | int i; | |
465 | ||
466 | for (i = 0; i < kvm_msr_list->nmsrs; i++) { | |
467 | if (kvm_msr_list->indices[i] == MSR_STAR) { | |
468 | has_msr_star = 1; | |
469 | break; | |
470 | } | |
471 | } | |
472 | } | |
473 | ||
474 | free(kvm_msr_list); | |
475 | } | |
476 | ||
477 | if (has_msr_star == 1) | |
478 | return 1; | |
479 | return 0; | |
480 | } | |
481 | ||
20420430 SY |
482 | static int kvm_init_identity_map_page(KVMState *s) |
483 | { | |
484 | #ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR | |
485 | int ret; | |
486 | uint64_t addr = 0xfffbc000; | |
487 | ||
488 | if (!kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) { | |
489 | return 0; | |
490 | } | |
491 | ||
492 | ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &addr); | |
493 | if (ret < 0) { | |
494 | fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(ret)); | |
495 | return ret; | |
496 | } | |
497 | #endif | |
498 | return 0; | |
499 | } | |
500 | ||
05330448 AL |
501 | int kvm_arch_init(KVMState *s, int smp_cpus) |
502 | { | |
503 | int ret; | |
504 | ||
505 | /* create vm86 tss. KVM uses vm86 mode to emulate 16-bit code | |
506 | * directly. In order to use vm86 mode, a TSS is needed. Since this | |
507 | * must be part of guest physical memory, we need to allocate it. Older | |
508 | * versions of KVM just assumed that it would be at the end of physical | |
509 | * memory but that doesn't work with more than 4GB of memory. We simply | |
510 | * refuse to work with those older versions of KVM. */ | |
984b5181 | 511 | ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR); |
05330448 AL |
512 | if (ret <= 0) { |
513 | fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n"); | |
514 | return ret; | |
515 | } | |
516 | ||
517 | /* this address is 3 pages before the bios, and the bios should present it |
518 | * as unavailable memory. FIXME, need to ensure the e820 map deals with |
519 | * this? | |
520 | */ | |
4c5b10b7 JS |
521 | /* |
522 | * Tell fw_cfg to notify the BIOS to reserve the range. | |
523 | */ | |
524 | if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) { | |
525 | perror("e820_add_entry() table is full"); | |
526 | exit(1); | |
527 | } | |
20420430 SY |
528 | ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000); |
529 | if (ret < 0) { | |
530 | return ret; | |
531 | } | |
532 | ||
533 | return kvm_init_identity_map_page(s); | |
05330448 AL |
534 | } |
535 | ||
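/* Segment helpers: translate between QEMU's SegmentCache flags and the
 * unpacked fields of struct kvm_segment. */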
536 | static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs) | |
537 | { | |
538 | lhs->selector = rhs->selector; | |
539 | lhs->base = rhs->base; | |
540 | lhs->limit = rhs->limit; | |
541 | lhs->type = 3; | |
542 | lhs->present = 1; | |
543 | lhs->dpl = 3; | |
544 | lhs->db = 0; | |
545 | lhs->s = 1; | |
546 | lhs->l = 0; | |
547 | lhs->g = 0; | |
548 | lhs->avl = 0; | |
549 | lhs->unusable = 0; | |
550 | } | |
551 | ||
552 | static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs) | |
553 | { | |
554 | unsigned flags = rhs->flags; | |
555 | lhs->selector = rhs->selector; | |
556 | lhs->base = rhs->base; | |
557 | lhs->limit = rhs->limit; | |
558 | lhs->type = (flags >> DESC_TYPE_SHIFT) & 15; | |
559 | lhs->present = (flags & DESC_P_MASK) != 0; | |
560 | lhs->dpl = rhs->selector & 3; | |
561 | lhs->db = (flags >> DESC_B_SHIFT) & 1; | |
562 | lhs->s = (flags & DESC_S_MASK) != 0; | |
563 | lhs->l = (flags >> DESC_L_SHIFT) & 1; | |
564 | lhs->g = (flags & DESC_G_MASK) != 0; | |
565 | lhs->avl = (flags & DESC_AVL_MASK) != 0; | |
566 | lhs->unusable = 0; | |
567 | } | |
568 | ||
569 | static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs) | |
570 | { | |
571 | lhs->selector = rhs->selector; | |
572 | lhs->base = rhs->base; | |
573 | lhs->limit = rhs->limit; | |
574 | lhs->flags = | |
575 | (rhs->type << DESC_TYPE_SHIFT) | |
576 | | (rhs->present * DESC_P_MASK) | |
577 | | (rhs->dpl << DESC_DPL_SHIFT) | |
578 | | (rhs->db << DESC_B_SHIFT) | |
579 | | (rhs->s * DESC_S_MASK) | |
580 | | (rhs->l << DESC_L_SHIFT) | |
581 | | (rhs->g * DESC_G_MASK) | |
582 | | (rhs->avl * DESC_AVL_MASK); | |
583 | } | |
584 | ||
585 | static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set) | |
586 | { | |
587 | if (set) | |
588 | *kvm_reg = *qemu_reg; | |
589 | else | |
590 | *qemu_reg = *kvm_reg; | |
591 | } | |
592 | ||
593 | static int kvm_getput_regs(CPUState *env, int set) | |
594 | { | |
595 | struct kvm_regs regs; | |
596 | int ret = 0; | |
597 | ||
598 | if (!set) { | |
599 | ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs); |
600 | if (ret < 0) | |
601 | return ret; | |
602 | } | |
603 | ||
604 | kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set); |
605 | kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set); |
606 | kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set); |
607 | kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set); |
608 | kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set); |
609 | kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set); |
610 | kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set); |
611 | kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set); |
612 | #ifdef TARGET_X86_64 |
613 | kvm_getput_reg(&regs.r8, &env->regs[8], set); |
614 | kvm_getput_reg(&regs.r9, &env->regs[9], set); |
615 | kvm_getput_reg(&regs.r10, &env->regs[10], set); |
616 | kvm_getput_reg(&regs.r11, &env->regs[11], set); |
617 | kvm_getput_reg(&regs.r12, &env->regs[12], set); |
618 | kvm_getput_reg(&regs.r13, &env->regs[13], set); |
619 | kvm_getput_reg(&regs.r14, &env->regs[14], set); |
620 | kvm_getput_reg(&regs.r15, &env->regs[15], set); |
621 | #endif |
622 | ||
623 | kvm_getput_reg(&regs.rflags, &env->eflags, set); |
624 | kvm_getput_reg(&regs.rip, &env->eip, set); |
625 | ||
626 | if (set) | |
627 | ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs); |
628 | ||
629 | return ret; | |
630 | } | |
631 | ||
632 | static int kvm_put_fpu(CPUState *env) | |
633 | { | |
634 | struct kvm_fpu fpu; | |
635 | int i; | |
636 | ||
637 | memset(&fpu, 0, sizeof fpu); | |
638 | fpu.fsw = env->fpus & ~(7 << 11); | |
639 | fpu.fsw |= (env->fpstt & 7) << 11; | |
640 | fpu.fcw = env->fpuc; | |
641 | for (i = 0; i < 8; ++i) | |
642 | fpu.ftwx |= (!env->fptags[i]) << i; | |
643 | memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs); | |
644 | memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs); | |
645 | fpu.mxcsr = env->mxcsr; | |
646 | ||
647 | return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu); | |
648 | } | |
649 | ||
f1665b21 SY |
650 | #ifdef KVM_CAP_XSAVE |
651 | #define XSAVE_CWD_RIP 2 | |
652 | #define XSAVE_CWD_RDP 4 | |
653 | #define XSAVE_MXCSR 6 | |
654 | #define XSAVE_ST_SPACE 8 | |
655 | #define XSAVE_XMM_SPACE 40 | |
656 | #define XSAVE_XSTATE_BV 128 | |
657 | #define XSAVE_YMMH_SPACE 144 | |
658 | #endif | |
659 | ||
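/* Copy the FPU/SSE/AVX state into the kernel's kvm_xsave region layout
 * (word offsets defined above); falls back to KVM_SET_FPU when XSAVE is
 * not available. */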
660 | static int kvm_put_xsave(CPUState *env) | |
661 | { | |
662 | #ifdef KVM_CAP_XSAVE | |
663 | int i; | |
664 | struct kvm_xsave* xsave; | |
665 | uint16_t cwd, swd, twd, fop; | |
666 | ||
667 | if (!kvm_has_xsave()) | |
668 | return kvm_put_fpu(env); | |
669 | ||
670 | xsave = qemu_memalign(4096, sizeof(struct kvm_xsave)); | |
671 | memset(xsave, 0, sizeof(struct kvm_xsave)); | |
672 | cwd = swd = twd = fop = 0; | |
673 | swd = env->fpus & ~(7 << 11); | |
674 | swd |= (env->fpstt & 7) << 11; | |
675 | cwd = env->fpuc; | |
676 | for (i = 0; i < 8; ++i) | |
677 | twd |= (!env->fptags[i]) << i; | |
678 | xsave->region[0] = (uint32_t)(swd << 16) + cwd; | |
679 | xsave->region[1] = (uint32_t)(fop << 16) + twd; | |
680 | memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs, | |
681 | sizeof env->fpregs); | |
682 | memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs, | |
683 | sizeof env->xmm_regs); | |
684 | xsave->region[XSAVE_MXCSR] = env->mxcsr; | |
685 | *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv; | |
686 | memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs, | |
687 | sizeof env->ymmh_regs); | |
688 | return kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave); | |
689 | #else | |
690 | return kvm_put_fpu(env); | |
691 | #endif | |
692 | } | |
693 | ||
694 | static int kvm_put_xcrs(CPUState *env) | |
695 | { | |
696 | #ifdef KVM_CAP_XCRS | |
697 | struct kvm_xcrs xcrs; | |
698 | ||
699 | if (!kvm_has_xcrs()) | |
700 | return 0; | |
701 | ||
702 | xcrs.nr_xcrs = 1; | |
703 | xcrs.flags = 0; | |
704 | xcrs.xcrs[0].xcr = 0; | |
705 | xcrs.xcrs[0].value = env->xcr0; | |
706 | return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs); | |
707 | #else | |
708 | return 0; | |
709 | #endif | |
710 | } | |
711 | ||
05330448 AL |
712 | static int kvm_put_sregs(CPUState *env) |
713 | { | |
714 | struct kvm_sregs sregs; | |
715 | ||
0e607a80 JK |
716 | memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap)); |
717 | if (env->interrupt_injected >= 0) { | |
718 | sregs.interrupt_bitmap[env->interrupt_injected / 64] |= | |
719 | (uint64_t)1 << (env->interrupt_injected % 64); | |
720 | } | |
05330448 AL |
721 | |
722 | if ((env->eflags & VM_MASK)) { | |
723 | set_v8086_seg(&sregs.cs, &env->segs[R_CS]); | |
724 | set_v8086_seg(&sregs.ds, &env->segs[R_DS]); | |
725 | set_v8086_seg(&sregs.es, &env->segs[R_ES]); | |
726 | set_v8086_seg(&sregs.fs, &env->segs[R_FS]); | |
727 | set_v8086_seg(&sregs.gs, &env->segs[R_GS]); | |
728 | set_v8086_seg(&sregs.ss, &env->segs[R_SS]); | |
729 | } else { | |
730 | set_seg(&sregs.cs, &env->segs[R_CS]); | |
731 | set_seg(&sregs.ds, &env->segs[R_DS]); | |
732 | set_seg(&sregs.es, &env->segs[R_ES]); | |
733 | set_seg(&sregs.fs, &env->segs[R_FS]); | |
734 | set_seg(&sregs.gs, &env->segs[R_GS]); | |
735 | set_seg(&sregs.ss, &env->segs[R_SS]); | |
736 | ||
737 | if (env->cr[0] & CR0_PE_MASK) { | |
738 | /* force ss cpl to cs cpl */ | |
739 | sregs.ss.selector = (sregs.ss.selector & ~3) | | |
740 | (sregs.cs.selector & 3); | |
741 | sregs.ss.dpl = sregs.ss.selector & 3; | |
742 | } | |
743 | } | |
744 | ||
745 | set_seg(&sregs.tr, &env->tr); | |
746 | set_seg(&sregs.ldt, &env->ldt); | |
747 | ||
748 | sregs.idt.limit = env->idt.limit; | |
749 | sregs.idt.base = env->idt.base; | |
750 | sregs.gdt.limit = env->gdt.limit; | |
751 | sregs.gdt.base = env->gdt.base; | |
752 | ||
753 | sregs.cr0 = env->cr[0]; | |
754 | sregs.cr2 = env->cr[2]; | |
755 | sregs.cr3 = env->cr[3]; | |
756 | sregs.cr4 = env->cr[4]; | |
757 | ||
4a942cea BS |
758 | sregs.cr8 = cpu_get_apic_tpr(env->apic_state); |
759 | sregs.apic_base = cpu_get_apic_base(env->apic_state); | |
05330448 AL |
760 | |
761 | sregs.efer = env->efer; | |
762 | ||
763 | return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs); | |
764 | } | |
765 | ||
766 | static void kvm_msr_entry_set(struct kvm_msr_entry *entry, | |
767 | uint32_t index, uint64_t value) | |
768 | { | |
769 | entry->index = index; | |
770 | entry->data = value; | |
771 | } | |
772 | ||
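/* Write the guest-visible MSRs to the kernel. 'level' controls how much is
 * written: TSC and kvmclock MSRs only on a full state load, MCE registers
 * on reset or full load, the rest always. */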
ea643051 | 773 | static int kvm_put_msrs(CPUState *env, int level) |
05330448 AL |
774 | { |
775 | struct { | |
776 | struct kvm_msrs info; | |
777 | struct kvm_msr_entry entries[100]; | |
778 | } msr_data; | |
779 | struct kvm_msr_entry *msrs = msr_data.entries; | |
57780495 | 780 | int i, n = 0; |
05330448 AL |
781 | |
782 | kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs); | |
783 | kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp); | |
784 | kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip); | |
785 | if (kvm_has_msr_star(env)) | |
786 | kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star); | |
05330448 AL |
787 | #ifdef TARGET_X86_64 |
788 | /* FIXME if lm capable */ | |
789 | kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar); | |
790 | kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase); | |
791 | kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask); | |
792 | kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar); | |
793 | #endif | |
ea643051 JK |
794 | if (level == KVM_PUT_FULL_STATE) { |
795 | kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc); | |
796 | kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME, | |
797 | env->system_time_msr); | |
798 | kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr); | |
799 | } | |
57780495 MT |
800 | #ifdef KVM_CAP_MCE |
801 | if (env->mcg_cap) { | |
802 | if (level == KVM_PUT_RESET_STATE) | |
803 | kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status); | |
804 | else if (level == KVM_PUT_FULL_STATE) { | |
805 | kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status); | |
806 | kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl); | |
807 | for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) | |
808 | kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]); | |
809 | } | |
810 | } | |
811 | #endif | |
1a03675d | 812 | |
05330448 AL |
813 | msr_data.info.nmsrs = n; |
814 | ||
815 | return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data); | |
816 | ||
817 | } | |
818 | ||
819 | ||
820 | static int kvm_get_fpu(CPUState *env) | |
821 | { | |
822 | struct kvm_fpu fpu; | |
823 | int i, ret; | |
824 | ||
825 | ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu); | |
826 | if (ret < 0) | |
827 | return ret; | |
828 | ||
829 | env->fpstt = (fpu.fsw >> 11) & 7; | |
830 | env->fpus = fpu.fsw; | |
831 | env->fpuc = fpu.fcw; | |
832 | for (i = 0; i < 8; ++i) | |
833 | env->fptags[i] = !((fpu.ftwx >> i) & 1); | |
834 | memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs); | |
835 | memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs); | |
836 | env->mxcsr = fpu.mxcsr; | |
837 | ||
838 | return 0; | |
839 | } | |
840 | ||
f1665b21 SY |
841 | static int kvm_get_xsave(CPUState *env) |
842 | { | |
843 | #ifdef KVM_CAP_XSAVE | |
844 | struct kvm_xsave* xsave; | |
845 | int ret, i; | |
846 | uint16_t cwd, swd, twd, fop; | |
847 | ||
848 | if (!kvm_has_xsave()) | |
849 | return kvm_get_fpu(env); | |
850 | ||
851 | xsave = qemu_memalign(4096, sizeof(struct kvm_xsave)); | |
852 | ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave); | |
853 | if (ret < 0) | |
854 | return ret; | |
855 | ||
856 | cwd = (uint16_t)xsave->region[0]; | |
857 | swd = (uint16_t)(xsave->region[0] >> 16); | |
858 | twd = (uint16_t)xsave->region[1]; | |
859 | fop = (uint16_t)(xsave->region[1] >> 16); | |
860 | env->fpstt = (swd >> 11) & 7; | |
861 | env->fpus = swd; | |
862 | env->fpuc = cwd; | |
863 | for (i = 0; i < 8; ++i) | |
864 | env->fptags[i] = !((twd >> i) & 1); | |
865 | env->mxcsr = xsave->region[XSAVE_MXCSR]; | |
866 | memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE], | |
867 | sizeof env->fpregs); | |
868 | memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE], | |
869 | sizeof env->xmm_regs); | |
870 | env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV]; | |
871 | memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE], | |
872 | sizeof env->ymmh_regs); | |
873 | return 0; | |
874 | #else | |
875 | return kvm_get_fpu(env); | |
876 | #endif | |
877 | } | |
878 | ||
879 | static int kvm_get_xcrs(CPUState *env) | |
880 | { | |
881 | #ifdef KVM_CAP_XCRS | |
882 | int i, ret; | |
883 | struct kvm_xcrs xcrs; | |
884 | ||
885 | if (!kvm_has_xcrs()) | |
886 | return 0; | |
887 | ||
888 | ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs); | |
889 | if (ret < 0) | |
890 | return ret; | |
891 | ||
892 | for (i = 0; i < xcrs.nr_xcrs; i++) | |
893 | /* Only support xcr0 now */ | |
894 | if (xcrs.xcrs[0].xcr == 0) { | |
895 | env->xcr0 = xcrs.xcrs[0].value; | |
896 | break; | |
897 | } | |
898 | return 0; | |
899 | #else | |
900 | return 0; | |
901 | #endif | |
902 | } | |
903 | ||
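/* Read segment, descriptor-table and control registers back from KVM and
 * recompute env->hflags from the new CR0/CR4/EFER and segment state. */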
05330448 AL |
904 | static int kvm_get_sregs(CPUState *env) |
905 | { | |
906 | struct kvm_sregs sregs; | |
907 | uint32_t hflags; | |
0e607a80 | 908 | int bit, i, ret; |
05330448 AL |
909 | |
910 | ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); | |
911 | if (ret < 0) | |
912 | return ret; | |
913 | ||
0e607a80 JK |
914 | /* There can only be one pending IRQ set in the bitmap at a time, so try |
915 | to find it and save its number instead (-1 for none). */ | |
916 | env->interrupt_injected = -1; | |
917 | for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) { | |
918 | if (sregs.interrupt_bitmap[i]) { | |
919 | bit = ctz64(sregs.interrupt_bitmap[i]); | |
920 | env->interrupt_injected = i * 64 + bit; | |
921 | break; | |
922 | } | |
923 | } | |
05330448 AL |
924 | |
925 | get_seg(&env->segs[R_CS], &sregs.cs); | |
926 | get_seg(&env->segs[R_DS], &sregs.ds); | |
927 | get_seg(&env->segs[R_ES], &sregs.es); | |
928 | get_seg(&env->segs[R_FS], &sregs.fs); | |
929 | get_seg(&env->segs[R_GS], &sregs.gs); | |
930 | get_seg(&env->segs[R_SS], &sregs.ss); | |
931 | ||
932 | get_seg(&env->tr, &sregs.tr); | |
933 | get_seg(&env->ldt, &sregs.ldt); | |
934 | ||
935 | env->idt.limit = sregs.idt.limit; | |
936 | env->idt.base = sregs.idt.base; | |
937 | env->gdt.limit = sregs.gdt.limit; | |
938 | env->gdt.base = sregs.gdt.base; | |
939 | ||
940 | env->cr[0] = sregs.cr0; | |
941 | env->cr[2] = sregs.cr2; | |
942 | env->cr[3] = sregs.cr3; | |
943 | env->cr[4] = sregs.cr4; | |
944 | ||
4a942cea | 945 | cpu_set_apic_base(env->apic_state, sregs.apic_base); |
05330448 AL |
946 | |
947 | env->efer = sregs.efer; | |
4a942cea | 948 | //cpu_set_apic_tpr(env->apic_state, sregs.cr8); |
05330448 AL |
949 | |
950 | #define HFLAG_COPY_MASK ~( \ | |
951 | HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ | |
952 | HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ | |
953 | HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ | |
954 | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) | |
955 | ||
956 | ||
957 | ||
958 | hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; | |
959 | hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); | |
960 | hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & | |
961 | (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); | |
962 | hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); | |
963 | hflags |= (env->cr[4] & CR4_OSFXSR_MASK) << | |
964 | (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT); | |
965 | ||
966 | if (env->efer & MSR_EFER_LMA) { | |
967 | hflags |= HF_LMA_MASK; | |
968 | } | |
969 | ||
970 | if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { | |
971 | hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; | |
972 | } else { | |
973 | hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> | |
974 | (DESC_B_SHIFT - HF_CS32_SHIFT); | |
975 | hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> | |
976 | (DESC_B_SHIFT - HF_SS32_SHIFT); | |
977 | if (!(env->cr[0] & CR0_PE_MASK) || | |
978 | (env->eflags & VM_MASK) || | |
979 | !(hflags & HF_CS32_MASK)) { | |
980 | hflags |= HF_ADDSEG_MASK; | |
981 | } else { | |
982 | hflags |= ((env->segs[R_DS].base | | |
983 | env->segs[R_ES].base | | |
984 | env->segs[R_SS].base) != 0) << | |
985 | HF_ADDSEG_SHIFT; | |
986 | } | |
987 | } | |
988 | env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags; | |
05330448 AL |
989 | |
990 | return 0; | |
991 | } | |
992 | ||
993 | static int kvm_get_msrs(CPUState *env) | |
994 | { | |
995 | struct { | |
996 | struct kvm_msrs info; | |
997 | struct kvm_msr_entry entries[100]; | |
998 | } msr_data; | |
999 | struct kvm_msr_entry *msrs = msr_data.entries; | |
1000 | int ret, i, n; | |
1001 | ||
1002 | n = 0; | |
1003 | msrs[n++].index = MSR_IA32_SYSENTER_CS; | |
1004 | msrs[n++].index = MSR_IA32_SYSENTER_ESP; | |
1005 | msrs[n++].index = MSR_IA32_SYSENTER_EIP; | |
1006 | if (kvm_has_msr_star(env)) | |
1007 | msrs[n++].index = MSR_STAR; | |
1008 | msrs[n++].index = MSR_IA32_TSC; | |
1009 | #ifdef TARGET_X86_64 | |
1010 | /* FIXME lm_capable_kernel */ | |
1011 | msrs[n++].index = MSR_CSTAR; | |
1012 | msrs[n++].index = MSR_KERNELGSBASE; | |
1013 | msrs[n++].index = MSR_FMASK; | |
1014 | msrs[n++].index = MSR_LSTAR; | |
1015 | #endif | |
1a03675d GC |
1016 | msrs[n++].index = MSR_KVM_SYSTEM_TIME; |
1017 | msrs[n++].index = MSR_KVM_WALL_CLOCK; | |
1018 | ||
57780495 MT |
1019 | #ifdef KVM_CAP_MCE |
1020 | if (env->mcg_cap) { | |
1021 | msrs[n++].index = MSR_MCG_STATUS; | |
1022 | msrs[n++].index = MSR_MCG_CTL; | |
1023 | for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) | |
1024 | msrs[n++].index = MSR_MC0_CTL + i; | |
1025 | } | |
1026 | #endif | |
1027 | ||
05330448 AL |
1028 | msr_data.info.nmsrs = n; |
1029 | ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data); | |
1030 | if (ret < 0) | |
1031 | return ret; | |
1032 | ||
1033 | for (i = 0; i < ret; i++) { | |
1034 | switch (msrs[i].index) { | |
1035 | case MSR_IA32_SYSENTER_CS: | |
1036 | env->sysenter_cs = msrs[i].data; | |
1037 | break; | |
1038 | case MSR_IA32_SYSENTER_ESP: | |
1039 | env->sysenter_esp = msrs[i].data; | |
1040 | break; | |
1041 | case MSR_IA32_SYSENTER_EIP: | |
1042 | env->sysenter_eip = msrs[i].data; | |
1043 | break; | |
1044 | case MSR_STAR: | |
1045 | env->star = msrs[i].data; | |
1046 | break; | |
1047 | #ifdef TARGET_X86_64 | |
1048 | case MSR_CSTAR: | |
1049 | env->cstar = msrs[i].data; | |
1050 | break; | |
1051 | case MSR_KERNELGSBASE: | |
1052 | env->kernelgsbase = msrs[i].data; | |
1053 | break; | |
1054 | case MSR_FMASK: | |
1055 | env->fmask = msrs[i].data; | |
1056 | break; | |
1057 | case MSR_LSTAR: | |
1058 | env->lstar = msrs[i].data; | |
1059 | break; | |
1060 | #endif | |
1061 | case MSR_IA32_TSC: | |
1062 | env->tsc = msrs[i].data; | |
1063 | break; | |
1a03675d GC |
1064 | case MSR_KVM_SYSTEM_TIME: |
1065 | env->system_time_msr = msrs[i].data; | |
1066 | break; | |
1067 | case MSR_KVM_WALL_CLOCK: | |
1068 | env->wall_clock_msr = msrs[i].data; | |
1069 | break; | |
57780495 MT |
1070 | #ifdef KVM_CAP_MCE |
1071 | case MSR_MCG_STATUS: | |
1072 | env->mcg_status = msrs[i].data; | |
1073 | break; | |
1074 | case MSR_MCG_CTL: | |
1075 | env->mcg_ctl = msrs[i].data; | |
1076 | break; | |
1077 | #endif | |
1078 | default: | |
1079 | #ifdef KVM_CAP_MCE | |
1080 | if (msrs[i].index >= MSR_MC0_CTL && | |
1081 | msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { | |
1082 | env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; | |
1083 | break; | |
1084 | } | |
1085 | #endif | |
05330448 AL |
1086 | } |
1087 | } | |
1088 | ||
1089 | return 0; | |
1090 | } | |
1091 | ||
9bdbe550 HB |
1092 | static int kvm_put_mp_state(CPUState *env) |
1093 | { | |
1094 | struct kvm_mp_state mp_state = { .mp_state = env->mp_state }; | |
1095 | ||
1096 | return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state); | |
1097 | } | |
1098 | ||
1099 | static int kvm_get_mp_state(CPUState *env) | |
1100 | { | |
1101 | struct kvm_mp_state mp_state; | |
1102 | int ret; | |
1103 | ||
1104 | ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state); | |
1105 | if (ret < 0) { | |
1106 | return ret; | |
1107 | } | |
1108 | env->mp_state = mp_state.mp_state; | |
1109 | return 0; | |
1110 | } | |
1111 | ||
ea643051 | 1112 | static int kvm_put_vcpu_events(CPUState *env, int level) |
a0fb002c JK |
1113 | { |
1114 | #ifdef KVM_CAP_VCPU_EVENTS | |
1115 | struct kvm_vcpu_events events; | |
1116 | ||
1117 | if (!kvm_has_vcpu_events()) { | |
1118 | return 0; | |
1119 | } | |
1120 | ||
31827373 JK |
1121 | events.exception.injected = (env->exception_injected >= 0); |
1122 | events.exception.nr = env->exception_injected; | |
a0fb002c JK |
1123 | events.exception.has_error_code = env->has_error_code; |
1124 | events.exception.error_code = env->error_code; | |
1125 | ||
1126 | events.interrupt.injected = (env->interrupt_injected >= 0); | |
1127 | events.interrupt.nr = env->interrupt_injected; | |
1128 | events.interrupt.soft = env->soft_interrupt; | |
1129 | ||
1130 | events.nmi.injected = env->nmi_injected; | |
1131 | events.nmi.pending = env->nmi_pending; | |
1132 | events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); | |
1133 | ||
1134 | events.sipi_vector = env->sipi_vector; | |
1135 | ||
ea643051 JK |
1136 | events.flags = 0; |
1137 | if (level >= KVM_PUT_RESET_STATE) { | |
1138 | events.flags |= | |
1139 | KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR; | |
1140 | } | |
aee028b9 | 1141 | |
a0fb002c JK |
1142 | return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events); |
1143 | #else | |
1144 | return 0; | |
1145 | #endif | |
1146 | } | |
1147 | ||
1148 | static int kvm_get_vcpu_events(CPUState *env) | |
1149 | { | |
1150 | #ifdef KVM_CAP_VCPU_EVENTS | |
1151 | struct kvm_vcpu_events events; | |
1152 | int ret; | |
1153 | ||
1154 | if (!kvm_has_vcpu_events()) { | |
1155 | return 0; | |
1156 | } | |
1157 | ||
1158 | ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events); | |
1159 | if (ret < 0) { | |
1160 | return ret; | |
1161 | } | |
31827373 | 1162 | env->exception_injected = |
a0fb002c JK |
1163 | events.exception.injected ? events.exception.nr : -1; |
1164 | env->has_error_code = events.exception.has_error_code; | |
1165 | env->error_code = events.exception.error_code; | |
1166 | ||
1167 | env->interrupt_injected = | |
1168 | events.interrupt.injected ? events.interrupt.nr : -1; | |
1169 | env->soft_interrupt = events.interrupt.soft; | |
1170 | ||
1171 | env->nmi_injected = events.nmi.injected; | |
1172 | env->nmi_pending = events.nmi.pending; | |
1173 | if (events.nmi.masked) { | |
1174 | env->hflags2 |= HF2_NMI_MASK; | |
1175 | } else { | |
1176 | env->hflags2 &= ~HF2_NMI_MASK; | |
1177 | } | |
1178 | ||
1179 | env->sipi_vector = events.sipi_vector; | |
1180 | #endif | |
1181 | ||
1182 | return 0; | |
1183 | } | |
1184 | ||
b0b1d690 JK |
1185 | static int kvm_guest_debug_workarounds(CPUState *env) |
1186 | { | |
1187 | int ret = 0; | |
1188 | #ifdef KVM_CAP_SET_GUEST_DEBUG | |
1189 | unsigned long reinject_trap = 0; | |
1190 | ||
1191 | if (!kvm_has_vcpu_events()) { | |
1192 | if (env->exception_injected == 1) { | |
1193 | reinject_trap = KVM_GUESTDBG_INJECT_DB; | |
1194 | } else if (env->exception_injected == 3) { | |
1195 | reinject_trap = KVM_GUESTDBG_INJECT_BP; | |
1196 | } | |
1197 | env->exception_injected = -1; | |
1198 | } | |
1199 | ||
1200 | /* | |
1201 | * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF | |
1202 | * injected via SET_GUEST_DEBUG while updating GP regs. Work around this | |
1203 | * by updating the debug state once again if single-stepping is on. | |
1204 | * Another reason to call kvm_update_guest_debug here is a pending debug | |
1205 | * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to |
1206 | * reinject them via SET_GUEST_DEBUG. | |
1207 | */ | |
1208 | if (reinject_trap || | |
1209 | (!kvm_has_robust_singlestep() && env->singlestep_enabled)) { | |
1210 | ret = kvm_update_guest_debug(env, reinject_trap); | |
1211 | } | |
1212 | #endif /* KVM_CAP_SET_GUEST_DEBUG */ | |
1213 | return ret; | |
1214 | } | |
1215 | ||
ff44f1a3 JK |
1216 | static int kvm_put_debugregs(CPUState *env) |
1217 | { | |
1218 | #ifdef KVM_CAP_DEBUGREGS | |
1219 | struct kvm_debugregs dbgregs; | |
1220 | int i; | |
1221 | ||
1222 | if (!kvm_has_debugregs()) { | |
1223 | return 0; | |
1224 | } | |
1225 | ||
1226 | for (i = 0; i < 4; i++) { | |
1227 | dbgregs.db[i] = env->dr[i]; | |
1228 | } | |
1229 | dbgregs.dr6 = env->dr[6]; | |
1230 | dbgregs.dr7 = env->dr[7]; | |
1231 | dbgregs.flags = 0; | |
1232 | ||
1233 | return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs); | |
1234 | #else | |
1235 | return 0; | |
1236 | #endif | |
1237 | } | |
1238 | ||
1239 | static int kvm_get_debugregs(CPUState *env) | |
1240 | { | |
1241 | #ifdef KVM_CAP_DEBUGREGS | |
1242 | struct kvm_debugregs dbgregs; | |
1243 | int i, ret; | |
1244 | ||
1245 | if (!kvm_has_debugregs()) { | |
1246 | return 0; | |
1247 | } | |
1248 | ||
1249 | ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs); | |
1250 | if (ret < 0) { | |
1251 | return ret; | |
1252 | } | |
1253 | for (i = 0; i < 4; i++) { | |
1254 | env->dr[i] = dbgregs.db[i]; | |
1255 | } | |
1256 | env->dr[4] = env->dr[6] = dbgregs.dr6; | |
1257 | env->dr[5] = env->dr[7] = dbgregs.dr7; | |
1258 | #endif | |
1259 | ||
1260 | return 0; | |
1261 | } | |
1262 | ||
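/* Push all vCPU state to the kernel; 'level' selects between runtime sync,
 * reset state and full initialization (see the KVM_PUT_*_STATE levels). */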
ea375f9a | 1263 | int kvm_arch_put_registers(CPUState *env, int level) |
05330448 AL |
1264 | { |
1265 | int ret; | |
1266 | ||
dbaa07c4 JK |
1267 | assert(cpu_is_stopped(env) || qemu_cpu_self(env)); |
1268 | ||
05330448 AL |
1269 | ret = kvm_getput_regs(env, 1); |
1270 | if (ret < 0) | |
1271 | return ret; | |
1272 | ||
f1665b21 SY |
1273 | ret = kvm_put_xsave(env); |
1274 | if (ret < 0) | |
1275 | return ret; | |
1276 | ||
1277 | ret = kvm_put_xcrs(env); | |
05330448 AL |
1278 | if (ret < 0) |
1279 | return ret; | |
1280 | ||
1281 | ret = kvm_put_sregs(env); | |
1282 | if (ret < 0) | |
1283 | return ret; | |
1284 | ||
ea643051 | 1285 | ret = kvm_put_msrs(env, level); |
05330448 AL |
1286 | if (ret < 0) |
1287 | return ret; | |
1288 | ||
ea643051 JK |
1289 | if (level >= KVM_PUT_RESET_STATE) { |
1290 | ret = kvm_put_mp_state(env); | |
1291 | if (ret < 0) | |
1292 | return ret; | |
1293 | } | |
f8d926e9 | 1294 | |
ea643051 | 1295 | ret = kvm_put_vcpu_events(env, level); |
a0fb002c JK |
1296 | if (ret < 0) |
1297 | return ret; | |
1298 | ||
b0b1d690 JK |
1299 | /* must be last */ |
1300 | ret = kvm_guest_debug_workarounds(env); | |
1301 | if (ret < 0) | |
1302 | return ret; | |
1303 | ||
ff44f1a3 JK |
1304 | ret = kvm_put_debugregs(env); |
1305 | if (ret < 0) | |
1306 | return ret; | |
1307 | ||
05330448 AL |
1308 | return 0; |
1309 | } | |
1310 | ||
1311 | int kvm_arch_get_registers(CPUState *env) | |
1312 | { | |
1313 | int ret; | |
1314 | ||
dbaa07c4 JK |
1315 | assert(cpu_is_stopped(env) || qemu_cpu_self(env)); |
1316 | ||
05330448 AL |
1317 | ret = kvm_getput_regs(env, 0); |
1318 | if (ret < 0) | |
1319 | return ret; | |
1320 | ||
f1665b21 SY |
1321 | ret = kvm_get_xsave(env); |
1322 | if (ret < 0) | |
1323 | return ret; | |
1324 | ||
1325 | ret = kvm_get_xcrs(env); | |
05330448 AL |
1326 | if (ret < 0) |
1327 | return ret; | |
1328 | ||
1329 | ret = kvm_get_sregs(env); | |
1330 | if (ret < 0) | |
1331 | return ret; | |
1332 | ||
1333 | ret = kvm_get_msrs(env); | |
1334 | if (ret < 0) | |
1335 | return ret; | |
1336 | ||
5a2e3c2e JK |
1337 | ret = kvm_get_mp_state(env); |
1338 | if (ret < 0) | |
1339 | return ret; | |
1340 | ||
a0fb002c JK |
1341 | ret = kvm_get_vcpu_events(env); |
1342 | if (ret < 0) | |
1343 | return ret; | |
1344 | ||
ff44f1a3 JK |
1345 | ret = kvm_get_debugregs(env); |
1346 | if (ret < 0) | |
1347 | return ret; | |
1348 | ||
05330448 AL |
1349 | return 0; |
1350 | } | |
1351 | ||
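/* Called before entering the guest: inject a pending external interrupt if
 * the guest can take one, otherwise request an interrupt-window exit, and
 * mirror the current TPR into run->cr8. */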
1352 | int kvm_arch_pre_run(CPUState *env, struct kvm_run *run) | |
1353 | { | |
1354 | /* Try to inject an interrupt if the guest can accept it */ | |
1355 | if (run->ready_for_interrupt_injection && | |
1356 | (env->interrupt_request & CPU_INTERRUPT_HARD) && | |
1357 | (env->eflags & IF_MASK)) { | |
1358 | int irq; | |
1359 | ||
1360 | env->interrupt_request &= ~CPU_INTERRUPT_HARD; | |
1361 | irq = cpu_get_pic_interrupt(env); | |
1362 | if (irq >= 0) { | |
1363 | struct kvm_interrupt intr; | |
1364 | intr.irq = irq; | |
1365 | /* FIXME: errors */ | |
8c0d577e | 1366 | DPRINTF("injected interrupt %d\n", irq); |
05330448 AL |
1367 | kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr); |
1368 | } | |
1369 | } | |
1370 | ||
1371 | /* If we have an interrupt but the guest is not ready to receive an | |
1372 | * interrupt, request an interrupt window exit. This will | |
1373 | * cause a return to userspace as soon as the guest is ready to | |
1374 | * receive interrupts. */ | |
1375 | if ((env->interrupt_request & CPU_INTERRUPT_HARD)) | |
1376 | run->request_interrupt_window = 1; | |
1377 | else | |
1378 | run->request_interrupt_window = 0; | |
1379 | ||
8c0d577e | 1380 | DPRINTF("setting tpr\n"); |
4a942cea | 1381 | run->cr8 = cpu_get_apic_tpr(env->apic_state); |
05330448 AL |
1382 | |
1383 | return 0; | |
1384 | } | |
1385 | ||
1386 | int kvm_arch_post_run(CPUState *env, struct kvm_run *run) | |
1387 | { | |
1388 | if (run->if_flag) | |
1389 | env->eflags |= IF_MASK; | |
1390 | else | |
1391 | env->eflags &= ~IF_MASK; | |
1392 | ||
4a942cea BS |
1393 | cpu_set_apic_tpr(env->apic_state, run->cr8); |
1394 | cpu_set_apic_base(env->apic_state, run->apic_base); | |
05330448 AL |
1395 | |
1396 | return 0; | |
1397 | } | |
1398 | ||
0af691d7 MT |
1399 | int kvm_arch_process_irqchip_events(CPUState *env) |
1400 | { | |
1401 | if (env->interrupt_request & CPU_INTERRUPT_INIT) { | |
1402 | kvm_cpu_synchronize_state(env); | |
1403 | do_cpu_init(env); | |
1404 | env->exception_index = EXCP_HALTED; | |
1405 | } | |
1406 | ||
1407 | if (env->interrupt_request & CPU_INTERRUPT_SIPI) { | |
1408 | kvm_cpu_synchronize_state(env); | |
1409 | do_cpu_sipi(env); | |
1410 | } | |
1411 | ||
1412 | return env->halted; | |
1413 | } | |
1414 | ||
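/* HLT exit: keep the vCPU halted unless a deliverable interrupt or an NMI
 * is pending, in which case execution resumes. */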
05330448 AL |
1415 | static int kvm_handle_halt(CPUState *env) |
1416 | { | |
1417 | if (!((env->interrupt_request & CPU_INTERRUPT_HARD) && | |
1418 | (env->eflags & IF_MASK)) && | |
1419 | !(env->interrupt_request & CPU_INTERRUPT_NMI)) { | |
1420 | env->halted = 1; | |
1421 | env->exception_index = EXCP_HLT; | |
1422 | return 0; | |
1423 | } | |
1424 | ||
1425 | return 1; | |
1426 | } | |
1427 | ||
1428 | int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run) | |
1429 | { | |
1430 | int ret = 0; | |
1431 | ||
1432 | switch (run->exit_reason) { | |
1433 | case KVM_EXIT_HLT: | |
8c0d577e | 1434 | DPRINTF("handle_hlt\n"); |
05330448 AL |
1435 | ret = kvm_handle_halt(env); |
1436 | break; | |
1437 | } | |
1438 | ||
1439 | return ret; | |
1440 | } | |
e22a25c9 AL |
1441 | |
1442 | #ifdef KVM_CAP_SET_GUEST_DEBUG | |
e22a25c9 AL |
1443 | int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp) |
1444 | { | |
38972938 | 1445 | static const uint8_t int3 = 0xcc; |
64bf3f4e | 1446 | |
e22a25c9 | 1447 | if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) || |
64bf3f4e | 1448 | cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) |
e22a25c9 AL |
1449 | return -EINVAL; |
1450 | return 0; | |
1451 | } | |
1452 | ||
1453 | int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp) | |
1454 | { | |
1455 | uint8_t int3; | |
1456 | ||
1457 | if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc || | |
64bf3f4e | 1458 | cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) |
e22a25c9 AL |
1459 | return -EINVAL; |
1460 | return 0; | |
1461 | } | |
1462 | ||
1463 | static struct { | |
1464 | target_ulong addr; | |
1465 | int len; | |
1466 | int type; | |
1467 | } hw_breakpoint[4]; | |
1468 | ||
1469 | static int nb_hw_breakpoint; | |
1470 | ||
1471 | static int find_hw_breakpoint(target_ulong addr, int len, int type) | |
1472 | { | |
1473 | int n; | |
1474 | ||
1475 | for (n = 0; n < nb_hw_breakpoint; n++) | |
1476 | if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type && | |
1477 | (hw_breakpoint[n].len == len || len == -1)) | |
1478 | return n; | |
1479 | return -1; | |
1480 | } | |
1481 | ||
1482 | int kvm_arch_insert_hw_breakpoint(target_ulong addr, | |
1483 | target_ulong len, int type) | |
1484 | { | |
1485 | switch (type) { | |
1486 | case GDB_BREAKPOINT_HW: | |
1487 | len = 1; | |
1488 | break; | |
1489 | case GDB_WATCHPOINT_WRITE: | |
1490 | case GDB_WATCHPOINT_ACCESS: | |
1491 | switch (len) { | |
1492 | case 1: | |
1493 | break; | |
1494 | case 2: | |
1495 | case 4: | |
1496 | case 8: | |
1497 | if (addr & (len - 1)) | |
1498 | return -EINVAL; | |
1499 | break; | |
1500 | default: | |
1501 | return -EINVAL; | |
1502 | } | |
1503 | break; | |
1504 | default: | |
1505 | return -ENOSYS; | |
1506 | } | |
1507 | ||
1508 | if (nb_hw_breakpoint == 4) | |
1509 | return -ENOBUFS; | |
1510 | ||
1511 | if (find_hw_breakpoint(addr, len, type) >= 0) | |
1512 | return -EEXIST; | |
1513 | ||
1514 | hw_breakpoint[nb_hw_breakpoint].addr = addr; | |
1515 | hw_breakpoint[nb_hw_breakpoint].len = len; | |
1516 | hw_breakpoint[nb_hw_breakpoint].type = type; | |
1517 | nb_hw_breakpoint++; | |
1518 | ||
1519 | return 0; | |
1520 | } | |
1521 | ||
1522 | int kvm_arch_remove_hw_breakpoint(target_ulong addr, | |
1523 | target_ulong len, int type) | |
1524 | { | |
1525 | int n; | |
1526 | ||
1527 | n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type); | |
1528 | if (n < 0) | |
1529 | return -ENOENT; | |
1530 | ||
1531 | nb_hw_breakpoint--; | |
1532 | hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint]; | |
1533 | ||
1534 | return 0; | |
1535 | } | |
1536 | ||
1537 | void kvm_arch_remove_all_hw_breakpoints(void) | |
1538 | { | |
1539 | nb_hw_breakpoint = 0; | |
1540 | } | |
1541 | ||
1542 | static CPUWatchpoint hw_watchpoint; | |
1543 | ||
1544 | int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info) | |
1545 | { | |
1546 | int handle = 0; | |
1547 | int n; | |
1548 | ||
1549 | if (arch_info->exception == 1) { | |
1550 | if (arch_info->dr6 & (1 << 14)) { | |
1551 | if (cpu_single_env->singlestep_enabled) | |
1552 | handle = 1; | |
1553 | } else { | |
1554 | for (n = 0; n < 4; n++) | |
1555 | if (arch_info->dr6 & (1 << n)) | |
1556 | switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) { | |
1557 | case 0x0: | |
1558 | handle = 1; | |
1559 | break; | |
1560 | case 0x1: | |
1561 | handle = 1; | |
1562 | cpu_single_env->watchpoint_hit = &hw_watchpoint; | |
1563 | hw_watchpoint.vaddr = hw_breakpoint[n].addr; | |
1564 | hw_watchpoint.flags = BP_MEM_WRITE; | |
1565 | break; | |
1566 | case 0x3: | |
1567 | handle = 1; | |
1568 | cpu_single_env->watchpoint_hit = &hw_watchpoint; | |
1569 | hw_watchpoint.vaddr = hw_breakpoint[n].addr; | |
1570 | hw_watchpoint.flags = BP_MEM_ACCESS; | |
1571 | break; | |
1572 | } | |
1573 | } | |
1574 | } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) | |
1575 | handle = 1; | |
1576 | ||
b0b1d690 JK |
1577 | if (!handle) { |
1578 | cpu_synchronize_state(cpu_single_env); | |
1579 | assert(cpu_single_env->exception_injected == -1); | |
1580 | ||
1581 | cpu_single_env->exception_injected = arch_info->exception; | |
1582 | cpu_single_env->has_error_code = 0; | |
1583 | } | |
e22a25c9 AL |
1584 | |
1585 | return handle; | |
1586 | } | |
1587 | ||
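/* Encode the active software/hardware breakpoints into the
 * KVM_SET_GUEST_DEBUG control flags and the DR7-style debugreg array. */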
1588 | void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg) | |
1589 | { | |
1590 | const uint8_t type_code[] = { | |
1591 | [GDB_BREAKPOINT_HW] = 0x0, | |
1592 | [GDB_WATCHPOINT_WRITE] = 0x1, | |
1593 | [GDB_WATCHPOINT_ACCESS] = 0x3 | |
1594 | }; | |
1595 | const uint8_t len_code[] = { | |
1596 | [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2 | |
1597 | }; | |
1598 | int n; | |
1599 | ||
1600 | if (kvm_sw_breakpoints_active(env)) | |
1601 | dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; | |
1602 | ||
1603 | if (nb_hw_breakpoint > 0) { | |
1604 | dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; | |
1605 | dbg->arch.debugreg[7] = 0x0600; | |
1606 | for (n = 0; n < nb_hw_breakpoint; n++) { | |
1607 | dbg->arch.debugreg[n] = hw_breakpoint[n].addr; | |
1608 | dbg->arch.debugreg[7] |= (2 << (n * 2)) | | |
1609 | (type_code[hw_breakpoint[n].type] << (16 + n*4)) | | |
1610 | (len_code[hw_breakpoint[n].len] << (18 + n*4)); | |
1611 | } | |
1612 | } | |
f1665b21 SY |
1613 | /* Legal xcr0 for loading */ |
1614 | env->xcr0 = 1; | |
e22a25c9 AL |
1615 | } |
1616 | #endif /* KVM_CAP_SET_GUEST_DEBUG */ | |
4513d923 GN |
1617 | |
1618 | bool kvm_arch_stop_on_emulation_error(CPUState *env) | |
1619 | { | |
1620 | return !(env->cr[0] & CR0_PE_MASK) || | |
1621 | ((env->segs[R_CS].selector & 3) != 3); | |
1622 | } | |
1623 | ||
c0532a76 MT |
1624 | static void hardware_memory_error(void) |
1625 | { | |
1626 | fprintf(stderr, "Hardware memory error!\n"); | |
1627 | exit(1); | |
1628 | } | |
1629 | ||
1630 | int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr) | |
1631 | { | |
1632 | #if defined(KVM_CAP_MCE) | |
1633 | struct kvm_x86_mce mce = { | |
1634 | .bank = 9, | |
1635 | }; | |
1636 | void *vaddr; | |
1637 | ram_addr_t ram_addr; | |
1638 | target_phys_addr_t paddr; | |
1639 | int r; | |
1640 | ||
1641 | if ((env->mcg_cap & MCG_SER_P) && addr | |
1642 | && (code == BUS_MCEERR_AR | |
1643 | || code == BUS_MCEERR_AO)) { | |
1644 | if (code == BUS_MCEERR_AR) { | |
1645 | /* Fake an Intel architectural Data Load SRAR UCR */ | |
1646 | mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | |
1647 | | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S | |
1648 | | MCI_STATUS_AR | 0x134; | |
1649 | mce.misc = (MCM_ADDR_PHYS << 6) | 0xc; | |
1650 | mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV; | |
1651 | } else { | |
1652 | /* | |
1653 | * If there is an MCE exception being processed, ignore |
1654 | * this SRAO MCE | |
1655 | */ | |
1656 | r = kvm_mce_in_exception(env); | |
1657 | if (r == -1) { | |
1658 | fprintf(stderr, "Failed to get MCE status\n"); | |
1659 | } else if (r) { | |
1660 | return 0; | |
1661 | } | |
1662 | /* Fake an Intel architectural Memory scrubbing UCR */ | |
1663 | mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | |
1664 | | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S | |
1665 | | 0xc0; | |
1666 | mce.misc = (MCM_ADDR_PHYS << 6) | 0xc; | |
1667 | mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV; | |
1668 | } | |
1669 | vaddr = (void *)addr; | |
1670 | if (qemu_ram_addr_from_host(vaddr, &ram_addr) || | |
1671 | !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, &paddr)) { | |
1672 | fprintf(stderr, "Hardware memory error for memory used by " | |
1673 | "QEMU itself instead of guest system!\n"); | |
1674 | /* Hope we are lucky for AO MCE */ | |
1675 | if (code == BUS_MCEERR_AO) { | |
1676 | return 0; | |
1677 | } else { | |
1678 | hardware_memory_error(); | |
1679 | } | |
1680 | } | |
1681 | mce.addr = paddr; | |
1682 | r = kvm_set_mce(env, &mce); | |
1683 | if (r < 0) { | |
1684 | fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno)); | |
1685 | abort(); | |
1686 | } | |
1687 | } else | |
1688 | #endif | |
1689 | { | |
1690 | if (code == BUS_MCEERR_AO) { | |
1691 | return 0; | |
1692 | } else if (code == BUS_MCEERR_AR) { | |
1693 | hardware_memory_error(); | |
1694 | } else { | |
1695 | return 1; | |
1696 | } | |
1697 | } | |
1698 | return 0; | |
1699 | } | |
1700 | ||
1701 | int kvm_on_sigbus(int code, void *addr) | |
1702 | { | |
1703 | #if defined(KVM_CAP_MCE) | |
1704 | if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) { | |
1705 | uint64_t status; | |
1706 | void *vaddr; | |
1707 | ram_addr_t ram_addr; | |
1708 | target_phys_addr_t paddr; | |
1709 | CPUState *cenv; | |
1710 | ||
1711 | /* Hope we are lucky for AO MCE */ | |
1712 | vaddr = addr; | |
1713 | if (qemu_ram_addr_from_host(vaddr, &ram_addr) || | |
1714 | !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) { | |
1715 | fprintf(stderr, "Hardware memory error for memory used by " | |
1716 | "QEMU itself instead of guest system!: %p\n", addr); | |
1717 | return 0; | |
1718 | } | |
1719 | status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | |
1720 | | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S | |
1721 | | 0xc0; | |
1722 | kvm_inject_x86_mce(first_cpu, 9, status, | |
1723 | MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr, | |
1724 | (MCM_ADDR_PHYS << 6) | 0xc, 1); | |
1725 | for (cenv = first_cpu->next_cpu; cenv != NULL; cenv = cenv->next_cpu) { | |
1726 | kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC, | |
1727 | MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1); | |
1728 | } | |
1729 | } else | |
1730 | #endif | |
1731 | { | |
1732 | if (code == BUS_MCEERR_AO) { | |
1733 | return 0; | |
1734 | } else if (code == BUS_MCEERR_AR) { | |
1735 | hardware_memory_error(); | |
1736 | } else { | |
1737 | return 1; | |
1738 | } | |
1739 | } | |
1740 | return 0; | |
1741 | } |