]> git.proxmox.com Git - mirror_qemu.git/blame - kqemu.c
Initial SPARC SMP support (Blue Swirl)
[mirror_qemu.git] / kqemu.c
CommitLineData
9df217a3
FB
1/*
2 * KQEMU support
3 *
4 * Copyright (c) 2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifdef _WIN32
22#include <windows.h>
6e4255f6 23#include <winioctl.h>
9df217a3
FB
24#else
25#include <sys/types.h>
26#include <sys/mman.h>
6e4255f6 27#include <sys/ioctl.h>
9df217a3
FB
28#endif
29#include <stdlib.h>
30#include <stdio.h>
31#include <stdarg.h>
32#include <string.h>
33#include <errno.h>
34#include <unistd.h>
35#include <inttypes.h>
36
37#include "cpu.h"
38#include "exec-all.h"
39
40#ifdef USE_KQEMU
41
42#define DEBUG
aa062973 43//#define PROFILE
9df217a3
FB
44
45#include <unistd.h>
46#include <fcntl.h>
9df217a3
FB
47#include "kqemu/kqemu.h"
48
c28e951f
FB
49/* compatibility stuff */
50#ifndef KQEMU_RET_SYSCALL
51#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
52#endif
aa062973
FB
53#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
54#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
55#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
56#endif
c28e951f 57
6e4255f6
FB
/* Name of the kqemu character device and a portable handle
   abstraction over it (Win32 HANDLE vs POSIX file descriptor). */
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
9df217a3
FB
73
/* When 0, kqemu_init() refuses to enable the accelerator. */
int kqemu_allowed = 1;

/* Guest pages whose mappings the kernel module must flush before the
   next KQEMU_EXEC (filled by kqemu_flush_page / kqemu_flush). */
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;

/* Ram pages whose dirty state must be resynchronized with the module
   (filled by kqemu_set_notdirty). */
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;

/* physical address to ram address mapping table, defined elsewhere
   in the tree (handed to the module in kqemu_init) */
extern uint32_t **l1_phys_map;

/* Raw cpuid wrapper: 'index' selects the leaf; results land in the
   four named lvalues. "0" ties the input to EAX. */
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
85
c28e951f
FB
#ifdef __x86_64__
/* every x86_64 CPU implements cpuid */
static int is_cpuid_supported(void)
{
    return 1;
}
#else
/* Detect cpuid support on i386: the instruction exists iff bit 21
   (the ID flag) of EFLAGS can be toggled. We read EFLAGS, flip the
   bit, write it back and check whether the change stuck. */
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
9df217a3
FB
109
/* Align the guest-visible CPUID feature bits with the host for the
   features user code depends on directly, so that code runs the same
   whether it executes natively under kqemu or in the emulator. */
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    if (!is_cpuid_supported()) {
        /* no cpuid: report none of the critical features */
        features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);   /* leaf 1: EDX = feature flags */
        features = edx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performances
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    /* keep non-critical guest bits, take critical bits from the host */
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
141
/* Open the kqemu device, verify the module's ABI version and register
   the guest ram layout with it. Returns 0 on success and -1 on any
   failure, in which case the accelerator stays disabled. */
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    /* refuse to run against a module built for a different ABI */
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    /* buffers shared with the kernel module; allocated with
       qemu_vmalloc (presumably page-aligned memory - confirm) */
    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    /* describe the guest ram layout to the kernel module */
    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        /* NOTE(review): pages_to_flush / ram_pages_to_update are not
           released on this path - apparent one-shot leak; confirm
           whether this tree has a qemu_vfree counterpart */
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = 1;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
215
216void kqemu_flush_page(CPUState *env, target_ulong addr)
217{
218#ifdef DEBUG
219 if (loglevel & CPU_LOG_INT) {
220 fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
221 }
222#endif
223 if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
224 nb_pages_to_flush = KQEMU_FLUSH_ALL;
225 else
226 pages_to_flush[nb_pages_to_flush++] = addr;
227}
228
229void kqemu_flush(CPUState *env, int global)
230{
231#ifdef DEBUG
232 if (loglevel & CPU_LOG_INT) {
233 fprintf(logfile, "kqemu_flush:\n");
234 }
235#endif
236 nb_pages_to_flush = KQEMU_FLUSH_ALL;
237}
238
aa062973
FB
239void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
240{
241#ifdef DEBUG
242 if (loglevel & CPU_LOG_INT) {
243 fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
244 }
245#endif
fc8dc060
FB
246 /* we only track transitions to dirty state */
247 if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
248 return;
aa062973
FB
249 if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
250 nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
251 else
252 ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
253}
254
9df217a3
FB
/* Memory image read/written by the legacy fsave/frstor instructions:
   32-bit protected-mode FPU environment followed by the eight 80-bit
   data registers. */
struct fpstate {
    uint16_t fpuc;       /* control word */
    uint16_t dummy1;
    uint16_t fpus;       /* status word (includes top-of-stack) */
    uint16_t dummy2;
    uint16_t fptag;      /* tag word, 2 bits per register */
    uint16_t dummy3;

    uint32_t fpip;       /* FPU instruction pointer */
    uint32_t fpcs;       /* instruction pointer selector */
    uint32_t fpoo;       /* operand offset */
    uint32_t fpos;       /* operand selector */
    uint8_t fpregs1[8 * 10]; /* ST(0)..ST(7), 10 bytes each */
};
269
/* Memory image read/written by fxsave/fxrstor: FPU state plus SSE
   registers and MXCSR. The layout is fixed by the instruction set. */
struct fpxstate {
    uint16_t fpuc;       /* control word */
    uint16_t fpus;       /* status word */
    uint16_t fptag;      /* abridged tag word, 1 bit per register */
    uint16_t fop;        /* last opcode */
    uint32_t fpuip;      /* FPU instruction pointer */
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;      /* FPU operand pointer */
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;      /* SSE control/status */
    uint32_t mxcsr_mask; /* valid MXCSR bits as reported by the CPU */
    uint8_t fpregs1[8 * 16];  /* ST/MM registers, 16-byte slots */
    uint8_t xmm_regs[16 * 16]; /* XMM0..XMM15 */
    uint8_t dummy2[96];
};

/* fxsave/fxrstor require a 16-byte aligned buffer */
static struct fpxstate fpx1 __attribute__((aligned(16)));
289
290static void restore_native_fp_frstor(CPUState *env)
291{
292 int fptag, i, j;
293 struct fpstate fp1, *fp = &fp1;
294
295 fp->fpuc = env->fpuc;
296 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
297 fptag = 0;
298 for (i=7; i>=0; i--) {
299 fptag <<= 2;
300 if (env->fptags[i]) {
301 fptag |= 3;
302 } else {
303 /* the FPU automatically computes it */
304 }
305 }
306 fp->fptag = fptag;
307 j = env->fpstt;
308 for(i = 0;i < 8; i++) {
309 memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
310 j = (j + 1) & 7;
311 }
312 asm volatile ("frstor %0" : "=m" (*fp));
313}
314
315static void save_native_fp_fsave(CPUState *env)
316{
317 int fptag, i, j;
318 uint16_t fpuc;
319 struct fpstate fp1, *fp = &fp1;
320
321 asm volatile ("fsave %0" : : "m" (*fp));
322 env->fpuc = fp->fpuc;
323 env->fpstt = (fp->fpus >> 11) & 7;
324 env->fpus = fp->fpus & ~0x3800;
325 fptag = fp->fptag;
326 for(i = 0;i < 8; i++) {
327 env->fptags[i] = ((fptag & 3) == 3);
328 fptag >>= 2;
329 }
330 j = env->fpstt;
331 for(i = 0;i < 8; i++) {
332 memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
333 j = (j + 1) & 7;
334 }
335 /* we must restore the default rounding state */
336 fpuc = 0x037f | (env->fpuc & (3 << 10));
337 asm volatile("fldcw %0" : : "m" (fpuc));
338}
339
340static void restore_native_fp_fxrstor(CPUState *env)
341{
342 struct fpxstate *fp = &fpx1;
343 int i, j, fptag;
344
345 fp->fpuc = env->fpuc;
346 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
347 fptag = 0;
348 for(i = 0; i < 8; i++)
349 fptag |= (env->fptags[i] << i);
350 fp->fptag = fptag ^ 0xff;
351
352 j = env->fpstt;
353 for(i = 0;i < 8; i++) {
354 memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
355 j = (j + 1) & 7;
356 }
357 if (env->cpuid_features & CPUID_SSE) {
358 fp->mxcsr = env->mxcsr;
359 /* XXX: check if DAZ is not available */
360 fp->mxcsr_mask = 0xffff;
c28e951f 361 memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
9df217a3
FB
362 }
363 asm volatile ("fxrstor %0" : "=m" (*fp));
364}
365
366static void save_native_fp_fxsave(CPUState *env)
367{
368 struct fpxstate *fp = &fpx1;
369 int fptag, i, j;
370 uint16_t fpuc;
371
372 asm volatile ("fxsave %0" : : "m" (*fp));
373 env->fpuc = fp->fpuc;
374 env->fpstt = (fp->fpus >> 11) & 7;
375 env->fpus = fp->fpus & ~0x3800;
376 fptag = fp->fptag ^ 0xff;
377 for(i = 0;i < 8; i++) {
378 env->fptags[i] = (fptag >> i) & 1;
379 }
380 j = env->fpstt;
381 for(i = 0;i < 8; i++) {
382 memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
383 j = (j + 1) & 7;
384 }
385 if (env->cpuid_features & CPUID_SSE) {
386 env->mxcsr = fp->mxcsr;
c28e951f 387 memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
9df217a3
FB
388 }
389
390 /* we must restore the default rounding state */
391 asm volatile ("fninit");
392 fpuc = 0x037f | (env->fpuc & (3 << 10));
393 asm volatile("fldcw %0" : : "m" (fpuc));
394}
395
c28e951f
FB
/* Emulate the effect of a syscall instruction after the kernel module
   returned KQEMU_RET_SYSCALL: load the kernel code/stack segment
   descriptors derived from MSR STAR and jump to the configured entry
   point. Always returns 2 (resume in the software emulator). */
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    /* STAR[47:32] holds the kernel CS selector base */
    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        /* 64-bit semantics: RCX <- return RIP, R11 <- RFLAGS */
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        /* SS is architecturally CS selector + 8 */
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;   /* mask flags per MSR SFMASK */
        /* 64-bit vs compatibility-mode entry points (LSTAR / CSTAR) */
        if (env->hflags & HF_CS64_MASK)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        /* legacy 32-bit semantics: ECX <- return EIP */
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        /* entry point comes from the low 32 bits of STAR */
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
444
aa062973
FB
#ifdef PROFILE

/* Histogram of guest PCs that caused a fall back to the software MMU
   (see the KQEMU_RET_SOFTMMU handling in kqemu_cpu_exec). */
#define PC_REC_SIZE 1            /* bucket granularity in bytes */
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;            /* guest program counter */
    int64_t count;               /* number of hits for this pc */
    struct PCRecord *next;       /* hash bucket chain */
} PCRecord;

/* chained hash table of records, plus total number of entries */
PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
int nb_pc_records;
459
460void kqemu_record_pc(unsigned long pc)
461{
462 unsigned long h;
463 PCRecord **pr, *r;
464
465 h = pc / PC_REC_SIZE;
466 h = h ^ (h >> PC_REC_HASH_BITS);
467 h &= (PC_REC_HASH_SIZE - 1);
468 pr = &pc_rec_hash[h];
469 for(;;) {
470 r = *pr;
471 if (r == NULL)
472 break;
473 if (r->pc == pc) {
474 r->count++;
475 return;
476 }
477 pr = &r->next;
478 }
479 r = malloc(sizeof(PCRecord));
480 r->count = 1;
481 r->pc = pc;
482 r->next = NULL;
483 *pr = r;
484 nb_pc_records++;
485}
486
487int pc_rec_cmp(const void *p1, const void *p2)
488{
489 PCRecord *r1 = *(PCRecord **)p1;
490 PCRecord *r2 = *(PCRecord **)p2;
491 if (r1->count < r2->count)
492 return 1;
493 else if (r1->count == r2->count)
494 return 0;
495 else
496 return -1;
497}
498
499void kqemu_record_dump(void)
500{
501 PCRecord **pr, *r;
502 int i, h;
503 FILE *f;
504 int64_t total, sum;
505
506 pr = malloc(sizeof(PCRecord *) * nb_pc_records);
507 i = 0;
508 total = 0;
509 for(h = 0; h < PC_REC_HASH_SIZE; h++) {
510 for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
511 pr[i++] = r;
512 total += r->count;
513 }
514 }
515 qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);
516
517 f = fopen("/tmp/kqemu.stats", "w");
518 if (!f) {
519 perror("/tmp/kqemu.stats");
520 exit(1);
521 }
522 fprintf(f, "total: %lld\n", total);
523 sum = 0;
524 for(i = 0; i < nb_pc_records; i++) {
525 r = pr[i];
526 sum += r->count;
527 fprintf(f, "%08lx: %lld %0.2f%% %0.2f%%\n",
528 r->pc,
529 r->count,
530 (double)r->count / (double)total * 100.0,
531 (double)sum / (double)total * 100.0);
532 }
533 fclose(f);
534 free(pr);
535}
536#else
537void kqemu_record_dump(void)
538{
539}
540#endif
541
9df217a3
FB
/* Run guest code inside the kqemu kernel module. Marshals the CPU
   state into a kqemu_cpu_state, issues KQEMU_EXEC, copies the state
   back and translates the module's return code.
   Returns: 0 = interrupted, 1 = exception/interrupt pending,
   2 = continue in the software emulator; exits on unknown codes. */
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    /* copy the guest CPU state into the kernel exchange structure */
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
    /* only pass the debug registers when a breakpoint is armed */
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    kenv->cpl = 3;    /* guest code always runs at user privilege */
    /* hand over (and reset) the pending flush / dirty-update queues */
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    nb_pages_to_flush = 0;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = 1;
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

    /* load the guest FPU state into the host FPU, unless CR0.TS is
       set (FPU accesses would trap anyway) */
    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            restore_native_fp_fxrstor(env);
        else
            restore_native_fp_frstor(env);
    }

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    /* save the (possibly modified) FPU state back */
    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            save_native_fp_fxsave(env);
        else
            save_native_fp_fsave(env);
    }

    /* copy back only the state the module can have changed */
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
#if 0
    /* no need to restore that */
    memcpy(env->ldt, kenv->ldt, sizeof(env->ldt));
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->cr[0] = kenv->cr0;
    env->cr[3] = kenv->cr3;
    env->cr[4] = kenv->cr4;
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that is real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
           ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    /* translate the module's return code for cpu_exec() */
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        /* hardware/software interrupt: queue it as a pending event */
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        /* CPU exception with error code */
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
        /* execution interrupted (e.g. by a host signal) */
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
        /* the module could not handle the access; fall back to the
           software MMU in the emulator */
#ifdef PROFILE
        kqemu_record_pc(env->eip + env->segs[R_CS].base);
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
742
a332e112
FB
/* Interrupt guest execution inside the kernel module. Only has an
   effect on Win32 with module versions >= 1.1.1; on other hosts this
   is a no-op (presumably a signal interrupts the ioctl instead -
   confirm against the POSIX path). */
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and successfully returning. */
    CancelIo(kqemu_fd);
#endif
}
751
9df217a3 752#endif