]> git.proxmox.com Git - qemu.git/blame - kqemu.c
dirty ram page handling fixes
[qemu.git] / kqemu.c
CommitLineData
9df217a3
FB
1/*
2 * KQEMU support
3 *
4 * Copyright (c) 2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifdef _WIN32
22#include <windows.h>
6e4255f6 23#include <winioctl.h>
9df217a3
FB
24#else
25#include <sys/types.h>
26#include <sys/mman.h>
6e4255f6 27#include <sys/ioctl.h>
9df217a3
FB
28#endif
29#include <stdlib.h>
30#include <stdio.h>
31#include <stdarg.h>
32#include <string.h>
33#include <errno.h>
34#include <unistd.h>
35#include <inttypes.h>
36
37#include "cpu.h"
38#include "exec-all.h"
39
40#ifdef USE_KQEMU
41
42#define DEBUG
aa062973 43//#define PROFILE
9df217a3
FB
44
45#include <unistd.h>
46#include <fcntl.h>
9df217a3
FB
47#include "kqemu/kqemu.h"
48
c28e951f
FB
49/* compatibility stuff */
50#ifndef KQEMU_RET_SYSCALL
51#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
52#endif
aa062973
FB
53#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
54#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
55#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
56#endif
c28e951f 57
6e4255f6
FB
58#ifdef _WIN32
59#define KQEMU_DEVICE "\\\\.\\kqemu"
60#else
9df217a3 61#define KQEMU_DEVICE "/dev/kqemu"
6e4255f6
FB
62#endif
63
64#ifdef _WIN32
65#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
66HANDLE kqemu_fd = KQEMU_INVALID_FD;
67#define kqemu_closefd(x) CloseHandle(x)
68#else
69#define KQEMU_INVALID_FD -1
70int kqemu_fd = KQEMU_INVALID_FD;
71#define kqemu_closefd(x) close(x)
72#endif
9df217a3
FB
73
/* Runtime switch: non-zero (the default) allows the accelerator to be
   used; kqemu_init() returns -1 immediately when it is cleared. */
int kqemu_allowed = 1;

/* Batch of guest page addresses whose TLB entries the kernel module must
   flush on the next KQEMU_EXEC.  nb_pages_to_flush is the fill count, or
   KQEMU_FLUSH_ALL to request a full flush (see kqemu_flush_page/kqemu_flush). */
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;

/* Batch of RAM page addresses whose dirty state must be resynchronized
   with the kernel module, filled by kqemu_set_notdirty.  When the buffer
   overflows the count is set to KQEMU_RAM_PAGES_UPDATE_ALL. */
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;

/* First-level physical page map shared with the kernel module —
   presumably defined in exec.c; confirm against the build. */
extern uint32_t **l1_phys_map;

/* Execute CPUID with 'index' in EAX and capture EAX/EBX/ECX/EDX. */
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
85
c28e951f
FB
#ifdef __x86_64__
/* CPUID always exists in long mode, no runtime probe needed. */
static int is_cpuid_supported(void)
{
    return 1;
}
#else
/* Probe for CPUID on 32-bit hosts: the instruction is available iff bit
   21 (the ID flag, 0x00200000) of EFLAGS can be toggled.  We read the
   flags, flip the ID bit, write them back and re-read; if the bit stuck,
   CPUID is supported. */
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
9df217a3
FB
109
110static void kqemu_update_cpuid(CPUState *env)
111{
112 int critical_features_mask, features;
113 uint32_t eax, ebx, ecx, edx;
114
115 /* the following features are kept identical on the host and
116 target cpus because they are important for user code. Strictly
117 speaking, only SSE really matters because the OS must support
118 it if the user code uses it. */
119 critical_features_mask =
120 CPUID_CMOV | CPUID_CX8 |
121 CPUID_FXSR | CPUID_MMX | CPUID_SSE |
122 CPUID_SSE2;
123 if (!is_cpuid_supported()) {
124 features = 0;
125 } else {
126 cpuid(1, eax, ebx, ecx, edx);
127 features = edx;
128 }
129 env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
130 (features & critical_features_mask);
131 /* XXX: we could update more of the target CPUID state so that the
132 non accelerated code sees exactly the same CPU features as the
133 accelerated code */
134}
135
136int kqemu_init(CPUState *env)
137{
138 struct kqemu_init init;
139 int ret, version;
6e4255f6
FB
140#ifdef _WIN32
141 DWORD temp;
142#endif
9df217a3
FB
143
144 if (!kqemu_allowed)
145 return -1;
146
6e4255f6
FB
147#ifdef _WIN32
148 kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
149 FILE_SHARE_READ | FILE_SHARE_WRITE,
150 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
151 NULL);
152#else
9df217a3 153 kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
6e4255f6
FB
154#endif
155 if (kqemu_fd == KQEMU_INVALID_FD) {
9df217a3
FB
156 fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
157 return -1;
158 }
159 version = 0;
6e4255f6
FB
160#ifdef _WIN32
161 DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
162 &version, sizeof(version), &temp, NULL);
163#else
9df217a3 164 ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
6e4255f6 165#endif
9df217a3
FB
166 if (version != KQEMU_VERSION) {
167 fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
168 version, KQEMU_VERSION);
169 goto fail;
170 }
171
172 pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
173 sizeof(unsigned long));
174 if (!pages_to_flush)
175 goto fail;
176
aa062973
FB
177 ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
178 sizeof(unsigned long));
179 if (!ram_pages_to_update)
180 goto fail;
181
9df217a3
FB
182 init.ram_base = phys_ram_base;
183 init.ram_size = phys_ram_size;
184 init.ram_dirty = phys_ram_dirty;
185 init.phys_to_ram_map = l1_phys_map;
186 init.pages_to_flush = pages_to_flush;
aa062973
FB
187#if KQEMU_VERSION >= 0x010200
188 init.ram_pages_to_update = ram_pages_to_update;
189#endif
6e4255f6
FB
190#ifdef _WIN32
191 ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
192 NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
193#else
9df217a3 194 ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
6e4255f6 195#endif
9df217a3
FB
196 if (ret < 0) {
197 fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
198 fail:
6e4255f6
FB
199 kqemu_closefd(kqemu_fd);
200 kqemu_fd = KQEMU_INVALID_FD;
9df217a3
FB
201 return -1;
202 }
203 kqemu_update_cpuid(env);
204 env->kqemu_enabled = 1;
205 nb_pages_to_flush = 0;
aa062973 206 nb_ram_pages_to_update = 0;
9df217a3
FB
207 return 0;
208}
209
210void kqemu_flush_page(CPUState *env, target_ulong addr)
211{
212#ifdef DEBUG
213 if (loglevel & CPU_LOG_INT) {
214 fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
215 }
216#endif
217 if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
218 nb_pages_to_flush = KQEMU_FLUSH_ALL;
219 else
220 pages_to_flush[nb_pages_to_flush++] = addr;
221}
222
223void kqemu_flush(CPUState *env, int global)
224{
225#ifdef DEBUG
226 if (loglevel & CPU_LOG_INT) {
227 fprintf(logfile, "kqemu_flush:\n");
228 }
229#endif
230 nb_pages_to_flush = KQEMU_FLUSH_ALL;
231}
232
aa062973
FB
233void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
234{
235#ifdef DEBUG
236 if (loglevel & CPU_LOG_INT) {
237 fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
238 }
239#endif
240 if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
241 nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
242 else
243 ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
244}
245
9df217a3
FB
/* In-memory image used by the legacy FSAVE/FRSTOR instructions
   (32-bit protected-mode format).  Field order and padding must match
   the hardware layout exactly — do not reorder. */
struct fpstate {
    uint16_t fpuc;          /* control word */
    uint16_t dummy1;
    uint16_t fpus;          /* status word (TOP in bits 11-13) */
    uint16_t dummy2;
    uint16_t fptag;         /* full 2-bit-per-register tag word */
    uint16_t dummy3;

    uint32_t fpip;          /* instruction pointer */
    uint32_t fpcs;
    uint32_t fpoo;          /* operand offset */
    uint32_t fpos;
    uint8_t fpregs1[8 * 10]; /* ST(0)..ST(7), 10 bytes each */
};

/* In-memory image used by FXSAVE/FXRSTOR.  Must match the hardware
   layout exactly; total size 512 bytes, 16-byte alignment required. */
struct fpxstate {
    uint16_t fpuc;          /* control word */
    uint16_t fpus;          /* status word */
    uint16_t fptag;         /* abridged 1-bit-per-register tag word */
    uint16_t fop;           /* last opcode */
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16]; /* ST/MM registers, 16-byte slots */
    uint8_t xmm_regs[16 * 16]; /* XMM0..XMM15 (8 used on 32-bit hosts) */
    uint8_t dummy2[96];
};

/* Static FXSAVE area: the instruction requires 16-byte alignment. */
static struct fpxstate fpx1 __attribute__((aligned(16)));
280
281static void restore_native_fp_frstor(CPUState *env)
282{
283 int fptag, i, j;
284 struct fpstate fp1, *fp = &fp1;
285
286 fp->fpuc = env->fpuc;
287 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
288 fptag = 0;
289 for (i=7; i>=0; i--) {
290 fptag <<= 2;
291 if (env->fptags[i]) {
292 fptag |= 3;
293 } else {
294 /* the FPU automatically computes it */
295 }
296 }
297 fp->fptag = fptag;
298 j = env->fpstt;
299 for(i = 0;i < 8; i++) {
300 memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
301 j = (j + 1) & 7;
302 }
303 asm volatile ("frstor %0" : "=m" (*fp));
304}
305
306static void save_native_fp_fsave(CPUState *env)
307{
308 int fptag, i, j;
309 uint16_t fpuc;
310 struct fpstate fp1, *fp = &fp1;
311
312 asm volatile ("fsave %0" : : "m" (*fp));
313 env->fpuc = fp->fpuc;
314 env->fpstt = (fp->fpus >> 11) & 7;
315 env->fpus = fp->fpus & ~0x3800;
316 fptag = fp->fptag;
317 for(i = 0;i < 8; i++) {
318 env->fptags[i] = ((fptag & 3) == 3);
319 fptag >>= 2;
320 }
321 j = env->fpstt;
322 for(i = 0;i < 8; i++) {
323 memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
324 j = (j + 1) & 7;
325 }
326 /* we must restore the default rounding state */
327 fpuc = 0x037f | (env->fpuc & (3 << 10));
328 asm volatile("fldcw %0" : : "m" (fpuc));
329}
330
331static void restore_native_fp_fxrstor(CPUState *env)
332{
333 struct fpxstate *fp = &fpx1;
334 int i, j, fptag;
335
336 fp->fpuc = env->fpuc;
337 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
338 fptag = 0;
339 for(i = 0; i < 8; i++)
340 fptag |= (env->fptags[i] << i);
341 fp->fptag = fptag ^ 0xff;
342
343 j = env->fpstt;
344 for(i = 0;i < 8; i++) {
345 memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
346 j = (j + 1) & 7;
347 }
348 if (env->cpuid_features & CPUID_SSE) {
349 fp->mxcsr = env->mxcsr;
350 /* XXX: check if DAZ is not available */
351 fp->mxcsr_mask = 0xffff;
c28e951f 352 memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
9df217a3
FB
353 }
354 asm volatile ("fxrstor %0" : "=m" (*fp));
355}
356
357static void save_native_fp_fxsave(CPUState *env)
358{
359 struct fpxstate *fp = &fpx1;
360 int fptag, i, j;
361 uint16_t fpuc;
362
363 asm volatile ("fxsave %0" : : "m" (*fp));
364 env->fpuc = fp->fpuc;
365 env->fpstt = (fp->fpus >> 11) & 7;
366 env->fpus = fp->fpus & ~0x3800;
367 fptag = fp->fptag ^ 0xff;
368 for(i = 0;i < 8; i++) {
369 env->fptags[i] = (fptag >> i) & 1;
370 }
371 j = env->fpstt;
372 for(i = 0;i < 8; i++) {
373 memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
374 j = (j + 1) & 7;
375 }
376 if (env->cpuid_features & CPUID_SSE) {
377 env->mxcsr = fp->mxcsr;
c28e951f 378 memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
9df217a3
FB
379 }
380
381 /* we must restore the default rounding state */
382 asm volatile ("fninit");
383 fpuc = 0x037f | (env->fpuc & (3 << 10));
384 asm volatile("fldcw %0" : : "m" (fpuc));
385}
386
c28e951f
FB
/* Emulate the effects of the x86 SYSCALL instruction after the kqemu
   monitor trapped it (KQEMU_RET_SYSCALL): load kernel CS/SS descriptor
   caches derived from MSR_STAR, save the return EIP, mask EFLAGS and
   jump to the OS entry point.  Returns 2 — the same value
   kqemu_cpu_exec returns for the softmmu case, presumably meaning
   "resume emulated execution"; confirm with the caller in cpu-exec.c. */
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    /* STAR[47:32] holds the kernel CS selector base */
    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        /* 64-bit SYSCALL: return RIP in RCX, saved RFLAGS in R11 */
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        /* SS is architecturally CS selector + 8 */
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* mask flags per MSR_FMASK, then branch to LSTAR/CSTAR
           depending on whether the caller was in 64-bit code */
        env->eflags &= ~env->fmask;
        if (env->hflags & HF_CS64_MASK)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        /* legacy (32-bit) SYSCALL: return EIP in ECX */
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        /* entry point is STAR[31:0] in legacy mode */
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
435
aa062973
FB
#ifdef PROFILE

/* Size in bytes of one profiling bucket (1 = per-address counting). */
#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

/* One profiled program counter: hit count, chained on hash collision. */
typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

/* Chained hash table of PC samples, filled by kqemu_record_pc whenever
   execution falls back to the soft MMU; dumped by kqemu_record_dump. */
PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
int nb_pc_records;
451void kqemu_record_pc(unsigned long pc)
452{
453 unsigned long h;
454 PCRecord **pr, *r;
455
456 h = pc / PC_REC_SIZE;
457 h = h ^ (h >> PC_REC_HASH_BITS);
458 h &= (PC_REC_HASH_SIZE - 1);
459 pr = &pc_rec_hash[h];
460 for(;;) {
461 r = *pr;
462 if (r == NULL)
463 break;
464 if (r->pc == pc) {
465 r->count++;
466 return;
467 }
468 pr = &r->next;
469 }
470 r = malloc(sizeof(PCRecord));
471 r->count = 1;
472 r->pc = pc;
473 r->next = NULL;
474 *pr = r;
475 nb_pc_records++;
476}
477
478int pc_rec_cmp(const void *p1, const void *p2)
479{
480 PCRecord *r1 = *(PCRecord **)p1;
481 PCRecord *r2 = *(PCRecord **)p2;
482 if (r1->count < r2->count)
483 return 1;
484 else if (r1->count == r2->count)
485 return 0;
486 else
487 return -1;
488}
489
490void kqemu_record_dump(void)
491{
492 PCRecord **pr, *r;
493 int i, h;
494 FILE *f;
495 int64_t total, sum;
496
497 pr = malloc(sizeof(PCRecord *) * nb_pc_records);
498 i = 0;
499 total = 0;
500 for(h = 0; h < PC_REC_HASH_SIZE; h++) {
501 for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
502 pr[i++] = r;
503 total += r->count;
504 }
505 }
506 qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);
507
508 f = fopen("/tmp/kqemu.stats", "w");
509 if (!f) {
510 perror("/tmp/kqemu.stats");
511 exit(1);
512 }
513 fprintf(f, "total: %lld\n", total);
514 sum = 0;
515 for(i = 0; i < nb_pc_records; i++) {
516 r = pr[i];
517 sum += r->count;
518 fprintf(f, "%08lx: %lld %0.2f%% %0.2f%%\n",
519 r->pc,
520 r->count,
521 (double)r->count / (double)total * 100.0,
522 (double)sum / (double)total * 100.0);
523 }
524 fclose(f);
525 free(pr);
526}
527#else
528void kqemu_record_dump(void)
529{
530}
531#endif
532
9df217a3
FB
/* Run the guest natively via the kqemu kernel module.
   Marshals the guest CPU state into a kqemu_cpu_state, hands it to the
   module with the KQEMU_EXEC ioctl, then unmarshals the (possibly
   modified) state back into env and dispatches on the module's return
   code.  Return values: 0 = interrupted by a host signal, 1 = a guest
   exception/interrupt is pending in env, 2 = fall back to the soft MMU
   (or a syscall was emulated); exits on an unknown module return code. */
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    /* ---- marshal guest state into the kernel communication struct ---- */
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
    /* only pass the debug registers when some breakpoint is armed */
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    /* guest code runs at user privilege under kqemu */
    kenv->cpl = 3;
    /* hand over (and reset) the pending TLB-flush batch */
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    nb_pages_to_flush = 0;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = 1;
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

    /* load the guest FPU state into the host FPU unless TS is set
       (in which case the first FPU use would fault anyway) */
    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            restore_native_fp_fxrstor(env);
        else
            restore_native_fp_frstor(env);
    }

    /* ---- run natively in the kernel module ---- */
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                    kenv, sizeof(struct kqemu_cpu_state),
                    kenv, sizeof(struct kqemu_cpu_state),
                    &temp, NULL);
    ret = kenv->retval;
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    /* save the host FPU state (possibly modified by the guest) back */
    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            save_native_fp_fxsave(env);
        else
            save_native_fp_fsave(env);
    }

    /* ---- unmarshal the state the module may have changed ---- */
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
#if 0
    /* no need to restore that */
    memcpy(env->ldt, kenv->ldt, sizeof(env->ldt));
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->cr[0] = kenv->cr0;
    env->cr[3] = kenv->cr3;
    env->cr[4] = kenv->cr4;
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;

#if KQEMU_VERSION >= 0x010200
    /* the module left dirty-state updates pending: resync the TLB */
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that is real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    /* ---- dispatch on the module's return code ---- */
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        /* software interrupt: queue it for the emulated handler */
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        /* CPU exception with error code: queue it likewise */
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
        /* interrupted by a host signal: let the main loop handle it */
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
        /* the module cannot run this code natively: fall back to the
           soft MMU / emulation */
#ifdef PROFILE
        kqemu_record_pc(env->eip + env->segs[R_CS].base);
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
730
731#endif