/*
 * KQEMU support
 *
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

#ifdef USE_KQEMU

#define DEBUG
//#define PROFILE

#include <unistd.h>
#include <fcntl.h>
#include "kqemu/kqemu.h"

/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL   0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif

#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif

int kqemu_allowed = 1;
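/* TLB flushes and RAM dirty-bit transitions are batched in the two
   arrays below and handed to the kqemu kernel module on the next
   KQEMU_EXEC ioctl; when a batch overflows, a sentinel value
   (KQEMU_FLUSH_ALL / KQEMU_RAM_PAGES_UPDATE_ALL) requests a full
   update instead. */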
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
extern uint32_t **l1_phys_map;

#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))

#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
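/* classic CPUID detection: CPUID is available iff software can
   toggle the ID flag (bit 21, 0x00200000) in EFLAGS. On x86_64 the
   test is unnecessary since CPUID is always present. */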
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif

static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    if (!is_cpuid_supported()) {
        features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so for best performance it is better not
       to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non-accelerated code sees exactly the same CPU features as the
       accelerated code */
}

int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = 1;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}

void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}

void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}

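/* memory image written/read by FSAVE/FRSTOR in 32-bit protected
   mode: 108 bytes, with the 16-bit control/status/tag words padded
   to 32 bits, followed by the instruction/operand pointers and the
   eight 10-byte stack registers in ST(0)..ST(7) order. */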
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

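/* 512-byte memory image used by FXSAVE/FXRSTOR; the hardware
   requires the operand to be 16-byte aligned, hence the aligned
   attribute on the static fpx1 buffer below. */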
struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));

static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

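    /* env->fpregs[] is indexed by physical register while the FSAVE
       image stores registers in stack order starting at ST(0);
       env->fpstt (top of stack) gives the rotation between the two.
       In the FSAVE tag word (2 bits per register), 11b marks an
       empty register; valid registers are left as 00b and the FPU
       fills in the precise tag itself. */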
    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor reads the memory operand, so it is an input to the asm */
    asm volatile ("frstor %0" : : "m" (*fp));
}

static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

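    /* note that fsave both stores the FPU state and reinitializes
       the FPU (as if FINIT had executed), which is why a sane
       control word is reloaded with fldcw before returning. */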
    /* fsave writes the memory operand, so it is an output of the asm */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    /* FXSAVE uses an abridged one-bit tag (1 = register in use);
       env->fptags stores 1 = empty, hence the XOR */
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    /* fxrstor reads the memory operand, so it is an input to the asm */
    asm volatile ("fxrstor %0" : : "m" (*fp));
}

static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    /* fxsave writes the memory operand, so it is an output of the asm */
    asm volatile ("fxsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state; unlike fsave,
       fxsave does not reinitialize the FPU, hence the fninit */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

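    /* emulate the SYSCALL instruction trapped by kqemu. Per the x86
       definition, the new CS selector comes from STAR[47:32] and SS
       from STAR[47:32] + 8; in long mode RCX receives the return
       RIP, R11 the saved RFLAGS, RFLAGS is masked with SFMASK and
       RIP is loaded from LSTAR (64-bit code) or CSTAR (compatibility
       mode). In legacy mode ECX gets the return EIP and EIP comes
       from STAR[31:0]. */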
    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (env->hflags & HF_CS64_MASK)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}

#ifdef PROFILE

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

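/* optional profiling support: a chained hash table keyed on guest PC
   counts how often execution falls back to the software MMU, so hot
   spots can be read out of /tmp/kqemu.stats afterwards. PC_REC_SIZE
   is the bucketing granularity in bytes. */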
typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
int nb_pc_records;

void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}

/* sort in descending order of count */
int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}

void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);
}
#else
void kqemu_record_dump(void)
{
}
#endif


int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
    /* only pass the debug registers if some breakpoint is enabled
       (DR7 local/global enable bits) */
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
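    /* this kqemu version only runs guest user code natively: the CPL
       handed to the module is forced to 3 and (from protocol version
       1.2 on) user_only is set, so privileged guest code still goes
       through the dynamic translator. */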
    kenv->cpl = 3;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    nb_pages_to_flush = 0;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = 1;
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

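    /* lazy FPU handling: if CR0.TS is clear the guest can touch the
       FPU without faulting, so the guest FPU context must be loaded
       into the hardware registers before entering native execution
       (and saved back below once the KQEMU_EXEC call returns). */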
    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            restore_native_fp_fxrstor(env);
        else
            restore_native_fp_frstor(env);
    }

#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                    kenv, sizeof(struct kqemu_cpu_state),
                    kenv, sizeof(struct kqemu_cpu_state),
                    &temp, NULL);
    ret = kenv->retval;
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            save_native_fp_fxsave(env);
        else
            save_native_fp_fsave(env);
    }

    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
#if 0
    /* no need to restore that */
    memcpy(env->ldt, kenv->ldt, sizeof(env->ldt));
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->cr[0] = kenv->cr0;
    env->cr[3] = kenv->cr3;
    env->cr[4] = kenv->cr4;
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
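    /* dispatch on the module's return code: KQEMU_RET_SYSCALL means a
       trapped syscall insn to emulate; KQEMU_RET_INT / _EXCEPTION
       carry the vector in the low byte and become a pending guest
       interrupt or exception; KQEMU_RET_INTR means execution was
       interrupted by a host signal; KQEMU_RET_SOFTMMU means the code
       cannot run natively and must go back to the translator. The
       value returned here (0, 1 or 2) is interpreted by the caller
       in cpu_exec(). */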
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef PROFILE
        kqemu_record_pc(env->eip + env->segs[R_CS].base);
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}

#endif