/*
 * KQEMU support
 *
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

#ifdef USE_KQEMU

#define DEBUG
//#define PROFILE

#include <unistd.h>
#include <fcntl.h>
#include "kqemu.h"

/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL   0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif

#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif

/* 0 = not allowed
   1 = user kqemu
   2 = kernel kqemu
*/
int kqemu_allowed = 1;
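/* buffers shared with the kqemu kernel module; they are registered
   with the KQEMU_INIT ioctl in kqemu_init() */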
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;

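/* execute the cpuid instruction for the given leaf and return the
   four result registers */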
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))

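/* CPUID is always available in 64-bit mode; on 32-bit hosts it is
   detected by checking whether the ID bit (bit 21) of EFLAGS can be
   toggled, as recommended by the CPU manuals. */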
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif

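/* make the CPUID feature bits that matter to user code identical on
   the guest and host CPUs (see critical_features_mask below) */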
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    if (!is_cpuid_supported()) {
        features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performances
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}

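/* open the kqemu device, check that the module version matches,
   allocate the buffers shared with the kernel module and register
   them with the KQEMU_INIT ioctl. Returns 0 on success, -1 if the
   acceleration layer cannot be enabled. */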
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}

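/* queue a single TLB page flush for the kernel module, degrading to
   a full flush if the queue overflows */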
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#if defined(DEBUG)
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

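/* request a full TLB flush on the next KQEMU_EXEC call */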
void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}

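/* notify the kernel module that a fully dirty RAM page is being
   reset to the not-dirty state, degrading to a full update if the
   queue overflows */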
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}

static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}

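/* record that a RAM page was modified; once
   KQEMU_MAX_MODIFIED_RAM_PAGES entries accumulate, the list is
   flushed to the kernel module with the KQEMU_MODIFY_RAM_PAGES
   ioctl */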
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}

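/* memory layout of the legacy 108-byte fsave/frstor area
   (32-bit protected mode format) */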
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

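/* memory layout of the 512-byte fxsave/fxrstor area; the fxsave and
   fxrstor instructions require it to be 16-byte aligned */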
struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));

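/* load the guest FPU state into the host FPU with frstor, for CPUs
   without FXSR support */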
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor reads the memory operand, so it must be an input
       constraint */
    asm volatile ("frstor %0" : : "m" (*fp));
}

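/* save the host FPU state back into the guest CPU state with fsave,
   then reload a default control word (keeping the guest rounding
   mode) for the rest of QEMU */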
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fsave writes the memory operand, so it must be an output
       constraint */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

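/* load the guest FPU/SSE state into the host FPU with fxrstor, for
   CPUs with FXSR support. Note that fxsave/fxrstor use an inverted,
   one-bit-per-register tag format. */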
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    /* fxrstor reads the memory operand, so it must be an input
       constraint */
    asm volatile ("fxrstor %0" : : "m" (*fp));
}

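/* save the host FPU/SSE state back into the guest CPU state with
   fxsave, then reinitialize the host FPU with a default control
   word (keeping the guest rounding mode) */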
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    /* fxsave writes the memory operand, so it must be an output
       constraint */
    asm volatile ("fxsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

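/* emulate the SYSCALL instruction after the kernel module returns
   KQEMU_RET_SYSCALL: load CS/SS from the STAR MSR, mask EFLAGS and
   jump to the handler, mirroring the syscall helper of the main
   emulator */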
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (env->hflags & HF_CS64_MASK)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}

#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;

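/* profiler: count how many times each guest PC caused an exit to
   the software MMU, using a chained hash table */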
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    if (!r)
        return;
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}

550
551 static int pc_rec_cmp(const void *p1, const void *p2)
552 {
553 PCRecord *r1 = *(PCRecord **)p1;
554 PCRecord *r2 = *(PCRecord **)p2;
555 if (r1->count < r2->count)
556 return 1;
557 else if (r1->count == r2->count)
558 return 0;
559 else
560 return -1;
561 }
562
563 static void kqemu_record_flush(void)
564 {
565 PCRecord *r, *r_next;
566 int h;
567
568 for(h = 0; h < PC_REC_HASH_SIZE; h++) {
569 for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
570 r_next = r->next;
571 free(r);
572 }
573 pc_rec_hash[h] = NULL;
574 }
575 nb_pc_records = 0;
576 }
577
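/* sort the records by decreasing count and dump them to
   /tmp/kqemu.stats together with per-record and cumulative
   percentages */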
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    /* int64_t is not 'long long' on every host, so use PRId64 */
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif

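/* run the current CPU state inside the kqemu kernel module: copy
   the CPU state into the shared structure, load the guest FPU
   state, issue the KQEMU_EXEC ioctl, copy everything back, then
   dispatch on the module return code. Returns 1 when an exception
   or interrupt must be raised, 2 when execution must continue in
   the dynamic translator, and 0 when the call was interrupted by
   the host. */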
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef __x86_64__
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef __x86_64__
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}

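/* make the kernel module return from the KQEMU_EXEC ioctl so that
   pending work can be handled; on non-Windows hosts the host signal
   itself is presumably enough to interrupt the ioctl */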
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and to return successfully. */
    CancelIo(kqemu_fd);
#endif
}

#endif