]> git.proxmox.com Git - qemu.git/blame - kqemu.c
Optimize MIPS timer read/write functions
[qemu.git] / kqemu.c
CommitLineData
9df217a3
FB
1/*
2 * KQEMU support
5fafdf24 3 *
9df217a3
FB
4 * Copyright (c) 2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
9df217a3 23#include <windows.h>
6e4255f6 24#include <winioctl.h>
9df217a3
FB
25#else
26#include <sys/types.h>
27#include <sys/mman.h>
6e4255f6 28#include <sys/ioctl.h>
9df217a3 29#endif
605686cd 30#ifdef HOST_SOLARIS
aafd8139 31#include <sys/ioccom.h>
605686cd 32#endif
9df217a3
FB
33#include <stdlib.h>
34#include <stdio.h>
35#include <stdarg.h>
36#include <string.h>
37#include <errno.h>
38#include <unistd.h>
39#include <inttypes.h>
40
41#include "cpu.h"
42#include "exec-all.h"
43
44#ifdef USE_KQEMU
45
46#define DEBUG
aa062973 47//#define PROFILE
9df217a3
FB
48
49#include <unistd.h>
50#include <fcntl.h>
b88a3832 51#include "kqemu.h"
9df217a3 52
c28e951f
FB
53/* compatibility stuff */
54#ifndef KQEMU_RET_SYSCALL
55#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
56#endif
aa062973
FB
57#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
58#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
59#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
60#endif
f32fc648
FB
61#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
62#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
63#endif
c28e951f 64
6e4255f6
FB
65#ifdef _WIN32
66#define KQEMU_DEVICE "\\\\.\\kqemu"
67#else
9df217a3 68#define KQEMU_DEVICE "/dev/kqemu"
6e4255f6
FB
69#endif
70
71#ifdef _WIN32
72#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
73HANDLE kqemu_fd = KQEMU_INVALID_FD;
74#define kqemu_closefd(x) CloseHandle(x)
75#else
76#define KQEMU_INVALID_FD -1
77int kqemu_fd = KQEMU_INVALID_FD;
78#define kqemu_closefd(x) close(x)
79#endif
9df217a3 80
f32fc648
FB
81/* 0 = not allowed
82 1 = user kqemu
83 2 = kernel kqemu
84*/
9df217a3 85int kqemu_allowed = 1;
9df217a3
FB
86unsigned long *pages_to_flush;
87unsigned int nb_pages_to_flush;
aa062973
FB
88unsigned long *ram_pages_to_update;
89unsigned int nb_ram_pages_to_update;
f32fc648
FB
90unsigned long *modified_ram_pages;
91unsigned int nb_modified_ram_pages;
92uint8_t *modified_ram_pages_table;
9df217a3
FB
93extern uint32_t **l1_phys_map;
94
95#define cpuid(index, eax, ebx, ecx, edx) \
96 asm volatile ("cpuid" \
97 : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
98 : "0" (index))
99
c28e951f
FB
100#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    /* Every x86_64 CPU implements CPUID; no EFLAGS.ID probe needed. */
    return 1;
}
105#else
9df217a3
FB
/* Detect CPUID availability on 32-bit x86: the instruction exists iff
   the ID bit (bit 21) of EFLAGS can be toggled by software. */
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"   /* flip EFLAGS.ID */
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    /* If the flipped bit stuck, CPUID is supported. */
    return (v0 != v1);
}
c28e951f 122#endif
9df217a3
FB
123
/* Make the guest-visible CPUID feature bits agree with the host for the
   features that matter to user code executed natively by kqemu. */
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        /* host has no CPUID: report none of the critical features */
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performances
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    /* Replace only the masked bits; leave the rest of the guest's
       feature words untouched. */
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
160
/* Open the kqemu device, check the module version, allocate the shared
   page queues and hand the guest RAM layout to the kernel module.
   Returns 0 on success, -1 on any failure (kqemu is then disabled). */
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

    /* open the kqemu device (Win32 driver object vs. POSIX device node) */
#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
    /* refuse to run against a module built for a different ABI */
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    /* allocate the queues shared with the kernel module */
    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    /* one dedup byte per guest RAM page */
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    /* describe the guest RAM layout to the kernel module */
    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        /* NOTE(review): qemu_vmalloc'd queues are not released on this
           path; kqemu stays disabled so this is a one-shot leak. */
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
246
247void kqemu_flush_page(CPUState *env, target_ulong addr)
248{
f32fc648 249#if defined(DEBUG)
9df217a3
FB
250 if (loglevel & CPU_LOG_INT) {
251 fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
252 }
253#endif
254 if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
255 nb_pages_to_flush = KQEMU_FLUSH_ALL;
256 else
257 pages_to_flush[nb_pages_to_flush++] = addr;
258}
259
260void kqemu_flush(CPUState *env, int global)
261{
262#ifdef DEBUG
263 if (loglevel & CPU_LOG_INT) {
264 fprintf(logfile, "kqemu_flush:\n");
265 }
266#endif
267 nb_pages_to_flush = KQEMU_FLUSH_ALL;
268}
269
aa062973
FB
270void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
271{
272#ifdef DEBUG
273 if (loglevel & CPU_LOG_INT) {
274 fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
275 }
276#endif
fc8dc060
FB
277 /* we only track transitions to dirty state */
278 if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
279 return;
aa062973
FB
280 if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
281 nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
282 else
283 ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
284}
285
f32fc648
FB
286static void kqemu_reset_modified_ram_pages(void)
287{
288 int i;
289 unsigned long page_index;
3b46e624 290
f32fc648
FB
291 for(i = 0; i < nb_modified_ram_pages; i++) {
292 page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
293 modified_ram_pages_table[page_index] = 0;
294 }
295 nb_modified_ram_pages = 0;
296}
297
/* Note that guest code modified a RAM page (deduplicated via
   modified_ram_pages_table).  When the queue fills up, push it to the
   kernel module via KQEMU_MODIFY_RAM_PAGES and reset it. */
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        /* first time this page is seen since the last reset */
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            /* NOTE(review): ret is not checked; appears deliberate
               (best effort), the queue is reset either way. */
            kqemu_reset_modified_ram_pages();
        }
    }
}
328
9df217a3
FB
/* Legacy x87 state image as used by the fsave/frstor instructions
   (32-bit protected-mode layout; dummy fields pad 16-bit values to
   the 32-bit slots of the hardware format). */
struct fpstate {
    uint16_t fpuc;      /* control word */
    uint16_t dummy1;
    uint16_t fpus;      /* status word */
    uint16_t dummy2;
    uint16_t fptag;     /* full 2-bit-per-register tag word */
    uint16_t dummy3;

    uint32_t fpip;      /* last instruction pointer */
    uint32_t fpcs;
    uint32_t fpoo;      /* last operand offset */
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];  /* ST registers, 10 bytes each */
};
343
/* Extended FPU/SSE state image as used by fxsave/fxrstor. */
struct fpxstate {
    uint16_t fpuc;      /* control word */
    uint16_t fpus;      /* status word */
    uint16_t fptag;     /* abridged 1-bit-per-register tag word */
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];  /* ST/MMX registers, 16-byte slots */
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

/* fxsave/fxrstor require a 16-byte aligned buffer. */
static struct fpxstate fpx1 __attribute__((aligned(16)));
363
364static void restore_native_fp_frstor(CPUState *env)
365{
366 int fptag, i, j;
367 struct fpstate fp1, *fp = &fp1;
3b46e624 368
9df217a3
FB
369 fp->fpuc = env->fpuc;
370 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
371 fptag = 0;
372 for (i=7; i>=0; i--) {
373 fptag <<= 2;
374 if (env->fptags[i]) {
375 fptag |= 3;
376 } else {
377 /* the FPU automatically computes it */
378 }
379 }
380 fp->fptag = fptag;
381 j = env->fpstt;
382 for(i = 0;i < 8; i++) {
383 memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
384 j = (j + 1) & 7;
385 }
386 asm volatile ("frstor %0" : "=m" (*fp));
387}
5fafdf24 388
9df217a3
FB
389static void save_native_fp_fsave(CPUState *env)
390{
391 int fptag, i, j;
392 uint16_t fpuc;
393 struct fpstate fp1, *fp = &fp1;
394
395 asm volatile ("fsave %0" : : "m" (*fp));
396 env->fpuc = fp->fpuc;
397 env->fpstt = (fp->fpus >> 11) & 7;
398 env->fpus = fp->fpus & ~0x3800;
399 fptag = fp->fptag;
400 for(i = 0;i < 8; i++) {
401 env->fptags[i] = ((fptag & 3) == 3);
402 fptag >>= 2;
403 }
404 j = env->fpstt;
405 for(i = 0;i < 8; i++) {
406 memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
407 j = (j + 1) & 7;
408 }
409 /* we must restore the default rounding state */
410 fpuc = 0x037f | (env->fpuc & (3 << 10));
411 asm volatile("fldcw %0" : : "m" (fpuc));
412}
413
414static void restore_native_fp_fxrstor(CPUState *env)
415{
416 struct fpxstate *fp = &fpx1;
417 int i, j, fptag;
418
419 fp->fpuc = env->fpuc;
420 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
421 fptag = 0;
422 for(i = 0; i < 8; i++)
423 fptag |= (env->fptags[i] << i);
424 fp->fptag = fptag ^ 0xff;
425
426 j = env->fpstt;
427 for(i = 0;i < 8; i++) {
428 memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
429 j = (j + 1) & 7;
430 }
431 if (env->cpuid_features & CPUID_SSE) {
432 fp->mxcsr = env->mxcsr;
433 /* XXX: check if DAZ is not available */
434 fp->mxcsr_mask = 0xffff;
c28e951f 435 memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
9df217a3
FB
436 }
437 asm volatile ("fxrstor %0" : "=m" (*fp));
438}
439
440static void save_native_fp_fxsave(CPUState *env)
441{
442 struct fpxstate *fp = &fpx1;
443 int fptag, i, j;
444 uint16_t fpuc;
445
446 asm volatile ("fxsave %0" : : "m" (*fp));
447 env->fpuc = fp->fpuc;
448 env->fpstt = (fp->fpus >> 11) & 7;
449 env->fpus = fp->fpus & ~0x3800;
450 fptag = fp->fptag ^ 0xff;
451 for(i = 0;i < 8; i++) {
452 env->fptags[i] = (fptag >> i) & 1;
453 }
454 j = env->fpstt;
455 for(i = 0;i < 8; i++) {
456 memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
457 j = (j + 1) & 7;
458 }
459 if (env->cpuid_features & CPUID_SSE) {
460 env->mxcsr = fp->mxcsr;
c28e951f 461 memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
9df217a3
FB
462 }
463
464 /* we must restore the default rounding state */
465 asm volatile ("fninit");
466 fpuc = 0x037f | (env->fpuc & (3 << 10));
467 asm volatile("fldcw %0" : : "m" (fpuc));
468}
469
c28e951f
FB
/* Emulate the architectural effects of the SYSCALL instruction after the
   kernel module returned KQEMU_RET_SYSCALL: load CS/SS from STAR, save
   return RIP in RCX (and RFLAGS in R11 in long mode), mask EFLAGS and
   jump to the MSR-defined entry point.  Always returns 2 (softmmu-style
   continue code, same as the KQEMU_RET_SOFTMMU path in kqemu_cpu_exec). */
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    /* SYSCALL CS selector comes from STAR[47:32] */
    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* long mode: RCX <- return RIP, R11 <- RFLAGS */
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        /* sample CS.L before it is overwritten below */
        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* clear the EFLAGS bits selected by SFMASK */
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;   /* 64-bit caller -> LSTAR */
        else
            env->eip = env->cstar;   /* compatibility-mode caller -> CSTAR */
    } else
#endif
    {
        /* legacy mode: RCX <- return EIP, target is STAR[31:0] */
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
522
f32fc648 523#ifdef CONFIG_PROFILER
aa062973
FB
524
525#define PC_REC_SIZE 1
526#define PC_REC_HASH_BITS 16
527#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)
528
529typedef struct PCRecord {
530 unsigned long pc;
531 int64_t count;
532 struct PCRecord *next;
533} PCRecord;
534
f32fc648
FB
535static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
536static int nb_pc_records;
aa062973 537
f32fc648 538static void kqemu_record_pc(unsigned long pc)
aa062973
FB
539{
540 unsigned long h;
541 PCRecord **pr, *r;
542
543 h = pc / PC_REC_SIZE;
544 h = h ^ (h >> PC_REC_HASH_BITS);
545 h &= (PC_REC_HASH_SIZE - 1);
546 pr = &pc_rec_hash[h];
547 for(;;) {
548 r = *pr;
549 if (r == NULL)
550 break;
551 if (r->pc == pc) {
552 r->count++;
553 return;
554 }
555 pr = &r->next;
556 }
557 r = malloc(sizeof(PCRecord));
558 r->count = 1;
559 r->pc = pc;
560 r->next = NULL;
561 *pr = r;
562 nb_pc_records++;
563}
564
f32fc648 565static int pc_rec_cmp(const void *p1, const void *p2)
aa062973
FB
566{
567 PCRecord *r1 = *(PCRecord **)p1;
568 PCRecord *r2 = *(PCRecord **)p2;
569 if (r1->count < r2->count)
570 return 1;
571 else if (r1->count == r2->count)
572 return 0;
573 else
574 return -1;
575}
576
f32fc648
FB
577static void kqemu_record_flush(void)
578{
579 PCRecord *r, *r_next;
580 int h;
581
582 for(h = 0; h < PC_REC_HASH_SIZE; h++) {
583 for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
584 r_next = r->next;
585 free(r);
586 }
587 pc_rec_hash[h] = NULL;
588 }
589 nb_pc_records = 0;
590}
591
aa062973
FB
592void kqemu_record_dump(void)
593{
594 PCRecord **pr, *r;
595 int i, h;
596 FILE *f;
597 int64_t total, sum;
598
599 pr = malloc(sizeof(PCRecord *) * nb_pc_records);
600 i = 0;
601 total = 0;
602 for(h = 0; h < PC_REC_HASH_SIZE; h++) {
603 for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
604 pr[i++] = r;
605 total += r->count;
606 }
607 }
608 qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);
3b46e624 609
aa062973
FB
610 f = fopen("/tmp/kqemu.stats", "w");
611 if (!f) {
612 perror("/tmp/kqemu.stats");
613 exit(1);
614 }
26a76461 615 fprintf(f, "total: %" PRId64 "\n", total);
aa062973
FB
616 sum = 0;
617 for(i = 0; i < nb_pc_records; i++) {
618 r = pr[i];
619 sum += r->count;
5fafdf24
TS
620 fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
621 r->pc,
622 r->count,
aa062973
FB
623 (double)r->count / (double)total * 100.0,
624 (double)sum / (double)total * 100.0);
625 }
626 fclose(f);
627 free(pr);
f32fc648
FB
628
629 kqemu_record_flush();
aa062973
FB
630}
631#endif
632
9df217a3
FB
/* Run guest code natively through the kqemu kernel module.
   Marshals the CPU state into a kqemu_cpu_state, swaps the host FPU
   state for the guest's around the KQEMU_EXEC call, then unmarshals
   the result and translates the module's return code.
   Returns: 0 = interrupted (host signal), 1 = guest exception/interrupt
   pending, 2 = fall back to the software emulator (softmmu/syscall). */
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    /* marshal the guest CPU state into the module's structure */
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef __x86_64__
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    /* pass hardware breakpoints only if any are armed in DR7 */
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    /* hand the pending flush/update queues to the module */
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();

    /* install the guest FPU state on the host for native execution */
    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    /* take the (possibly modified) guest FPU state back */
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    /* unmarshal the CPU state the module handed back */
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef __x86_64__
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

#if KQEMU_VERSION >= 0x010300
    /* invalidate translated blocks covering pages the module modified */
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that is real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    /* translate the module's return code */
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        /* low byte carries the interrupt vector */
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        /* low byte carries the exception vector */
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
        /* interrupted by a host signal: let the main loop handle it */
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
        /* instruction the module cannot handle: fall back to emulation */
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
904
a332e112
FB
/* Asynchronously interrupt native execution (e.g. from a host timer).
   Only needed on Win32, where KQEMU_EXEC is a blocking DeviceIoControl;
   on POSIX the host signal itself interrupts the ioctl. */
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and successfully returning. */
    CancelIo(kqemu_fd);
#endif
}
913
9df217a3 914#endif