4 * Copyright (c) 2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <sys/types.h>
27 #include <sys/ioctl.h>
46 #include "kqemu/kqemu.h"
49 #define KQEMU_DEVICE "\\\\.\\kqemu"
51 #define KQEMU_DEVICE "/dev/kqemu"
/* Global state of the kqemu acceleration layer.
   NOTE(review): the Windows (HANDLE/CloseHandle) and POSIX (int fd/close)
   variants below were presumably separated by #ifdef _WIN32 guards that
   have been lost in this extraction -- confirm against the original file. */
55 #define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
56 HANDLE kqemu_fd
= KQEMU_INVALID_FD
;
57 #define kqemu_closefd(x) CloseHandle(x)
/* POSIX variant of the same device handle. */
59 #define KQEMU_INVALID_FD -1
60 int kqemu_fd
= KQEMU_INVALID_FD
;
61 #define kqemu_closefd(x) close(x)
/* Non-zero when kqemu acceleration may be used (on by default). */
64 int kqemu_allowed
= 1;
/* List of guest page addresses shared with the kqemu kernel module so it
   can flush the corresponding TLB entries (see kqemu_flush_page below). */
65 unsigned long *pages_to_flush
;
66 unsigned int nb_pages_to_flush
;
/* Physical-page mapping table -- defined elsewhere in QEMU (presumably
   exec.c; verify against the rest of the tree). */
67 extern uint32_t **l1_phys_map
;
/* Wrapper around the x86 CPUID instruction: writes EAX/EBX/ECX/EDX results
   into the given lvalues.
   NOTE(review): the macro body is truncated in this extraction -- the input
   operand line (feeding 'index' into EAX) and the closing line are missing. */
69 #define cpuid(index, eax, ebx, ecx, edx) \
70 asm volatile ("cpuid" \
71 : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
/* Runtime test for CPUID support: toggles the EFLAGS ID bit (0x00200000)
   and compares the before/after values (v0/v1) -- if the bit sticks, CPUID
   exists.  NOTE(review): only fragments of the asm body are visible here;
   the declarations of v0/v1 and the return statement are missing from this
   extraction. */
74 static int is_cpuid_supported(void)
77 asm volatile ("pushf\n"
80 "xorl $0x00200000, %0\n"
85 : "=a" (v0
), "=d" (v1
)
/* Make the guest's CPUID feature bits agree with the host CPU for the
   features that matter to user code (CMOV, CX8, FXSR, MMX, SSE, ...),
   since kqemu runs guest user code directly on the host CPU. */
91 static void kqemu_update_cpuid(CPUState
*env
)
93 int critical_features_mask
, features
;
94 uint32_t eax
, ebx
, ecx
, edx
;
96 /* the following features are kept identical on the host and
97 target cpus because they are important for user code. Strictly
98 speaking, only SSE really matters because the OS must support
99 it if the user code uses it. */
100 critical_features_mask
=
101 CPUID_CMOV
| CPUID_CX8
|
102 CPUID_FXSR
| CPUID_MMX
| CPUID_SSE
|
/* Without CPUID we cannot query host features; the early-out body is
   truncated in this extraction. */
104 if (!is_cpuid_supported()) {
/* Query host feature flags via CPUID leaf 1.
   NOTE(review): the line assigning 'features' (presumably from edx) is
   missing here -- confirm against the original file. */
107 cpuid(1, eax
, ebx
, ecx
, edx
);
/* Replace only the critical bits of the guest features with the host's,
   leaving all other guest feature bits untouched. */
110 env
->cpuid_features
= (env
->cpuid_features
& ~critical_features_mask
) |
111 (features
& critical_features_mask
);
112 /* XXX: we could update more of the target CPUID state so that the
113 non accelerated code sees exactly the same CPU features as the
/* Open the kqemu device, verify that the kernel module's version matches
   this build (KQEMU_VERSION), allocate the shared pages_to_flush buffer,
   and hand the guest RAM layout to the module via KQEMU_INIT.  On success
   the guest CPUID is synchronized and env->kqemu_enabled is set.
   NOTE(review): the Windows (CreateFile/DeviceIoControl) and POSIX
   (open/ioctl) paths appear to have lost their #ifdef _WIN32 guards in
   this extraction, and the return statements are not visible. */
117 int kqemu_init(CPUState
*env
)
119 struct kqemu_init init
;
/* Windows: open the kqemu device for read/write access. */
129 kqemu_fd
= CreateFile(KQEMU_DEVICE
, GENERIC_WRITE
| GENERIC_READ
,
130 FILE_SHARE_READ
| FILE_SHARE_WRITE
,
131 NULL
, OPEN_EXISTING
, FILE_ATTRIBUTE_NORMAL
,
/* POSIX: open the kqemu device for read/write access. */
134 kqemu_fd
= open(KQEMU_DEVICE
, O_RDWR
);
/* If the device cannot be opened, acceleration stays disabled. */
136 if (kqemu_fd
== KQEMU_INVALID_FD
) {
137 fprintf(stderr
, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE
);
/* Ask the kernel module for its version and refuse to run on mismatch:
   the kqemu_cpu_state/kqemu_init layouts must agree on both sides. */
142 DeviceIoControl(kqemu_fd
, KQEMU_GET_VERSION
, NULL
, 0,
143 &version
, sizeof(version
), &temp
, NULL
);
145 ioctl(kqemu_fd
, KQEMU_GET_VERSION
, &version
);
147 if (version
!= KQEMU_VERSION
) {
148 fprintf(stderr
, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
149 version
, KQEMU_VERSION
);
/* Buffer of page addresses shared with the module for TLB flushes. */
153 pages_to_flush
= qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH
*
154 sizeof(unsigned long));
/* Describe guest RAM, the dirty-page bitmap and the phys->ram map to
   the kernel module. */
158 init
.ram_base
= phys_ram_base
;
159 init
.ram_size
= phys_ram_size
;
160 init
.ram_dirty
= phys_ram_dirty
;
161 init
.phys_to_ram_map
= l1_phys_map
;
162 init
.pages_to_flush
= pages_to_flush
;
/* Perform KQEMU_INIT; normalize the Windows BOOL result to 0/-1. */
164 ret
= DeviceIoControl(kqemu_fd
, KQEMU_INIT
, &init
, sizeof(init
),
165 NULL
, 0, &temp
, NULL
) == TRUE
? 0 : -1;
167 ret
= ioctl(kqemu_fd
, KQEMU_INIT
, &init
);
/* On init failure, close the device and disable acceleration. */
170 fprintf(stderr
, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret
);
172 kqemu_closefd(kqemu_fd
);
173 kqemu_fd
= KQEMU_INVALID_FD
;
/* Success: align guest CPUID with the host and mark kqemu enabled. */
176 kqemu_update_cpuid(env
);
177 env
->kqemu_enabled
= 1;
178 nb_pages_to_flush
= 0;
/* Record one guest page address for TLB invalidation by the kqemu kernel
   module on the next KQEMU_EXEC.  When the shared list overflows, degrade
   to a full flush by setting the sentinel KQEMU_FLUSH_ALL. */
182 void kqemu_flush_page(CPUState
*env
, target_ulong addr
)
185 if (loglevel
& CPU_LOG_INT
) {
186 fprintf(logfile
, "kqemu_flush_page: addr=" TARGET_FMT_lx
"\n", addr
);
189 if (nb_pages_to_flush
>= KQEMU_MAX_PAGES_TO_FLUSH
)
190 nb_pages_to_flush
= KQEMU_FLUSH_ALL
;
/* Otherwise append the address to the shared flush list.
   NOTE(review): the 'else' introducing this branch is not visible in
   this extraction. */
192 pages_to_flush
[nb_pages_to_flush
++] = addr
;
/* Request a complete TLB flush from the kqemu module on the next exec by
   setting the KQEMU_FLUSH_ALL sentinel.  The 'global' parameter is not
   used in the visible fragment -- confirm against the original file. */
195 void kqemu_flush(CPUState
*env
, int global
)
198 if (loglevel
& CPU_LOG_INT
) {
199 fprintf(logfile
, "kqemu_flush:\n");
202 nb_pages_to_flush
= KQEMU_FLUSH_ALL
;
/* Fragments of the FPU save-area layouts:
   - fpstate (legacy fsave/frstor format): 8 ST registers of 10 bytes each;
   - fpxstate (fxsave/fxrstor format): 8 ST slots of 16 bytes each plus
     8 XMM registers of 16 bytes each.
   NOTE(review): the struct headers and their remaining members (fpuc,
   fpus, fptag, mxcsr, ...) are missing from this extraction. */
217 uint8_t fpregs1
[8 * 10];
233 uint8_t fpregs1
[8 * 16];
234 uint8_t xmm_regs
[8 * 16];
/* Static fxsave/fxrstor buffer; the fxsave instruction requires its
   memory operand to be 16-byte aligned, hence the attribute. */
238 static struct fpxstate fpx1
__attribute__((aligned(16)));
/* Load the guest FPU state from env into the host FPU using the legacy
   frstor instruction (pre-SSE path). */
240 static void restore_native_fp_frstor(CPUState
*env
)
243 struct fpstate fp1
, *fp
= &fp1
;
245 fp
->fpuc
= env
->fpuc
;
/* Rebuild FPUS: clear the TOP field (bits 11-13) and insert fpstt. */
246 fp
->fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
/* Build the 2-bit-per-register tag word from env->fptags.
   NOTE(review): the loop body is partially truncated in this extraction. */
248 for (i
=7; i
>=0; i
--) {
250 if (env
->fptags
[i
]) {
253 /* the FPU automatically computes it */
/* Copy the eight 10-byte ST registers into the frstor save area. */
258 for(i
= 0;i
< 8; i
++) {
259 memcpy(&fp
->fpregs1
[i
* 10], &env
->fpregs
[j
].d
, 10);
262 asm volatile ("frstor %0" : "=m" (*fp
));
/* Save the host FPU state into the guest CPUState using the legacy fsave
   instruction (pre-SSE path), then restore the default FPU control word
   (keeping only the guest's rounding-control bits). */
265 static void save_native_fp_fsave(CPUState
*env
)
269 struct fpstate fp1
, *fp
= &fp1
;
271 asm volatile ("fsave %0" : : "m" (*fp
));
272 env
->fpuc
= fp
->fpuc
;
/* Extract the TOP field (bits 11-13) into fpstt and clear it in fpus. */
273 env
->fpstt
= (fp
->fpus
>> 11) & 7;
274 env
->fpus
= fp
->fpus
& ~0x3800;
/* Decode the 2-bit-per-register tag word: value 3 means "empty".
   NOTE(review): the fptag initialization/shift inside the loop is
   truncated in this extraction. */
276 for(i
= 0;i
< 8; i
++) {
277 env
->fptags
[i
] = ((fptag
& 3) == 3);
/* Copy the eight 10-byte ST registers back into the guest state. */
281 for(i
= 0;i
< 8; i
++) {
282 memcpy(&env
->fpregs
[j
].d
, &fp
->fpregs1
[i
* 10], 10);
285 /* we must restore the default rounding state */
286 fpuc
= 0x037f | (env
->fpuc
& (3 << 10));
287 asm volatile("fldcw %0" : : "m" (fpuc
));
/* Load the guest FPU/SSE state into the host using fxrstor (SSE-capable
   path), writing through the 16-byte-aligned static fpx1 buffer. */
290 static void restore_native_fp_fxrstor(CPUState
*env
)
292 struct fpxstate
*fp
= &fpx1
;
295 fp
->fpuc
= env
->fpuc
;
/* Rebuild FPUS: clear the TOP field (bits 11-13) and insert fpstt. */
296 fp
->fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
/* The fxsave tag format is one bit per register with inverted sense
   relative to env->fptags, hence the final XOR with 0xff.
   NOTE(review): fptag's zero-initialization is not visible here. */
298 for(i
= 0; i
< 8; i
++)
299 fptag
|= (env
->fptags
[i
] << i
);
300 fp
->fptag
= fptag
^ 0xff;
/* Copy the eight 10-byte ST registers into their 16-byte fxsave slots. */
303 for(i
= 0;i
< 8; i
++) {
304 memcpy(&fp
->fpregs1
[i
* 16], &env
->fpregs
[j
].d
, 10);
/* When SSE is exposed to the guest, also restore MXCSR and the XMM
   register file (8 registers x 16 bytes). */
307 if (env
->cpuid_features
& CPUID_SSE
) {
308 fp
->mxcsr
= env
->mxcsr
;
309 /* XXX: check if DAZ is not available */
310 fp
->mxcsr_mask
= 0xffff;
311 memcpy(fp
->xmm_regs
, env
->xmm_regs
, 8 * 16);
313 asm volatile ("fxrstor %0" : "=m" (*fp
));
/* Save the host FPU/SSE state into the guest CPUState using fxsave
   (SSE-capable path), then reinitialize the FPU and restore the default
   control word (keeping only the guest's rounding-control bits). */
316 static void save_native_fp_fxsave(CPUState
*env
)
318 struct fpxstate
*fp
= &fpx1
;
322 asm volatile ("fxsave %0" : : "m" (*fp
));
323 env
->fpuc
= fp
->fpuc
;
/* Extract the TOP field (bits 11-13) into fpstt and clear it in fpus. */
324 env
->fpstt
= (fp
->fpus
>> 11) & 7;
325 env
->fpus
= fp
->fpus
& ~0x3800;
/* fxsave stores one tag bit per register with inverted sense relative
   to env->fptags; undo the inversion then unpack bit by bit. */
326 fptag
= fp
->fptag
^ 0xff;
327 for(i
= 0;i
< 8; i
++) {
328 env
->fptags
[i
] = (fptag
>> i
) & 1;
/* Copy the eight ST registers (10 bytes each, stored in 16-byte slots)
   back into the guest state. */
331 for(i
= 0;i
< 8; i
++) {
332 memcpy(&env
->fpregs
[j
].d
, &fp
->fpregs1
[i
* 16], 10);
/* When SSE is exposed to the guest, also save MXCSR and the XMM
   register file (8 registers x 16 bytes). */
335 if (env
->cpuid_features
& CPUID_SSE
) {
336 env
->mxcsr
= fp
->mxcsr
;
337 memcpy(env
->xmm_regs
, fp
->xmm_regs
, 8 * 16);
340 /* we must restore the default rounding state */
341 asm volatile ("fninit");
342 fpuc
= 0x037f | (env
->fpuc
& (3 << 10));
343 asm volatile("fldcw %0" : : "m" (fpuc
));
/* Run guest code natively through the kqemu kernel module: marshal the
   guest CPU state into a kqemu_cpu_state, restore the host FPU from the
   guest state, issue KQEMU_EXEC, then copy the (possibly modified) state
   back and translate the module's return code into QEMU exception state.
   NOTE(review): this function is truncated at the end of this extraction
   (the handling after the "Unsupported return value" message and the
   final return are not visible), and several #ifdef _WIN32 /
   KQEMU_VERSION guards appear to have been dropped. */
346 int kqemu_cpu_exec(CPUState
*env
)
348 struct kqemu_cpu_state kcpu_state
, *kenv
= &kcpu_state
;
355 if (loglevel
& CPU_LOG_INT
) {
356 fprintf(logfile
, "kqemu: cpu_exec: enter\n");
357 cpu_dump_state(env
, logfile
, fprintf
, 0);
/* --- Marshal guest state into the kernel-module structure. --- */
360 memcpy(kenv
->regs
, env
->regs
, sizeof(kenv
->regs
));
361 kenv
->eip
= env
->eip
;
362 kenv
->eflags
= env
->eflags
;
363 memcpy(&kenv
->segs
, &env
->segs
, sizeof(env
->segs
));
364 memcpy(&kenv
->ldt
, &env
->ldt
, sizeof(env
->ldt
));
365 memcpy(&kenv
->tr
, &env
->tr
, sizeof(env
->tr
));
366 memcpy(&kenv
->gdt
, &env
->gdt
, sizeof(env
->gdt
));
367 memcpy(&kenv
->idt
, &env
->idt
, sizeof(env
->idt
));
368 kenv
->cr0
= env
->cr
[0];
369 kenv
->cr2
= env
->cr
[2];
370 kenv
->cr3
= env
->cr
[3];
371 kenv
->cr4
= env
->cr
[4];
372 kenv
->a20_mask
= env
->a20_mask
;
/* Copy debug registers only when breakpoints are armed in DR7. */
373 if (env
->dr
[7] & 0xff) {
374 kenv
->dr7
= env
->dr
[7];
375 kenv
->dr0
= env
->dr
[0];
376 kenv
->dr1
= env
->dr
[1];
377 kenv
->dr2
= env
->dr
[2];
378 kenv
->dr3
= env
->dr
[3];
382 kenv
->dr6
= env
->dr
[6];
/* Hand over the pending TLB flush list and reset it for the next run. */
384 kenv
->nb_pages_to_flush
= nb_pages_to_flush
;
385 nb_pages_to_flush
= 0;
/* If the FPU is usable (CR0.TS clear), load the guest FPU state into
   the host FPU, preferring fxrstor when the guest has FXSR. */
387 if (!(kenv
->cr0
& CR0_TS_MASK
)) {
388 if (env
->cpuid_features
& CPUID_FXSR
)
389 restore_native_fp_fxrstor(env
);
391 restore_native_fp_frstor(env
);
/* --- Enter the kernel module (Windows / POSIX variants). --- */
395 DeviceIoControl(kqemu_fd
, KQEMU_EXEC
,
396 kenv
, sizeof(struct kqemu_cpu_state
),
397 kenv
, sizeof(struct kqemu_cpu_state
),
401 #if KQEMU_VERSION >= 0x010100
402 ioctl(kqemu_fd
, KQEMU_EXEC
, kenv
);
405 ret
= ioctl(kqemu_fd
, KQEMU_EXEC
, kenv
);
/* If the FPU was usable, save the (possibly modified) host FPU state
   back into the guest, matching the restore variant used above. */
408 if (!(kenv
->cr0
& CR0_TS_MASK
)) {
409 if (env
->cpuid_features
& CPUID_FXSR
)
410 save_native_fp_fxsave(env
);
412 save_native_fp_fsave(env
);
/* --- Copy the updated state back into the guest CPUState. --- */
415 memcpy(env
->regs
, kenv
->regs
, sizeof(env
->regs
));
416 env
->eip
= kenv
->eip
;
417 env
->eflags
= kenv
->eflags
;
418 memcpy(env
->segs
, kenv
->segs
, sizeof(env
->segs
));
420 /* no need to restore that */
421 memcpy(env
->ldt
, kenv
->ldt
, sizeof(env
->ldt
));
422 memcpy(env
->tr
, kenv
->tr
, sizeof(env
->tr
));
423 memcpy(env
->gdt
, kenv
->gdt
, sizeof(env
->gdt
));
424 memcpy(env
->idt
, kenv
->idt
, sizeof(env
->idt
));
425 env
->cr
[0] = kenv
->cr0
;
426 env
->cr
[3] = kenv
->cr3
;
427 env
->cr
[4] = kenv
->cr4
;
428 env
->a20_mask
= kenv
->a20_mask
;
430 env
->cr
[2] = kenv
->cr2
;
431 env
->dr
[6] = kenv
->dr6
;
434 if (loglevel
& CPU_LOG_INT
) {
435 fprintf(logfile
, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret
);
/* Module returned a hardware interrupt: low byte carries the vector. */
438 if ((ret
& 0xff00) == KQEMU_RET_INT
) {
439 env
->exception_index
= ret
& 0xff;
441 env
->exception_is_int
= 1;
442 env
->exception_next_eip
= kenv
->next_eip
;
444 if (loglevel
& CPU_LOG_INT
) {
445 fprintf(logfile
, "kqemu: interrupt v=%02x:\n",
446 env
->exception_index
);
447 cpu_dump_state(env
, logfile
, fprintf
, 0);
/* Module returned a CPU exception: low byte is the vector, error code
   comes from the kernel-side state. */
451 } else if ((ret
& 0xff00) == KQEMU_RET_EXCEPTION
) {
452 env
->exception_index
= ret
& 0xff;
453 env
->error_code
= kenv
->error_code
;
454 env
->exception_is_int
= 0;
455 env
->exception_next_eip
= 0;
457 if (loglevel
& CPU_LOG_INT
) {
458 fprintf(logfile
, "kqemu: exception v=%02x e=%04x:\n",
459 env
->exception_index
, env
->error_code
);
460 cpu_dump_state(env
, logfile
, fprintf
, 0);
/* Interrupted by a signal, or the module wants the software MMU to
   handle this case -- bodies truncated in this extraction. */
464 } else if (ret
== KQEMU_RET_INTR
) {
466 } else if (ret
== KQEMU_RET_SOFTMMU
) {
/* Unknown return code from the module: dump state and report. */
469 cpu_dump_state(env
, stderr
, fprintf
, 0);
470 fprintf(stderr
, "Unsupported return value: 0x%x\n", ret
);