]> git.proxmox.com Git - mirror_qemu.git/blob - target-i386/helper2.c
CR0.MP/EM/TS support - native fpu support in code copy mode
[mirror_qemu.git] / target-i386 / helper2.c
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
27 #include <sys/mman.h>
28
29 #include "cpu.h"
30 #include "exec-all.h"
31
32 //#define DEBUG_MMU
33
34 #ifdef USE_CODE_COPY
35 #include <asm/ldt.h>
36 #include <linux/unistd.h>
37
38 _syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
39 #endif
40
41 CPUX86State *cpu_x86_init(void)
42 {
43 CPUX86State *env;
44 int i;
45 static int inited;
46
47 cpu_exec_init();
48
49 env = malloc(sizeof(CPUX86State));
50 if (!env)
51 return NULL;
52 memset(env, 0, sizeof(CPUX86State));
53
54 /* init to reset state */
55
56 tlb_flush(env, 1);
57 #ifdef CONFIG_SOFTMMU
58 env->hflags |= HF_SOFTMMU_MASK;
59 #endif
60
61 cpu_x86_update_cr0(env, 0x60000010);
62 env->a20_mask = 0xffffffff;
63
64 env->idt.limit = 0xffff;
65 env->gdt.limit = 0xffff;
66 env->ldt.limit = 0xffff;
67 env->ldt.flags = DESC_P_MASK;
68 env->tr.limit = 0xffff;
69 env->tr.flags = DESC_P_MASK;
70
71 /* not correct (CS base=0xffff0000) */
72 cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0x000f0000, 0xffff, 0);
73 cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0);
74 cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0);
75 cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0);
76 cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0);
77 cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0);
78
79 env->eip = 0xfff0;
80 env->regs[R_EDX] = 0x600; /* indicate P6 processor */
81
82 env->eflags = 0x2;
83
84 /* FPU init */
85 for(i = 0;i < 8; i++)
86 env->fptags[i] = 1;
87 env->fpuc = 0x37f;
88
89 /* init various static tables */
90 if (!inited) {
91 inited = 1;
92 optimize_flags_init();
93 }
94 #ifdef USE_CODE_COPY
95 /* testing code for code copy case */
96 {
97 struct modify_ldt_ldt_s ldt;
98
99 ldt.entry_number = 1;
100 ldt.base_addr = (unsigned long)env;
101 ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
102 ldt.seg_32bit = 1;
103 ldt.contents = MODIFY_LDT_CONTENTS_DATA;
104 ldt.read_exec_only = 0;
105 ldt.limit_in_pages = 1;
106 ldt.seg_not_present = 0;
107 ldt.useable = 1;
108 modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
109
110 asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
111 cpu_single_env = env;
112 }
113 #endif
114 return env;
115 }
116
/* Release a CPU state previously allocated by cpu_x86_init(). */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
121
122 /***********************************************************/
123 /* x86 debug */
124
/* Printable names for the lazy condition-code operations; indexed
   directly by env->cc_op in cpu_x86_dump_state(), so the order must
   match the CC_OP_* enumeration. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",
    "MULB",
    "MULW",
    "MULL",
    "ADDB",
    "ADDW",
    "ADDL",
    "ADCB",
    "ADCW",
    "ADCL",
    "SUBB",
    "SUBW",
    "SUBL",
    "SBBB",
    "SBBW",
    "SBBL",
    "LOGICB",
    "LOGICW",
    "LOGICL",
    "INCB",
    "INCW",
    "INCL",
    "DECB",
    "DECW",
    "DECL",
    "SHLB",
    "SHLW",
    "SHLL",
    "SARB",
    "SARW",
    "SARL",
};
159
160 void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags)
161 {
162 int eflags, i;
163 char cc_op_name[32];
164 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
165
166 eflags = env->eflags;
167 fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
168 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
169 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d\n",
170 env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
171 env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
172 env->eip, eflags,
173 eflags & DF_MASK ? 'D' : '-',
174 eflags & CC_O ? 'O' : '-',
175 eflags & CC_S ? 'S' : '-',
176 eflags & CC_Z ? 'Z' : '-',
177 eflags & CC_A ? 'A' : '-',
178 eflags & CC_P ? 'P' : '-',
179 eflags & CC_C ? 'C' : '-',
180 env->hflags & HF_CPL_MASK,
181 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1);
182 for(i = 0; i < 6; i++) {
183 SegmentCache *sc = &env->segs[i];
184 fprintf(f, "%s =%04x %08x %08x %08x\n",
185 seg_name[i],
186 sc->selector,
187 (int)sc->base,
188 sc->limit,
189 sc->flags);
190 }
191 fprintf(f, "LDT=%04x %08x %08x %08x\n",
192 env->ldt.selector,
193 (int)env->ldt.base,
194 env->ldt.limit,
195 env->ldt.flags);
196 fprintf(f, "TR =%04x %08x %08x %08x\n",
197 env->tr.selector,
198 (int)env->tr.base,
199 env->tr.limit,
200 env->tr.flags);
201 fprintf(f, "GDT= %08x %08x\n",
202 (int)env->gdt.base, env->gdt.limit);
203 fprintf(f, "IDT= %08x %08x\n",
204 (int)env->idt.base, env->idt.limit);
205 fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
206 env->cr[0], env->cr[2], env->cr[3], env->cr[4]);
207
208 if (flags & X86_DUMP_CCOP) {
209 if ((unsigned)env->cc_op < CC_OP_NB)
210 strcpy(cc_op_name, cc_op_str[env->cc_op]);
211 else
212 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
213 fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
214 env->cc_src, env->cc_dst, cc_op_name);
215 }
216 if (flags & X86_DUMP_FPU) {
217 fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
218 (double)env->fpregs[0],
219 (double)env->fpregs[1],
220 (double)env->fpregs[2],
221 (double)env->fpregs[3]);
222 fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
223 (double)env->fpregs[4],
224 (double)env->fpregs[5],
225 (double)env->fpregs[7],
226 (double)env->fpregs[8]);
227 }
228 }
229
230 /***********************************************************/
231 /* x86 mmu */
232 /* XXX: add PGE support */
233
234 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
235 {
236 a20_state = (a20_state != 0);
237 if (a20_state != ((env->a20_mask >> 20) & 1)) {
238 #if defined(DEBUG_MMU)
239 printf("A20 update: a20=%d\n", a20_state);
240 #endif
241 /* if the cpu is currently executing code, we must unlink it and
242 all the potentially executing TB */
243 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
244
245 /* when a20 is changed, all the MMU mappings are invalid, so
246 we must flush everything */
247 tlb_flush(env, 1);
248 env->a20_mask = 0xffefffff | (a20_state << 20);
249 }
250 }
251
/* Load a new value into CR0 and refresh the derived hidden flags.
 *
 * The TLB must be flushed whenever PG (paging enable), WP (supervisor
 * write protect) or PE (protected mode enable) changes, since each of
 * those alters the meaning of existing translations.
 */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }
    env->cr[0] = new_cr0;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS are copied into hflags in one
       shift; this works because the three HF_* bits keep the same
       relative order as the CR0 bits, and MP is CR0 bit 1, hence the
       "HF_MP_SHIFT - 1" adjustment */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
274
275 void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3)
276 {
277 env->cr[3] = new_cr3;
278 if (env->cr[0] & CR0_PG_MASK) {
279 #if defined(DEBUG_MMU)
280 printf("CR3 update: CR3=%08x\n", new_cr3);
281 #endif
282 tlb_flush(env, 0);
283 }
284 }
285
286 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
287 {
288 #if defined(DEBUG_MMU)
289 printf("CR4 update: CR4=%08x\n", env->cr[4]);
290 #endif
291 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
292 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
293 tlb_flush(env, 1);
294 }
295 env->cr[4] = new_cr4;
296 }
297
/* Invalidate the cached translation for a single virtual page
   (the x86 INVLPG operation).
   XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}
303
/* Walk the two-level i386 page tables for a faulting access and, on
   success, install the translation in the TLB.

   addr       - faulting linear address
   is_write   - non-zero for a write access
   is_user    - non-zero if the access is at user privilege
   is_softmmu - passed through to tlb_set_page()

   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, virt_addr, ptep;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, vaddr, page_offset;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
           addr, is_write, is_user, env->eip);
#endif

    if (env->user_mode_only) {
        /* user mode only emulation: every fault is forwarded to the
           guest as a page fault with an empty error code */
        error_code = 0;
        goto do_fault;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, full access */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PROT_READ | PROT_WRITE;
        page_size = 4096;
        goto do_mapping;
    }

    /* page directory entry: (addr >> 22) * 4 byte offset into the
       directory at CR3, folded through the A20 mask */
    pde_ptr = phys_ram_base +
        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
    pde = ldl_raw(pde_ptr);
    if (!(pde & PG_PRESENT_MASK)) {
        error_code = 0;
        goto do_fault;
    }
    /* if PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        /* protection check against the PDE only */
        if (is_user) {
            if (!(pde & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            /* supervisor writes honor R/W only when CR0.WP is set */
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        }
        /* set accessed/dirty bits in the PDE only when they change */
        is_dirty = is_write && !(pde & PG_DIRTY_MASK);
        if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
            pde |= PG_ACCESSED_MASK;
            if (is_dirty)
                pde |= PG_DIRTY_MASK;
            stl_raw(pde_ptr, pde);
        }

        pte = pde & ~0x003ff000; /* align to 4MB */
        ptep = pte;
        page_size = 4096 * 1024;
        virt_addr = addr & ~0x003fffff;
    } else {
        /* mark the PDE accessed before descending to the page table */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_raw(pde_ptr, pde);
        }

        /* page table entry: bits 21..12 of addr select the PTE */
        pte_ptr = phys_ram_base +
            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
        pte = ldl_raw(pte_ptr);
        if (!(pte & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep = pte & pde;
        if (is_user) {
            if (!(ptep & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        }
        is_dirty = is_write && !(pte & PG_DIRTY_MASK);
        if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
            pte |= PG_ACCESSED_MASK;
            if (is_dirty)
                pte |= PG_DIRTY_MASK;
            stl_raw(pte_ptr, pte);
        }
        page_size = 4096;
        virt_addr = addr & ~0xfff;
    }

    /* the page can be put in the TLB */
    prot = PROT_READ;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PROT_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PROT_WRITE;
        }
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code and latch the faulting address in CR2 */
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
441
#if defined(CONFIG_USER_ONLY)
/* Debugger helper: user-mode emulation uses a flat address space,
   so the "physical" address is the virtual address itself. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* Debugger helper: translate a virtual address to a physical one by
   walking the page tables read-only (no accessed/dirty updates, no
   protection checks, no fault injection).  Returns -1 if the address
   is not mapped. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping */
        pte = addr;
        page_size = 4096;
    } else {
        /* page directory entry */
        pde_ptr = phys_ram_base +
            (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
        pde = ldl_raw(pde_ptr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_ptr = phys_ram_base +
                (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
            pte = ldl_raw(pte_ptr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
    }
    pte = pte & env->a20_mask;
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif
482
483 #if defined(USE_CODE_COPY)
/* Memory image produced/consumed by the x86 fsave/frstor instructions
   (32-bit protected-mode layout; the dummyN fields are the unused
   upper halves of the 32-bit slots). */
struct fpstate {
    uint16_t fpuc;           /* FPU control word */
    uint16_t dummy1;
    uint16_t fpus;           /* FPU status word (TOP in bits 13..11) */
    uint16_t dummy2;
    uint16_t fptag;          /* tag word, 2 bits per register */
    uint16_t dummy3;

    uint32_t fpip;           /* last instruction pointer */
    uint32_t fpcs;           /* last instruction CS selector */
    uint32_t fpoo;           /* last operand offset */
    uint32_t fpos;           /* last operand selector */
    uint8_t fpregs1[8 * 10]; /* ST(0)..ST(7), 80 bits each */
};
498
499 void restore_native_fp_state(CPUState *env)
500 {
501 int fptag, i, j;
502 struct fpstate fp1, *fp = &fp1;
503
504 fp->fpuc = env->fpuc;
505 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
506 fptag = 0;
507 for (i=7; i>=0; i--) {
508 fptag <<= 2;
509 if (env->fptags[i]) {
510 fptag |= 3;
511 } else {
512 /* the FPU automatically computes it */
513 }
514 }
515 fp->fptag = fptag;
516 j = env->fpstt;
517 for(i = 0;i < 8; i++) {
518 memcpy(&fp->fpregs1[i * 10], &env->fpregs[j], 10);
519 j = (j + 1) & 7;
520 }
521 asm volatile ("frstor %0" : "=m" (*fp));
522 env->native_fp_regs = 1;
523 }
524
525 void save_native_fp_state(CPUState *env)
526 {
527 int fptag, i, j;
528 uint16_t fpuc;
529 struct fpstate fp1, *fp = &fp1;
530
531 asm volatile ("fsave %0" : : "m" (*fp));
532 env->fpuc = fp->fpuc;
533 env->fpstt = (fp->fpus >> 11) & 7;
534 env->fpus = fp->fpus & ~0x3800;
535 fptag = fp->fptag;
536 for(i = 0;i < 8; i++) {
537 env->fptags[i] = ((fptag & 3) == 3);
538 fptag >>= 2;
539 }
540 j = env->fpstt;
541 for(i = 0;i < 8; i++) {
542 memcpy(&env->fpregs[j], &fp->fpregs1[i * 10], 10);
543 j = (j + 1) & 7;
544 }
545 /* we must restore the default rounding state */
546 /* XXX: we do not restore the exception state */
547 fpuc = 0x037f | (env->fpuc & (3 << 10));
548 asm volatile("fldcw %0" : : "m" (fpuc));
549 env->native_fp_regs = 0;
550 }
551 #endif