/*
 * Source: qemu.git (git.proxmox.com mirror), target-i386/helper2.c,
 * commit "monitor fixes".
 */
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
27
28 #include "cpu.h"
29 #include "exec-all.h"
30
31 //#define DEBUG_MMU
32
33 #ifdef USE_CODE_COPY
34 #include <asm/ldt.h>
35 #include <linux/unistd.h>
36 #include <linux/version.h>
37
38 _syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
39
40 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
41 #define modify_ldt_ldt_s user_desc
42 #endif
43 #endif /* USE_CODE_COPY */
44
45 CPUX86State *cpu_x86_init(void)
46 {
47 CPUX86State *env;
48 static int inited;
49
50 cpu_exec_init();
51
52 env = malloc(sizeof(CPUX86State));
53 if (!env)
54 return NULL;
55 memset(env, 0, sizeof(CPUX86State));
56 /* init various static tables */
57 if (!inited) {
58 inited = 1;
59 optimize_flags_init();
60 }
61 #ifdef USE_CODE_COPY
62 /* testing code for code copy case */
63 {
64 struct modify_ldt_ldt_s ldt;
65
66 ldt.entry_number = 1;
67 ldt.base_addr = (unsigned long)env;
68 ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
69 ldt.seg_32bit = 1;
70 ldt.contents = MODIFY_LDT_CONTENTS_DATA;
71 ldt.read_exec_only = 0;
72 ldt.limit_in_pages = 1;
73 ldt.seg_not_present = 0;
74 ldt.useable = 1;
75 modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
76
77 asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
78 }
79 #endif
80 cpu_single_env = env;
81 cpu_reset(env);
82 return env;
83 }
84
85 /* NOTE: must be called outside the CPU execute loop */
86 void cpu_reset(CPUX86State *env)
87 {
88 int i;
89
90 memset(env, 0, offsetof(CPUX86State, breakpoints));
91
92 tlb_flush(env, 1);
93
94 /* init to reset state */
95
96 #ifdef CONFIG_SOFTMMU
97 env->hflags |= HF_SOFTMMU_MASK;
98 #endif
99
100 cpu_x86_update_cr0(env, 0x60000010);
101 env->a20_mask = 0xffffffff;
102
103 env->idt.limit = 0xffff;
104 env->gdt.limit = 0xffff;
105 env->ldt.limit = 0xffff;
106 env->ldt.flags = DESC_P_MASK;
107 env->tr.limit = 0xffff;
108 env->tr.flags = DESC_P_MASK;
109
110 cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0xffff0000, 0xffff, 0);
111 cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0);
112 cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0);
113 cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0);
114 cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0);
115 cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0);
116
117 env->eip = 0xfff0;
118 env->regs[R_EDX] = 0x600; /* indicate P6 processor */
119
120 env->eflags = 0x2;
121
122 /* FPU init */
123 for(i = 0;i < 8; i++)
124 env->fptags[i] = 1;
125 env->fpuc = 0x37f;
126 }
127
/* Release a CPU state previously allocated by cpu_x86_init().
   The pointer must not be used afterwards. */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
132
133 /***********************************************************/
134 /* x86 debug */
135
/* Printable names for the lazy condition-code operations, used by
   cpu_dump_state().  NOTE(review): the order must match the CC_OP_*
   enum (indexed by env->cc_op) -- confirm against cpu.h before
   adding/reordering entries. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",
    "MULB",
    "MULW",
    "MULL",
    "ADDB",
    "ADDW",
    "ADDL",
    "ADCB",
    "ADCW",
    "ADCL",
    "SUBB",
    "SUBW",
    "SUBL",
    "SBBB",
    "SBBW",
    "SBBL",
    "LOGICB",
    "LOGICW",
    "LOGICL",
    "INCB",
    "INCW",
    "INCL",
    "DECB",
    "DECW",
    "DECL",
    "SHLB",
    "SHLW",
    "SHLL",
    "SARB",
    "SARW",
    "SARL",
};
170
171 void cpu_dump_state(CPUState *env, FILE *f,
172 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
173 int flags)
174 {
175 int eflags, i;
176 char cc_op_name[32];
177 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
178
179 eflags = env->eflags;
180 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
181 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
182 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
183 env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
184 env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
185 env->eip, eflags,
186 eflags & DF_MASK ? 'D' : '-',
187 eflags & CC_O ? 'O' : '-',
188 eflags & CC_S ? 'S' : '-',
189 eflags & CC_Z ? 'Z' : '-',
190 eflags & CC_A ? 'A' : '-',
191 eflags & CC_P ? 'P' : '-',
192 eflags & CC_C ? 'C' : '-',
193 env->hflags & HF_CPL_MASK,
194 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
195 (env->a20_mask >> 20) & 1);
196 for(i = 0; i < 6; i++) {
197 SegmentCache *sc = &env->segs[i];
198 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
199 seg_name[i],
200 sc->selector,
201 (int)sc->base,
202 sc->limit,
203 sc->flags);
204 }
205 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
206 env->ldt.selector,
207 (int)env->ldt.base,
208 env->ldt.limit,
209 env->ldt.flags);
210 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
211 env->tr.selector,
212 (int)env->tr.base,
213 env->tr.limit,
214 env->tr.flags);
215 cpu_fprintf(f, "GDT= %08x %08x\n",
216 (int)env->gdt.base, env->gdt.limit);
217 cpu_fprintf(f, "IDT= %08x %08x\n",
218 (int)env->idt.base, env->idt.limit);
219 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
220 env->cr[0], env->cr[2], env->cr[3], env->cr[4]);
221
222 if (flags & X86_DUMP_CCOP) {
223 if ((unsigned)env->cc_op < CC_OP_NB)
224 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
225 else
226 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
227 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
228 env->cc_src, env->cc_dst, cc_op_name);
229 }
230 if (flags & X86_DUMP_FPU) {
231 cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
232 (double)env->fpregs[0],
233 (double)env->fpregs[1],
234 (double)env->fpregs[2],
235 (double)env->fpregs[3]);
236 cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
237 (double)env->fpregs[4],
238 (double)env->fpregs[5],
239 (double)env->fpregs[7],
240 (double)env->fpregs[8]);
241 }
242 }
243
244 /***********************************************************/
245 /* x86 mmu */
246 /* XXX: add PGE support */
247
248 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
249 {
250 a20_state = (a20_state != 0);
251 if (a20_state != ((env->a20_mask >> 20) & 1)) {
252 #if defined(DEBUG_MMU)
253 printf("A20 update: a20=%d\n", a20_state);
254 #endif
255 /* if the cpu is currently executing code, we must unlink it and
256 all the potentially executing TB */
257 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
258
259 /* when a20 is changed, all the MMU mappings are invalid, so
260 we must flush everything */
261 tlb_flush(env, 1);
262 env->a20_mask = 0xffefffff | (a20_state << 20);
263 }
264 }
265
/* Install a new CR0 value and keep the derived hidden flags (hflags)
   in sync.  Flushes the TLB when any translation-affecting bit
   (PG, WP, PE) changes. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* A change of PG, WP or PE invalidates all cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }
    /* ET is forced to 1 (387-compatible FPU). */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: copy CR0.MP/EM/TS into HF_MP/HF_EM/HF_TS with a
       single shift.  NOTE(review): this assumes the HF_* bit positions
       are the CR0 positions shifted by (HF_MP_SHIFT - 1) -- confirm
       against the HF_* definitions in cpu.h. */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
288
289 void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3)
290 {
291 env->cr[3] = new_cr3;
292 if (env->cr[0] & CR0_PG_MASK) {
293 #if defined(DEBUG_MMU)
294 printf("CR3 update: CR3=%08x\n", new_cr3);
295 #endif
296 tlb_flush(env, 0);
297 }
298 }
299
300 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
301 {
302 #if defined(DEBUG_MMU)
303 printf("CR4 update: CR4=%08x\n", env->cr[4]);
304 #endif
305 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
306 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
307 tlb_flush(env, 1);
308 }
309 env->cr[4] = new_cr4;
310 }
311
/* Invalidate the cached translation for one virtual page (INVLPG).
   XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}
317
/* Walk the two-level page tables for a guest access at 'addr' and
   install the resulting translation in the soft TLB, or report a fault.

   is_write  : non-zero for a write access (only bit 0 is used)
   is_user   : non-zero for a CPL-3 access
   is_softmmu: passed through to tlb_set_page

   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, virt_addr, ptep;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, vaddr, page_offset;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
           addr, is_write, is_user, env->eip);
#endif
    is_write &= 1;

    if (env->user_mode_only) {
        /* user mode only emulation: every MMU fault is reported as a
           page fault with a bare error code */
        error_code = 0;
        goto do_fault;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, full access */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }

    /* page directory entry: CR3 base + 4 * (addr >> 22), with A20
       masking applied to the physical address */
    pde_ptr = phys_ram_base +
        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
    pde = ldl_raw(pde_ptr);
    if (!(pde & PG_PRESENT_MASK)) {
        error_code = 0;
        goto do_fault;
    }
    /* if PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        /* protection check against the PDE only */
        if (is_user) {
            if (!(pde & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            /* supervisor writes honor R/W only when CR0.WP is set */
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        }
        /* set accessed/dirty bits in the PDE as needed */
        is_dirty = is_write && !(pde & PG_DIRTY_MASK);
        if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
            pde |= PG_ACCESSED_MASK;
            if (is_dirty)
                pde |= PG_DIRTY_MASK;
            stl_raw(pde_ptr, pde);
        }

        pte = pde & ~0x003ff000; /* align to 4MB */
        ptep = pte;
        page_size = 4096 * 1024;
        virt_addr = addr & ~0x003fffff;
    } else {
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_raw(pde_ptr, pde);
        }

        /* page table entry: PDE frame + 4 * ((addr >> 12) & 0x3ff) */
        pte_ptr = phys_ram_base +
            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
        pte = ldl_raw(pte_ptr);
        if (!(pte & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep = pte & pde;
        if (is_user) {
            if (!(ptep & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        }
        is_dirty = is_write && !(pte & PG_DIRTY_MASK);
        if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
            pte |= PG_ACCESSED_MASK;
            if (is_dirty)
                pte |= PG_DIRTY_MASK;
            stl_raw(pte_ptr, pte);
        }
        page_size = 4096;
        virt_addr = addr & ~0xfff;
    }

    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code and record the faulting address in CR2 */
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
456
457 #if defined(CONFIG_USER_ONLY)
/* Debugger physical-address lookup: with user-only emulation the
   guest address space is the host address space, so the mapping is
   the identity. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
462 #else
/* Debugger physical-address lookup: walk the page tables exactly as
   cpu_x86_handle_mmu_fault() does, but without protection checks and
   without setting accessed/dirty bits.  Returns -1 if the address is
   not mapped. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping */
        pte = addr;
        page_size = 4096;
    } else {
        /* page directory entry */
        pde_ptr = phys_ram_base +
            (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
        pde = ldl_raw(pde_ptr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4MB page: the PDE itself holds the frame address */
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_ptr = phys_ram_base +
                (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
            pte = ldl_raw(pte_ptr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
    }
    pte = pte & env->a20_mask;
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
496 #endif
497
498 #if defined(USE_CODE_COPY)
/* In-memory layout written/read by the x87 FSAVE/FRSTOR instructions
   (32-bit protected-mode format): control word, status word, tag word
   (each padded to 32 bits), instruction/operand pointers, then the
   eight 10-byte ST registers. */
struct fpstate {
    uint16_t fpuc;   /* control word */
    uint16_t dummy1;
    uint16_t fpus;   /* status word */
    uint16_t dummy2;
    uint16_t fptag;  /* tag word */
    uint16_t dummy3;

    uint32_t fpip;   /* instruction pointer */
    uint32_t fpcs;   /* instruction pointer selector */
    uint32_t fpoo;   /* operand pointer */
    uint32_t fpos;   /* operand pointer selector */
    uint8_t fpregs1[8 * 10];  /* ST0..ST7, 80 bits each */
};
513
514 void restore_native_fp_state(CPUState *env)
515 {
516 int fptag, i, j;
517 struct fpstate fp1, *fp = &fp1;
518
519 fp->fpuc = env->fpuc;
520 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
521 fptag = 0;
522 for (i=7; i>=0; i--) {
523 fptag <<= 2;
524 if (env->fptags[i]) {
525 fptag |= 3;
526 } else {
527 /* the FPU automatically computes it */
528 }
529 }
530 fp->fptag = fptag;
531 j = env->fpstt;
532 for(i = 0;i < 8; i++) {
533 memcpy(&fp->fpregs1[i * 10], &env->fpregs[j], 10);
534 j = (j + 1) & 7;
535 }
536 asm volatile ("frstor %0" : "=m" (*fp));
537 env->native_fp_regs = 1;
538 }
539
540 void save_native_fp_state(CPUState *env)
541 {
542 int fptag, i, j;
543 uint16_t fpuc;
544 struct fpstate fp1, *fp = &fp1;
545
546 asm volatile ("fsave %0" : : "m" (*fp));
547 env->fpuc = fp->fpuc;
548 env->fpstt = (fp->fpus >> 11) & 7;
549 env->fpus = fp->fpus & ~0x3800;
550 fptag = fp->fptag;
551 for(i = 0;i < 8; i++) {
552 env->fptags[i] = ((fptag & 3) == 3);
553 fptag >>= 2;
554 }
555 j = env->fpstt;
556 for(i = 0;i < 8; i++) {
557 memcpy(&env->fpregs[j], &fp->fpregs1[i * 10], 10);
558 j = (j + 1) & 7;
559 }
560 /* we must restore the default rounding state */
561 /* XXX: we do not restore the exception state */
562 fpuc = 0x037f | (env->fpuc & (3 << 10));
563 asm volatile("fldcw %0" : : "m" (fpuc));
564 env->native_fp_regs = 0;
565 }
566 #endif