]> git.proxmox.com Git - qemu.git/blob - target-i386/helper2.c
2.6 kernel compile fix
[qemu.git] / target-i386 / helper2.c
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
27 #include <sys/mman.h>
28
29 #include "cpu.h"
30 #include "exec-all.h"
31
32 //#define DEBUG_MMU
33
34 #ifdef USE_CODE_COPY
35 #include <asm/ldt.h>
36 #include <linux/unistd.h>
37 #include <linux/version.h>
38
39 _syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
40
41 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
42 #define modify_ldt_ldt_s user_desc
43 #endif
44 #endif /* USE_CODE_COPY */
45
46 CPUX86State *cpu_x86_init(void)
47 {
48 CPUX86State *env;
49 int i;
50 static int inited;
51
52 cpu_exec_init();
53
54 env = malloc(sizeof(CPUX86State));
55 if (!env)
56 return NULL;
57 memset(env, 0, sizeof(CPUX86State));
58
59 /* init to reset state */
60
61 tlb_flush(env, 1);
62 #ifdef CONFIG_SOFTMMU
63 env->hflags |= HF_SOFTMMU_MASK;
64 #endif
65
66 cpu_x86_update_cr0(env, 0x60000010);
67 env->a20_mask = 0xffffffff;
68
69 env->idt.limit = 0xffff;
70 env->gdt.limit = 0xffff;
71 env->ldt.limit = 0xffff;
72 env->ldt.flags = DESC_P_MASK;
73 env->tr.limit = 0xffff;
74 env->tr.flags = DESC_P_MASK;
75
76 /* not correct (CS base=0xffff0000) */
77 cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0x000f0000, 0xffff, 0);
78 cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0);
79 cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0);
80 cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0);
81 cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0);
82 cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0);
83
84 env->eip = 0xfff0;
85 env->regs[R_EDX] = 0x600; /* indicate P6 processor */
86
87 env->eflags = 0x2;
88
89 /* FPU init */
90 for(i = 0;i < 8; i++)
91 env->fptags[i] = 1;
92 env->fpuc = 0x37f;
93
94 /* init various static tables */
95 if (!inited) {
96 inited = 1;
97 optimize_flags_init();
98 }
99 #ifdef USE_CODE_COPY
100 /* testing code for code copy case */
101 {
102 struct modify_ldt_ldt_s ldt;
103
104 ldt.entry_number = 1;
105 ldt.base_addr = (unsigned long)env;
106 ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
107 ldt.seg_32bit = 1;
108 ldt.contents = MODIFY_LDT_CONTENTS_DATA;
109 ldt.read_exec_only = 0;
110 ldt.limit_in_pages = 1;
111 ldt.seg_not_present = 0;
112 ldt.useable = 1;
113 modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
114
115 asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
116 cpu_single_env = env;
117 }
118 #endif
119 return env;
120 }
121
/* Release a CPU state previously allocated by cpu_x86_init().
   free(NULL) is a no-op, so a NULL env is safe. */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
126
127 /***********************************************************/
128 /* x86 debug */
129
/* Printable names for the lazy condition-code operations, used by
   cpu_x86_dump_state() when X86_DUMP_CCOP is requested.
   NOTE(review): indexed by the CC_OP_* enum — the order here must stay
   in sync with that enum (confirm against cpu.h when adding entries). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",
    "MULB",
    "MULW",
    "MULL",
    "ADDB",
    "ADDW",
    "ADDL",
    "ADCB",
    "ADCW",
    "ADCL",
    "SUBB",
    "SUBW",
    "SUBL",
    "SBBB",
    "SBBW",
    "SBBL",
    "LOGICB",
    "LOGICW",
    "LOGICL",
    "INCB",
    "INCW",
    "INCL",
    "DECB",
    "DECW",
    "DECL",
    "SHLB",
    "SHLW",
    "SHLL",
    "SARB",
    "SARW",
    "SARL",
};
164
165 void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags)
166 {
167 int eflags, i;
168 char cc_op_name[32];
169 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
170
171 eflags = env->eflags;
172 fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
173 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
174 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d\n",
175 env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
176 env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
177 env->eip, eflags,
178 eflags & DF_MASK ? 'D' : '-',
179 eflags & CC_O ? 'O' : '-',
180 eflags & CC_S ? 'S' : '-',
181 eflags & CC_Z ? 'Z' : '-',
182 eflags & CC_A ? 'A' : '-',
183 eflags & CC_P ? 'P' : '-',
184 eflags & CC_C ? 'C' : '-',
185 env->hflags & HF_CPL_MASK,
186 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1);
187 for(i = 0; i < 6; i++) {
188 SegmentCache *sc = &env->segs[i];
189 fprintf(f, "%s =%04x %08x %08x %08x\n",
190 seg_name[i],
191 sc->selector,
192 (int)sc->base,
193 sc->limit,
194 sc->flags);
195 }
196 fprintf(f, "LDT=%04x %08x %08x %08x\n",
197 env->ldt.selector,
198 (int)env->ldt.base,
199 env->ldt.limit,
200 env->ldt.flags);
201 fprintf(f, "TR =%04x %08x %08x %08x\n",
202 env->tr.selector,
203 (int)env->tr.base,
204 env->tr.limit,
205 env->tr.flags);
206 fprintf(f, "GDT= %08x %08x\n",
207 (int)env->gdt.base, env->gdt.limit);
208 fprintf(f, "IDT= %08x %08x\n",
209 (int)env->idt.base, env->idt.limit);
210 fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
211 env->cr[0], env->cr[2], env->cr[3], env->cr[4]);
212
213 if (flags & X86_DUMP_CCOP) {
214 if ((unsigned)env->cc_op < CC_OP_NB)
215 strcpy(cc_op_name, cc_op_str[env->cc_op]);
216 else
217 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
218 fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
219 env->cc_src, env->cc_dst, cc_op_name);
220 }
221 if (flags & X86_DUMP_FPU) {
222 fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
223 (double)env->fpregs[0],
224 (double)env->fpregs[1],
225 (double)env->fpregs[2],
226 (double)env->fpregs[3]);
227 fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
228 (double)env->fpregs[4],
229 (double)env->fpregs[5],
230 (double)env->fpregs[7],
231 (double)env->fpregs[8]);
232 }
233 }
234
235 /***********************************************************/
236 /* x86 mmu */
237 /* XXX: add PGE support */
238
239 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
240 {
241 a20_state = (a20_state != 0);
242 if (a20_state != ((env->a20_mask >> 20) & 1)) {
243 #if defined(DEBUG_MMU)
244 printf("A20 update: a20=%d\n", a20_state);
245 #endif
246 /* if the cpu is currently executing code, we must unlink it and
247 all the potentially executing TB */
248 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
249
250 /* when a20 is changed, all the MMU mappings are invalid, so
251 we must flush everything */
252 tlb_flush(env, 1);
253 env->a20_mask = 0xffefffff | (a20_state << 20);
254 }
255 }
256
/* Install a new CR0 value and keep the derived hidden flags in sync.
 *
 * The whole TLB is flushed whenever any of PG/WP/PE changes, since
 * those bits affect address translation and page protection.
 */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }
    env->cr[0] = new_cr0;

    /* update PE flag in hidden flags (CR0.PE is bit 0, so pe_state is
       0 or 1 and can be shifted directly) */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS occupy bits 1..3, so shifting the
       whole CR0 value left by (HF_MP_SHIFT - 1) lines bit 1 (MP) up
       with HF_MP_SHIFT and copies all three bits with a single
       shift+mask — this relies on HF_MP/HF_EM/HF_TS being contiguous
       in hflags, in the same order as in CR0 */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
279
280 void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3)
281 {
282 env->cr[3] = new_cr3;
283 if (env->cr[0] & CR0_PG_MASK) {
284 #if defined(DEBUG_MMU)
285 printf("CR3 update: CR3=%08x\n", new_cr3);
286 #endif
287 tlb_flush(env, 0);
288 }
289 }
290
291 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
292 {
293 #if defined(DEBUG_MMU)
294 printf("CR4 update: CR4=%08x\n", env->cr[4]);
295 #endif
296 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
297 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
298 tlb_flush(env, 1);
299 }
300 env->cr[4] = new_cr4;
301 }
302
/* XXX: also flush 4MB pages */
/* Invalidate the TLB entry covering a single virtual page (INVLPG). */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}
308
309 /* return value:
310 -1 = cannot handle fault
311 0 = nothing more to do
312 1 = generate PF fault
313 2 = soft MMU activation required for this block
314 */
/* Walk the 32-bit two-level page tables for a faulting access and
 * either install a TLB mapping or prepare a page-fault exception.
 *
 * addr       virtual address of the access
 * is_write   nonzero for a write access
 * is_user    nonzero for a CPL 3 access
 * is_softmmu nonzero when the soft MMU is in use for this access
 *
 * return value:
 *  -1 = cannot handle fault
 *   0 = nothing more to do
 *   1 = generate PF fault (CR2 and env->error_code are set)
 *   2 = soft MMU activation required for this block
 */
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, virt_addr, ptep;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, vaddr, page_offset;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
           addr, is_write, is_user, env->eip);
#endif

    if (env->user_mode_only) {
        /* user mode only emulation: every fault is reported to the guest */
        error_code = 0;
        goto do_fault;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, always read/write */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PROT_READ | PROT_WRITE;
        page_size = 4096;
        goto do_mapping;
    }

    /* page directory entry: (addr >> 20) & ~3 is the directory index
       (addr >> 22) pre-scaled by the 4-byte entry size */
    pde_ptr = phys_ram_base +
        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
    pde = ldl_raw(pde_ptr);
    if (!(pde & PG_PRESENT_MASK)) {
        error_code = 0;
        goto do_fault;
    }
    /* if PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        if (is_user) {
            if (!(pde & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            /* supervisor writes only honor R/W when CR0.WP is set */
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        }
        /* update accessed/dirty bits in the PDE as the hardware would */
        is_dirty = is_write && !(pde & PG_DIRTY_MASK);
        if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
            pde |= PG_ACCESSED_MASK;
            if (is_dirty)
                pde |= PG_DIRTY_MASK;
            stl_raw(pde_ptr, pde);
        }

        pte = pde & ~0x003ff000; /* align to 4MB */
        ptep = pte;
        page_size = 4096 * 1024;
        virt_addr = addr & ~0x003fffff;
    } else {
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_raw(pde_ptr, pde);
        }

        /* page table entry: (addr >> 10) & 0xffc is the table index
           ((addr >> 12) & 0x3ff) pre-scaled by the 4-byte entry size */
        pte_ptr = phys_ram_base +
            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
        pte = ldl_raw(pte_ptr);
        if (!(pte & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep = pte & pde;
        if (is_user) {
            if (!(ptep & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        }
        is_dirty = is_write && !(pte & PG_DIRTY_MASK);
        if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
            pte |= PG_ACCESSED_MASK;
            if (is_dirty)
                pte |= PG_DIRTY_MASK;
            stl_raw(pte_ptr, pte);
        }
        page_size = 4096;
        virt_addr = addr & ~0xfff;
    }

    /* the page can be put in the TLB */
    prot = PROT_READ;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PROT_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PROT_WRITE;
        }
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the page-fault error code and latch the faulting address */
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
446
#if defined(CONFIG_USER_ONLY)
/* Debugger address translation: in user-mode emulation virtual and
   physical addresses coincide. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* Translate a virtual address to a physical one for the debugger.
 * Performs a read-only page walk: no accessed/dirty bits are written
 * and no fault is injected.  Returns -1 if the page is not present.
 */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping */
        pte = addr;
        page_size = 4096;
    } else {
        /* page directory entry */
        pde_ptr = phys_ram_base +
            (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
        pde = ldl_raw(pde_ptr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_ptr = phys_ram_base +
                (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
            pte = ldl_raw(pte_ptr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
    }
    pte = pte & env->a20_mask;
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif
487
488 #if defined(USE_CODE_COPY)
/* In-memory image of the 32-bit x86 FPU environment as saved/loaded by
   the fsave/frstor instructions: the 16-bit control, status and tag
   words each padded to 32 bits (the dummy* members are that padding),
   the instruction/operand pointers, then the eight 80-bit (10-byte)
   data registers.
   NOTE(review): layout must match the hardware fsave format exactly —
   confirm against the instruction reference before changing. */
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
503
504 void restore_native_fp_state(CPUState *env)
505 {
506 int fptag, i, j;
507 struct fpstate fp1, *fp = &fp1;
508
509 fp->fpuc = env->fpuc;
510 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
511 fptag = 0;
512 for (i=7; i>=0; i--) {
513 fptag <<= 2;
514 if (env->fptags[i]) {
515 fptag |= 3;
516 } else {
517 /* the FPU automatically computes it */
518 }
519 }
520 fp->fptag = fptag;
521 j = env->fpstt;
522 for(i = 0;i < 8; i++) {
523 memcpy(&fp->fpregs1[i * 10], &env->fpregs[j], 10);
524 j = (j + 1) & 7;
525 }
526 asm volatile ("frstor %0" : "=m" (*fp));
527 env->native_fp_regs = 1;
528 }
529
530 void save_native_fp_state(CPUState *env)
531 {
532 int fptag, i, j;
533 uint16_t fpuc;
534 struct fpstate fp1, *fp = &fp1;
535
536 asm volatile ("fsave %0" : : "m" (*fp));
537 env->fpuc = fp->fpuc;
538 env->fpstt = (fp->fpus >> 11) & 7;
539 env->fpus = fp->fpus & ~0x3800;
540 fptag = fp->fptag;
541 for(i = 0;i < 8; i++) {
542 env->fptags[i] = ((fptag & 3) == 3);
543 fptag >>= 2;
544 }
545 j = env->fpstt;
546 for(i = 0;i < 8; i++) {
547 memcpy(&env->fpregs[j], &fp->fpregs1[i * 10], 10);
548 j = (j + 1) & 7;
549 }
550 /* we must restore the default rounding state */
551 /* XXX: we do not restore the exception state */
552 fpuc = 0x037f | (env->fpuc & (3 << 10));
553 asm volatile("fldcw %0" : : "m" (fpuc));
554 env->native_fp_regs = 0;
555 }
556 #endif