/*
 * QEMU i386 helpers — target-i386/helper.c
 * (web-scrape navigation header removed)
 */
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
25
26 #include "cpu.h"
27 #include "exec-all.h"
28 #include "qemu-common.h"
29 #include "kvm.h"
30
31 //#define DEBUG_MMU
32
/* Reset the CPU to its power-on state.
   NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Clear only the architectural part of the state: fields from
       'breakpoints' onward (breakpoint/watchpoint lists, CPU model
       data, ...) must survive a reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    /* SVM global interrupt flag starts set so interrupts can be taken */
    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 power-on value: ET set, paging and protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the architectural reset vector f000:fff0
       (hidden base 0xffff0000) */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the family/model/stepping signature after reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all stack slots tagged empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    /* debug registers: clear, keeping the architecturally fixed-1 bits */
    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}
106
/* Free a CPU state previously allocated by cpu_x86_init(). */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
111
112 /***********************************************************/
113 /* x86 debug */
114
/* Printable names for the lazy condition-code operations.
   NOTE: the order of this table must match the CC_OP_* enumeration
   declared in cpu.h — it is indexed directly by env->cc_op. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
169
170 static void
171 cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
172 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
173 const char *name, struct SegmentCache *sc)
174 {
175 #ifdef TARGET_X86_64
176 if (env->hflags & HF_CS64_MASK) {
177 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
178 sc->selector, sc->base, sc->limit, sc->flags);
179 } else
180 #endif
181 {
182 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
183 (uint32_t)sc->base, sc->limit, sc->flags);
184 }
185
186 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
187 goto done;
188
189 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
190 if (sc->flags & DESC_S_MASK) {
191 if (sc->flags & DESC_CS_MASK) {
192 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
193 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
194 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
195 (sc->flags & DESC_R_MASK) ? 'R' : '-');
196 } else {
197 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
198 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
199 (sc->flags & DESC_W_MASK) ? 'W' : '-');
200 }
201 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
202 } else {
203 static const char *sys_type_name[2][16] = {
204 { /* 32 bit mode */
205 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
206 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
207 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
208 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
209 },
210 { /* 64 bit mode */
211 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
212 "Reserved", "Reserved", "Reserved", "Reserved",
213 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
214 "Reserved", "IntGate64", "TrapGate64"
215 }
216 };
217 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
218 [(sc->flags & DESC_TYPE_MASK)
219 >> DESC_TYPE_SHIFT]);
220 }
221 done:
222 cpu_fprintf(f, "\n");
223 }
224
225 void cpu_dump_state(CPUState *env, FILE *f,
226 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
227 int flags)
228 {
229 int eflags, i, nb;
230 char cc_op_name[32];
231 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
232
233 cpu_synchronize_state(env);
234
235 eflags = env->eflags;
236 #ifdef TARGET_X86_64
237 if (env->hflags & HF_CS64_MASK) {
238 cpu_fprintf(f,
239 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
240 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
241 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
242 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
243 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
244 env->regs[R_EAX],
245 env->regs[R_EBX],
246 env->regs[R_ECX],
247 env->regs[R_EDX],
248 env->regs[R_ESI],
249 env->regs[R_EDI],
250 env->regs[R_EBP],
251 env->regs[R_ESP],
252 env->regs[8],
253 env->regs[9],
254 env->regs[10],
255 env->regs[11],
256 env->regs[12],
257 env->regs[13],
258 env->regs[14],
259 env->regs[15],
260 env->eip, eflags,
261 eflags & DF_MASK ? 'D' : '-',
262 eflags & CC_O ? 'O' : '-',
263 eflags & CC_S ? 'S' : '-',
264 eflags & CC_Z ? 'Z' : '-',
265 eflags & CC_A ? 'A' : '-',
266 eflags & CC_P ? 'P' : '-',
267 eflags & CC_C ? 'C' : '-',
268 env->hflags & HF_CPL_MASK,
269 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
270 (env->a20_mask >> 20) & 1,
271 (env->hflags >> HF_SMM_SHIFT) & 1,
272 env->halted);
273 } else
274 #endif
275 {
276 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
277 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
278 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
279 (uint32_t)env->regs[R_EAX],
280 (uint32_t)env->regs[R_EBX],
281 (uint32_t)env->regs[R_ECX],
282 (uint32_t)env->regs[R_EDX],
283 (uint32_t)env->regs[R_ESI],
284 (uint32_t)env->regs[R_EDI],
285 (uint32_t)env->regs[R_EBP],
286 (uint32_t)env->regs[R_ESP],
287 (uint32_t)env->eip, eflags,
288 eflags & DF_MASK ? 'D' : '-',
289 eflags & CC_O ? 'O' : '-',
290 eflags & CC_S ? 'S' : '-',
291 eflags & CC_Z ? 'Z' : '-',
292 eflags & CC_A ? 'A' : '-',
293 eflags & CC_P ? 'P' : '-',
294 eflags & CC_C ? 'C' : '-',
295 env->hflags & HF_CPL_MASK,
296 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
297 (env->a20_mask >> 20) & 1,
298 (env->hflags >> HF_SMM_SHIFT) & 1,
299 env->halted);
300 }
301
302 for(i = 0; i < 6; i++) {
303 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
304 &env->segs[i]);
305 }
306 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
307 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
308
309 #ifdef TARGET_X86_64
310 if (env->hflags & HF_LMA_MASK) {
311 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
312 env->gdt.base, env->gdt.limit);
313 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
314 env->idt.base, env->idt.limit);
315 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
316 (uint32_t)env->cr[0],
317 env->cr[2],
318 env->cr[3],
319 (uint32_t)env->cr[4]);
320 for(i = 0; i < 4; i++)
321 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
322 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
323 env->dr[6], env->dr[7]);
324 } else
325 #endif
326 {
327 cpu_fprintf(f, "GDT= %08x %08x\n",
328 (uint32_t)env->gdt.base, env->gdt.limit);
329 cpu_fprintf(f, "IDT= %08x %08x\n",
330 (uint32_t)env->idt.base, env->idt.limit);
331 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
332 (uint32_t)env->cr[0],
333 (uint32_t)env->cr[2],
334 (uint32_t)env->cr[3],
335 (uint32_t)env->cr[4]);
336 for(i = 0; i < 4; i++)
337 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
338 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
339 }
340 if (flags & X86_DUMP_CCOP) {
341 if ((unsigned)env->cc_op < CC_OP_NB)
342 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
343 else
344 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
345 #ifdef TARGET_X86_64
346 if (env->hflags & HF_CS64_MASK) {
347 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
348 env->cc_src, env->cc_dst,
349 cc_op_name);
350 } else
351 #endif
352 {
353 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
354 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
355 cc_op_name);
356 }
357 }
358 if (flags & X86_DUMP_FPU) {
359 int fptag;
360 fptag = 0;
361 for(i = 0; i < 8; i++) {
362 fptag |= ((!env->fptags[i]) << i);
363 }
364 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
365 env->fpuc,
366 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
367 env->fpstt,
368 fptag,
369 env->mxcsr);
370 for(i=0;i<8;i++) {
371 #if defined(USE_X86LDOUBLE)
372 union {
373 long double d;
374 struct {
375 uint64_t lower;
376 uint16_t upper;
377 } l;
378 } tmp;
379 tmp.d = env->fpregs[i].d;
380 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
381 i, tmp.l.lower, tmp.l.upper);
382 #else
383 cpu_fprintf(f, "FPR%d=%016" PRIx64,
384 i, env->fpregs[i].mmx.q);
385 #endif
386 if ((i & 1) == 1)
387 cpu_fprintf(f, "\n");
388 else
389 cpu_fprintf(f, " ");
390 }
391 if (env->hflags & HF_CS64_MASK)
392 nb = 16;
393 else
394 nb = 8;
395 for(i=0;i<nb;i++) {
396 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
397 i,
398 env->xmm_regs[i].XMM_L(3),
399 env->xmm_regs[i].XMM_L(2),
400 env->xmm_regs[i].XMM_L(1),
401 env->xmm_regs[i].XMM_L(0));
402 if ((i & 1) == 1)
403 cpu_fprintf(f, "\n");
404 else
405 cpu_fprintf(f, " ");
406 }
407 }
408 }
409
410 /***********************************************************/
411 /* x86 mmu */
412 /* XXX: add PGE support */
413
414 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
415 {
416 a20_state = (a20_state != 0);
417 if (a20_state != ((env->a20_mask >> 20) & 1)) {
418 #if defined(DEBUG_MMU)
419 printf("A20 update: a20=%d\n", a20_state);
420 #endif
421 /* if the cpu is currently executing code, we must unlink it and
422 all the potentially executing TB */
423 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
424
425 /* when a20 is changed, all the MMU mappings are invalid, so
426 we must flush everything */
427 tlb_flush(env, 1);
428 env->a20_mask = ~(1 << 20) | (a20_state << 20);
429 }
430 }
431
/* Install a new CR0 value: flushes the TLB when translation-related
   bits change, handles long-mode entry/exit, and recomputes the
   PE/MP/EM/TS shadow bits kept in hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* PG/WP/PE affect address translation: flush on any change */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* CR0.ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0 bits 1..3 (MP/EM/TS) are shifted so that
       they line up with HF_MP/HF_EM/HF_TS in hflags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
472
473 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
474 the PDPT */
475 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
476 {
477 env->cr[3] = new_cr3;
478 if (env->cr[0] & CR0_PG_MASK) {
479 #if defined(DEBUG_MMU)
480 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
481 #endif
482 tlb_flush(env, 0);
483 }
484 }
485
486 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
487 {
488 #if defined(DEBUG_MMU)
489 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
490 #endif
491 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
492 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
493 tlb_flush(env, 1);
494 }
495 /* SSE handling */
496 if (!(env->cpuid_features & CPUID_SSE))
497 new_cr4 &= ~CR4_OSFXSR_MASK;
498 if (new_cr4 & CR4_OSFXSR_MASK)
499 env->hflags |= HF_OSFXSR_MASK;
500 else
501 env->hflags &= ~HF_OSFXSR_MASK;
502
503 env->cr[4] = new_cr4;
504 }
505
506 #if defined(CONFIG_USER_ONLY)
507
508 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
509 int is_write, int mmu_idx, int is_softmmu)
510 {
511 /* user mode only emulation */
512 is_write &= 1;
513 env->cr[2] = addr;
514 env->error_code = (is_write << PG_ERROR_W_BIT);
515 env->error_code |= PG_ERROR_U_MASK;
516 env->exception_index = EXCP0E_PAGE;
517 return 1;
518 }
519
520 #else
521
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* Walk the guest page tables for 'addr' and install the resulting
   translation in the TLB.  is_write1 is 0 for reads, 1 for writes and
   2 for instruction fetches.
   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* fetches (is_write1 == 2) count as reads for the dirty logic */
    is_write = is_write1 & 1;

    /* paging disabled: identity mapping, full access */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4-level (long mode) walk: PML4E -> PDPE -> PDE -> PTE */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX bit set while EFER.NXE is off is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* accumulate permissions in ptep; NX is inverted so that it
               can be AND-combined like USER/RW */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy 3-level PAE: 4-entry PDPT, no permission bits */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the inversion: ptep now has real NX polarity */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honour RW only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* classic 2-level 32-bit paging */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code from the access characteristics */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
825
/* Translate a guest virtual address to a physical address for the
   debugger, walking the page tables without touching accessed/dirty
   bits or the TLB.  Returns -1 if the address is not mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4-level (long mode) walk */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy 3-level PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
919
/* Install the TCG breakpoint or watchpoint backing debug register
   'index', according to the type/length/enable bits in DR7. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        /* data write breakpoint */
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        /* data read/write breakpoint */
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    /* NOTE(review): on failure this clears cpu_breakpoint[index] even
       when the failed insertion above was a *watchpoint* — this only
       makes sense if cpu_breakpoint/cpu_watchpoint overlay each other
       in CPUX86State; verify against the declaration in cpu.h. */
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
947
/* Remove the TCG breakpoint or watchpoint backing debug register
   'index', if one is installed. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    /* nothing installed for this slot */
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        /* data watchpoint (write or read/write) */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
966
967 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
968 {
969 target_ulong dr6;
970 int reg, type;
971 int hit_enabled = 0;
972
973 dr6 = env->dr[6] & ~0xf;
974 for (reg = 0; reg < 4; reg++) {
975 type = hw_breakpoint_type(env->dr[7], reg);
976 if ((type == 0 && env->dr[reg] == env->eip) ||
977 ((type & 1) && env->cpu_watchpoint[reg] &&
978 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
979 dr6 |= 1 << reg;
980 if (hw_breakpoint_enabled(env->dr[7], reg))
981 hit_enabled = 1;
982 }
983 }
984 if (hit_enabled || force_dr6_update)
985 env->dr[6] = dr6;
986 return hit_enabled;
987 }
988
/* Previously installed debug-exception handler, saved so that
   breakpoint_handler() can chain to it. */
static CPUDebugExcpHandler *prev_debug_excp_handler;

/* defined in op_helper.c; declared here to avoid an include cycle */
void raise_exception_env(int exception_index, CPUState *env);
992
/* Debug-exception hook installed via cpu_set_debug_excp_handler().
   Translates breakpoint/watchpoint hits owned by the guest (BP_CPU)
   into a guest #DB exception; hits owned by the gdbstub are left for
   the chained handler. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                /* spurious hit (breakpoint disabled): restart the
                   faulting instruction */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    /* chain to any previously installed handler (e.g. gdbstub) */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1018
/* This should come from sysemu.h - if we could include it here...
   (declared locally to avoid pulling system emulator headers into a
   target helper file) */
void qemu_system_reset_request(void);
1021
/* Inject a machine-check event into MCE bank 'bank' of 'cenv'.
   status/mcg_status/addr/misc are the raw MSR values to report.
   Uncorrected (UC) errors are dropped when reporting is disabled via
   MCG_CTL/MCi_CTL; a UC error while a previous one is still in
   progress (MCIP set) or with CR4.MCE clear triggers a system reset
   (shutdown / triple-fault behaviour). */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;   /* low byte of MCG_CAP = bank count */
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    /* 4 MSRs per bank: CTL, STATUS, ADDR, MISC */
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* nested machine check or MCE disabled: shutdown */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* overwriting a valid entry sets the overflow flag */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: record only if the bank is free or holds a
           corrected error itself */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        /* bank busy with an uncorrected error: just flag the overflow */
        banks[1] |= MCI_STATUS_OVER;
}
1072 #endif /* !CONFIG_USER_ONLY */
1073
1074 static void mce_init(CPUX86State *cenv)
1075 {
1076 unsigned int bank, bank_num;
1077
1078 if (((cenv->cpuid_version >> 8)&0xf) >= 6
1079 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
1080 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1081 cenv->mcg_ctl = ~(uint64_t)0;
1082 bank_num = MCE_BANKS_DEF;
1083 for (bank = 0; bank < bank_num; bank++)
1084 cenv->mce_banks[bank*4] = ~(uint64_t)0;
1085 }
1086 }
1087
1088 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1089 target_ulong *base, unsigned int *limit,
1090 unsigned int *flags)
1091 {
1092 SegmentCache *dt;
1093 target_ulong ptr;
1094 uint32_t e1, e2;
1095 int index;
1096
1097 if (selector & 0x4)
1098 dt = &env->ldt;
1099 else
1100 dt = &env->gdt;
1101 index = selector & ~7;
1102 ptr = dt->base + index;
1103 if ((index + 7) > dt->limit
1104 || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1105 || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1106 return 0;
1107
1108 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1109 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1110 if (e2 & DESC_G_MASK)
1111 *limit = (*limit << 12) | 0xfff;
1112 *flags = e2;
1113
1114 return 1;
1115 }
1116
/* Allocate and initialize a new x86 CPU for the named model.
   Returns NULL (after freeing the allocation) if the model is
   unknown. */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables (first CPU only) */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* chain our debug exception handler in front of any existing
           one (e.g. the gdbstub's) */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
1145
#if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: full CPU reset, but preserve a pending SIPI
   request so the CPU stays waiting for the startup IPI. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

/* Handle a startup IPI: delegated entirely to the local APIC. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
/* User-mode emulation has no INIT/SIPI handling. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif