/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

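    /* architectural CR0 reset value: CD and NW set (caches off), ET set */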
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

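    /* CS base 0xffff0000 with EIP 0xfff0 makes the first fetch hit the
       architectural reset vector at physical 0xfffffff0 */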
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

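    /* CPUID.1 EAX: family in bits 11:8, model in bits 7:4, extended
       model in bits 19:16; (cpuver >> 12) & 0xf0 moves the extended
       model into the high nibble of the effective model number */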
    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUState *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
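        /* FSW: TOP (bits 13:11) is rebuilt from fpstt; FTW uses the
           abridged FXSAVE convention (bit set = register valid), the
           inverse of env->fptags where 1 means empty */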
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

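        /* dump the bytes around EIP, angle brackets marking the byte
           EIP points at */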
447 cpu_fprintf(f, "Code=");
448 for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
449 if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
450 snprintf(codestr, sizeof(codestr), "%02x", code);
451 } else {
452 snprintf(codestr, sizeof(codestr), "??");
453 }
454 cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
455 i == offs ? "<" : "", codestr, i == offs ? ">" : "");
456 }
457 cpu_fprintf(f, "\n");
458 }
459 }

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
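        /* with A20 disabled, bit 20 is forced to zero in every physical
           address, reproducing the 8086 1MB wrap-around */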
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
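    /* CR0.MP/EM/TS sit in bits 1..3; a single shift lines them up with
       the corresponding HF_MP/HF_EM/HF_TS hflags bits */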
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
540 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
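/* bits 39:12 (long mode) or 35:12 (PAE) of an entry hold the physical
   frame number */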

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
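            /* 4-level walk: the 9-bit table indices come from
               addr[47:39] (PML4), addr[38:30] (PDPT), addr[29:21] (PD)
               and addr[20:12] (PT) */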
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
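            /* NX is an inhibit bit while U/S and R/W are grants; XOR
               flips NX so rights from all levels combine with AND */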
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even with 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
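    /* PF error code: bit 0 P (protection violation), bit 1 W/R,
       bit 2 U/S, bit 3 RSVD, bit 4 I/D (instruction fetch) */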
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
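    /* debugger translation: same table walk as the fault handler above,
       but read-only (no permission checks, no accessed/dirty updates) */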
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

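    /* DR7 type field per breakpoint: 0 = instruction execution,
       1 = data write, 2 = I/O access, 3 = data read/write */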
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

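    /* DR6 bits 3:0 report which of DR0-DR3 matched */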
    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUState *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUState *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
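    /* each bank spans four consecutive slots:
       banks[0] = MCi_CTL, banks[1] = MCi_STATUS,
       banks[2] = MCi_ADDR, banks[3] = MCi_MISC */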

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
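        /* MCA broadcast delivers #MC machine-wide: every other CPU gets
           a plain UC event in bank 1 with RIPV set and no address */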
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            run_on_cpu(env, do_inject_x86_mce, &params);
        }
    }
}
#endif /* !CONFIG_USER_ONLY */

static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;

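    /* MCG_CAP: bank count in bits 7:0 plus capability flags; all-ones
       MCG_CTL/MCi_CTL values enable reporting of every error type */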
    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

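    /* descriptor words: base = e1[31:16] | e2[7:0] << 16 | e2[31:24];
       limit = e1[15:0] | e2[19:16], scaled by 4K when the G bit is set */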
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
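    /* preserve a pending SIPI across the reset so that an INIT-SIPI-SIPI
       sequence still starts this AP */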
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif