/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
#include "kvm_x86.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

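/* Decode the displayed family (bits 11:8) and model (extended model in
   bits 19:16 combined with bits 7:4) from the CPUID version word. */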
static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUState *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

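/* Pretty-print one segment cache entry (selector, base, limit, flags) and,
   in protected mode, decode its descriptor type for the register dump. */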
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL 50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

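/* Toggle the A20 address line mask; a change forces the CPU out of any
   chained TBs and flushes the whole TLB, since all physical mappings may
   differ. */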
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

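/* Install a new CR0 value: flush the TLB when paging-related bits change,
   enter or leave long mode via EFER.LMA, and refresh the derived hflags
   (PE, ADDSEG, MP/EM/TS). */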
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

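/* Debugger helper: walk the page tables without setting accessed/dirty bits
   and return the physical address for 'addr', or -1 if it is not mapped. */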
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

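/* Mirror DR7 state into QEMU's debug facilities: type 0 becomes a CPU
   breakpoint, types 1 and 3 become write/access watchpoints, and I/O
   breakpoints (type 2) are not supported. */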
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

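/* Recompute the DR6 hit bits for the four debug registers and return
   whether an enabled breakpoint or watchpoint actually triggered. */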
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

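/* Debug exception hook: convert hardware breakpoint/watchpoint hits into a
   #DB exception, then chain to the previously installed handler. */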
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

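/* TCG path of MCE injection: check the machine-check control state, fill the
   selected bank's MSRs, and raise CPU_INTERRUPT_MCE for uncorrected errors;
   if a previous MCE is still in progress, request a reset (triple fault). */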
static void
qemu_inject_x86_mce(Monitor *mon, CPUState *cenv, int bank, uint64_t status,
                    uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    uint64_t *banks = cenv->mce_banks + 4 * bank;

    if (status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(mon, "CPU %d: Uncorrected error reporting disabled "
                           "for bank %d\n", cenv->cpu_index, bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(mon, "CPU %d: Previous MCE still in progress, "
                           "raising triple fault\n", cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            status |= MCI_STATUS_OVER;
        }
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            status |= MCI_STATUS_OVER;
        }
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

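/* Monitor entry point for MCE injection: validate capability, bank number and
   status, then hand the request to KVM or to the software injector above,
   optionally broadcasting an uncorrected error to the other CPUs. */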
void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int broadcast)
{
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;
    int flag = 0;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if (broadcast && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    if (kvm_enabled()) {
        if (broadcast) {
            flag |= MCE_BROADCAST;
        }

        kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc, flag);
    } else {
        qemu_inject_x86_mce(mon, cenv, bank, status, mcg_status, addr, misc);
        if (broadcast) {
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (cenv == env) {
                    continue;
                }
                qemu_inject_x86_mce(mon, env, 1,
                                    MCI_STATUS_VAL | MCI_STATUS_UC,
                                    MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0);
            }
        }
    }
}
#endif /* !CONFIG_USER_ONLY */

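/* Advertise the default MCE capability (with all-1s global and per-bank
   control MSRs) on family 6 and later CPUs that report both the MCE and MCA
   CPUID feature bits. */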
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

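/* Debugger helper: read a segment descriptor from the GDT or LDT and return
   its base, limit and flags; returns 0 if the selector is out of range or the
   descriptor cannot be read. */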
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif