/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

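/* Decode the family/model fields of CPUID.01H:EAX as cached in
 * env->cpuid_version: the base family lives in bits 11..8, the base model
 * in bits 7..4, and the extended model (bits 19..16) is folded into the
 * high nibble of the reported model. */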
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

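/* Pretty-print one segment-cache entry (selector, base, limit, flags) and,
 * in protected mode, decode the descriptor attributes: code/data segments
 * get their size and access bits spelled out, system descriptors are
 * looked up in sys_type_name[] below. */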
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

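/* Dump the architectural register state (general-purpose registers, segment
 * caches, control/debug registers, and optionally condition codes, FPU/SSE
 * state and a window of code bytes around EIP) to the given log stream. */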
void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

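/* Toggle the emulated A20 gate.  env->a20_mask is ANDed into every physical
 * address produced by the MMU helpers below, so changing it invalidates all
 * cached translations (hence the full tlb_flush) and any chained TBs. */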
369void cpu_x86_set_a20(CPUX86State *env, int a20_state)
2c0262af 370{
eaa728ee
FB
371 a20_state = (a20_state != 0);
372 if (a20_state != ((env->a20_mask >> 20) & 1)) {
373#if defined(DEBUG_MMU)
374 printf("A20 update: a20=%d\n", a20_state);
375#endif
376 /* if the cpu is currently executing code, we must unlink it and
377 all the potentially executing TB */
378 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
3b46e624 379
eaa728ee
FB
380 /* when a20 is changed, all the MMU mappings are invalid, so
381 we must flush everything */
382 tlb_flush(env, 1);
5ee0ffaa 383 env->a20_mask = ~(1 << 20) | (a20_state << 20);
7e84c249 384 }
2c0262af
FB
385}
386
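/* CR0 writes may switch paging or protection on or off.  In particular,
 * setting CR0.PG while EFER.LME is set activates long mode (EFER.LMA and
 * HF_LMA), and clearing it deactivates long mode again; either way the
 * paging-related hflags are recomputed below. */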
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

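/* CR4 writes: any change to the paging-related bits (PGE/PAE/PSE/SMEP/SMAP)
 * forces a full TLB flush, and the OSFXSR/SMAP bits are mirrored into
 * env->hflags so the translator can test them cheaply. */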
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
*/
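/* Software page walk for the softmmu slow path.  Depending on CR0.PG,
 * CR4.PAE and EFER.LMA this resolves addr through the 2-level 32-bit,
 * 3-level PAE or 4-level long-mode page tables, checks the U/S, R/W, NX,
 * SMEP and SMAP permissions for the given mmu_idx, updates the
 * accessed/dirty bits, and finally installs the translation with
 * tlb_set_page(). */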
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

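        /* In long mode the linear address is split into 9-bit table indices:
         * bits 47-39 index the PML4, 38-30 the PDPT, 29-21 the page
         * directory and 20-12 the page table; bits 63-48 must be a sign
         * extension of bit 47 (canonical form) or a #GP is raised below. */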
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

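/* Debug-only translation used by the gdbstub and monitor: walk the page
 * tables without touching accessed/dirty bits or raising faults, and
 * return the physical address (or -1 if the address is not mapped). */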
hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

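/* x86 hardware debug registers: DR0-DR3 hold the breakpoint addresses and
 * DR7 holds per-breakpoint enable bits plus a type/length field for each
 * slot.  The helpers below map those slots onto QEMU's generic breakpoint
 * (type 0: instruction) and watchpoint (types 1 and 3: data write /
 * data read-write) objects; type 2 (I/O) is not supported. */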
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(env, EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

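/* Machine-check injection (used by the monitor "mce" command).  The
 * parameters are bundled into a struct so the actual injection can be run
 * on the target CPU's thread via run_on_cpu(). */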
typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUX86State *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(env, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

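/* Fetch and decode a segment descriptor from the GDT or LDT for debug
 * purposes: returns base, limit (scaled by the granularity bit) and the
 * raw flag word, or 0 if the selector lies outside the table or the
 * descriptor cannot be read. */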
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

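/* Create and realize an X86CPU from a "-cpu" model string.  On any failure
 * (unknown model or realize error) the half-constructed object is deleted
 * and NULL is returned. */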
X86CPU *cpu_x86_init(const char *cpu_model)
{
    X86CPU *cpu;
    CPUX86State *env;
    Error *error = NULL;

    cpu = X86_CPU(object_new(TYPE_X86_CPU));
    env = &cpu->env;
    env->cpu_model_str = cpu_model;

    if (cpu_x86_register(cpu, cpu_model) < 0) {
        object_delete(OBJECT(cpu));
        return NULL;
    }

    x86_cpu_realize(OBJECT(cpu), &error);
    if (error) {
        error_free(error);
        object_delete(OBJECT(cpu));
        return NULL;
    }
    return cpu;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(CPU(cpu));
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif