/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

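/*
 * Illustrative note on the decoding below: CPUID leaf 1 EAX (mirrored in
 * cpuid_version) packs stepping in bits 3:0, model in bits 7:4, family in
 * bits 11:8 and the extended model in bits 19:16; the extended model is
 * folded into the high nibble of *model.  For example, 0x000106a5 would
 * decode to family 6, model 0x1a (26), assuming that standard layout.
 */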
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
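    /*
     * Illustrative note: the single shift above assumes CR0.MP/EM/TS are
     * the three consecutive bits starting at bit 1 and that HF_MP_MASK,
     * HF_EM_MASK and HF_TS_MASK sit in the same order starting at
     * HF_MP_SHIFT in hflags, so one shift by (HF_MP_SHIFT - 1) moves all
     * three flags at once; this relies on the cpu.h bit layout rather
     * than anything enforced here.
     */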
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
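/* Illustrative note: with these constants the walk keeps PTE bits 12..39
 * (a 40-bit physical address space) on TARGET_X86_64 builds and bits
 * 12..35 (36-bit, the classic PAE limit) otherwise, dropping the low 12
 * flag bits in both cases. */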

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
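/* Rough sketch of the walk below: in long mode the address goes through
 * PML4 -> PDPT -> page directory -> page table (with an optional 2 MB stop
 * at the PDE); in 32-bit PAE mode through a 4-entry PDPT -> page directory
 * -> page table; in legacy mode through page directory -> page table, with
 * an optional 4 MB stop at the PDE when CR4.PSE is set. */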
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

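/* Note on the debug-register helpers below: hw_breakpoint_type() and
 * hw_breakpoint_len() decode the per-breakpoint R/W and LEN fields of DR7,
 * and hw_breakpoint_enabled() checks its L/G enable bits.  Instruction
 * breakpoints are mapped onto QEMU breakpoints, data breakpoints onto
 * QEMU watchpoints; I/O breakpoints are not supported yet. */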
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

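/* Entry point for MCE injection (reached, for example, from the HMP "mce"
 * command): it validates the request, runs do_inject_x86_mce() on the
 * target CPU via run_on_cpu(), and with MCE_INJECT_BROADCAST additionally
 * signals a VAL|UC status with MCIP|RIPV set to every other CPU. */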
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.cpu = x86_env_get_cpu(env);
            run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

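/* Illustrative note: the fetch below assumes the standard 8-byte GDT/LDT
 * descriptor layout: e1 carries limit[15:0] and base[15:0], e2 carries
 * base[23:16], type/DPL/P, limit[19:16], flags such as G, and base[31:24];
 * with G set the limit is rescaled to 4 KiB granularity. */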
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif