/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

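/*
 * Decode the family and model fields from CPUID leaf 1 EAX
 * (env->cpuid_version): family is bits 11..8, model is the extended
 * model (bits 19..16) shifted left by four plus the base model
 * (bits 7..4).  The extended family field is not folded in here.
 * For example, cpuid_version 0x000306a9 decodes to family 6,
 * model 0x3a.
 */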
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

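/*
 * Dump the architectural CPU state to 'f'.  The 'flags' argument selects
 * optional sections: CPU_DUMP_CCOP adds the lazy condition-code state
 * (CC_SRC/CC_DST/CC_OP), CPU_DUMP_FPU adds the x87/SSE registers, and
 * CPU_DUMP_CODE hex-dumps DUMP_CODE_BYTES_TOTAL bytes around CS:EIP,
 * starting up to DUMP_CODE_BYTES_BACKWARD bytes before the current
 * instruction.
 */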
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

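/*
 * The A20 gate: when disabled, physical address bit 20 is forced to zero
 * via env->a20_mask, reproducing the legacy address wrap-around that old
 * real-mode software expects.  Toggling it invalidates every cached
 * translation, so any translated code currently executing is unlinked and
 * the TLB is flushed.
 */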
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

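/*
 * Update CR0.  Besides copying the new value (with CR0.ET forced to 1),
 * this handles entering/leaving long mode when CR0.PG is toggled while
 * EFER.LME is set, flushes the TLB when PG/WP/PE change, and recomputes
 * the PE/ADDSEG/MP/EM/TS bits cached in env->hflags.
 */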
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
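/*
 * On a fault, the page-fault error code delivered to the guest is built
 * from PG_ERROR_* bits: P (protection violation rather than a not-present
 * entry), W (the access was a write), U (the access came from user mode),
 * RSVD (a reserved bit was set in a paging entry) and I/D (the access was
 * an instruction fetch, reported only when NX in PAE mode or SMEP makes
 * fetches distinguishable).
 */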
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

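/*
 * Debug-only address translation (used by the gdbstub and the monitor):
 * walk the page tables for 'addr' and return the physical address, or -1
 * if the page is not mapped.  Unlike the fault handler above, this never
 * sets accessed/dirty bits and performs no permission checks.
 */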
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

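/*
 * Map an x86 debug register slot (DR0-DR3, with its type and length encoded
 * in DR7) onto QEMU's generic breakpoint/watchpoint machinery: instruction
 * breakpoints become cpu_breakpoint entries, data breakpoints become
 * cpu_watchpoint entries (write-only or read/write), and I/O breakpoints
 * are not supported yet.
 */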
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

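/*
 * Recompute DR6 after a debug event: set the B0-B3 status bit for every
 * debug register that matched (instruction breakpoints compare against EIP,
 * data breakpoints check the BP_WATCHPOINT_HIT flag) and return whether any
 * enabled breakpoint was hit.  DR6 is only written back when an enabled
 * breakpoint hit or force_dr6_update requests an unconditional update.
 */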
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

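/*
 * Machine-check injection (used, among other callers, by the monitor's MCE
 * injection command).  The parameters are bundled into MCEInjectionParams
 * and applied on the target vCPU's own thread via run_on_cpu().  With
 * MCE_INJECT_BROADCAST the error is additionally reported to every other
 * vCPU, mirroring the broadcast behaviour of machine checks on processors
 * that support it.
 */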
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

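/*
 * Report a guest access to the TPR (CR8 / the local APIC task-priority
 * register).  Under KVM the access type is recorded and a CPU_INTERRUPT_TPR
 * request is raised for later handling; under TCG the CPU state is first
 * restored from the current memory access and the report is delivered to
 * the APIC directly.
 */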
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

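/*
 * Fetch and decode the segment descriptor selected by 'selector' from the
 * GDT or LDT using debug memory accesses, returning the base, the limit
 * (expanded to bytes when the granularity bit is set) and the raw flags
 * word.  Returns 1 on success, 0 if the selector lies outside the table or
 * the descriptor cannot be read.
 */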
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif