/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
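
/*
 * Worked example (added comment, not in the upstream source): a CPUID
 * signature of 0x000206a7 (a hypothetical Sandy Bridge style part)
 * decodes above to family (0x206a7 >> 8) & 0x0f = 6 and model
 * ((0x206a7 >> 12) & 0xf0) + ((0x206a7 >> 4) & 0x0f) = 0x20 + 0x0a = 0x2a,
 * so cpu_x86_support_mca_broadcast() below reports 1 (family 6, model >= 14).
 * Note this uses the base family field only; the extended-family bits of
 * the signature are not folded in here.
 */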

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
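
/*
 * Illustration (added comment, not in the upstream source): with A20
 * masking enabled (a20_state == 0), a20_mask becomes ~(1 << 20), so a
 * real-mode access to 0x100000 wraps around to physical 0x0, matching
 * 8086 behaviour; with a20_state == 1 the mask is all ones and addresses
 * pass through unchanged.  Because the mask is applied to the physical
 * address of every translation, any cached TLB entry may change meaning,
 * hence the unconditional tlb_flush() above.
 */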

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
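
/*
 * Note (added comment, not in the upstream source): the final statement
 * above copies CR0.MP/EM/TS (CR0 bits 1-3) into the HF_MP/HF_EM/HF_TS
 * hflag bits with a single shift.  This only works because those three
 * hflag bits are defined as consecutive bit positions starting at
 * HF_MP_SHIFT, which is what the "new_cr0 << (HF_MP_SHIFT - 1)"
 * expression assumes.
 */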

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
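
/*
 * Added note (not in the upstream source): in long mode the walk below
 * splits a canonical 48-bit linear address as
 *     bits 47-39  PML4 index    ((addr >> 39) & 0x1ff)
 *     bits 38-30  PDPT index    ((addr >> 30) & 0x1ff)
 *     bits 29-21  PD index      ((addr >> 21) & 0x1ff)
 *     bits 20-12  PT index      ((addr >> 12) & 0x1ff)
 *     bits 11-0   page offset
 * and each "(index << 3)" selects one 8-byte entry in a 4 KB table.
 * Masking an entry with PHYS_ADDR_MASK strips the low 12 flag bits and
 * the bits above the emulated physical address width, leaving the
 * physical address of the next-level table.
 */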

/* return value:
   -1 = cannot handle fault
   0 = nothing more to do
   1 = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
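
/*
 * Added note (not in the upstream source): the error_code built above
 * follows the architectural page-fault error code layout: bit 0 (P) is
 * set for protection violations on present pages, bit 1 (W) for writes,
 * bit 2 (U) for user-mode accesses, bit 3 (RSVD) for reserved-bit
 * violations and bit 4 (I/D) for instruction fetches (is_write1 == 2 is
 * used internally to mark a fetch).
 */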

hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
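
/*
 * Added note (not in the upstream source): DR7 encodes, for each of the
 * four debug registers DR0-DR3, a local/global enable pair in bits
 * 2*index and 2*index + 1, a 2-bit type field at bit 16 + 4*index and a
 * 2-bit length field at bit 18 + 4*index; the hw_breakpoint_type(),
 * hw_breakpoint_len() and hw_breakpoint_enabled() helpers used here and
 * below extract those fields.
 */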

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

1097
1098static void do_inject_x86_mce(void *data)
79c4f6b0 1099{
d5bfda33 1100 MCEInjectionParams *params = data;
55e5c285
AF
1101 CPUX86State *cenv = &params->cpu->env;
1102 CPUState *cpu = CPU(params->cpu);
d5bfda33
JK
1103 uint64_t *banks = cenv->mce_banks + 4 * params->bank;
1104
1105 cpu_synchronize_state(cenv);
316378e4 1106
747461c7
JK
1107 /*
1108 * If there is an MCE exception being processed, ignore this SRAO MCE
1109 * unless unconditional injection was requested.
1110 */
d5bfda33
JK
1111 if (!(params->flags & MCE_INJECT_UNCOND_AO)
1112 && !(params->status & MCI_STATUS_AR)
747461c7
JK
1113 && (cenv->mcg_status & MCG_STATUS_MCIP)) {
1114 return;
1115 }
d5bfda33
JK
1116
1117 if (params->status & MCI_STATUS_UC) {
316378e4
JK
1118 /*
1119 * if MSR_MCG_CTL is not all 1s, the uncorrected error
1120 * reporting is disabled
1121 */
d5bfda33
JK
1122 if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
1123 monitor_printf(params->mon,
316378e4 1124 "CPU %d: Uncorrected error reporting disabled\n",
55e5c285 1125 cpu->cpu_index);
316378e4
JK
1126 return;
1127 }
1128
1129 /*
1130 * if MSR_MCi_CTL is not all 1s, the uncorrected error
1131 * reporting is disabled for the bank
1132 */
1133 if (banks[0] != ~(uint64_t)0) {
d5bfda33
JK
1134 monitor_printf(params->mon,
1135 "CPU %d: Uncorrected error reporting disabled for"
1136 " bank %d\n",
55e5c285 1137 cpu->cpu_index, params->bank);
316378e4
JK
1138 return;
1139 }
1140
79c4f6b0
HY
1141 if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
1142 !(cenv->cr[4] & CR4_MCE_MASK)) {
d5bfda33
JK
1143 monitor_printf(params->mon,
1144 "CPU %d: Previous MCE still in progress, raising"
1145 " triple fault\n",
55e5c285 1146 cpu->cpu_index);
79c4f6b0
HY
1147 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1148 qemu_system_reset_request();
1149 return;
1150 }
2fa11da0 1151 if (banks[1] & MCI_STATUS_VAL) {
d5bfda33 1152 params->status |= MCI_STATUS_OVER;
2fa11da0 1153 }
d5bfda33
JK
1154 banks[2] = params->addr;
1155 banks[3] = params->misc;
1156 cenv->mcg_status = params->mcg_status;
1157 banks[1] = params->status;
79c4f6b0
HY
1158 cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
1159 } else if (!(banks[1] & MCI_STATUS_VAL)
1160 || !(banks[1] & MCI_STATUS_UC)) {
2fa11da0 1161 if (banks[1] & MCI_STATUS_VAL) {
d5bfda33 1162 params->status |= MCI_STATUS_OVER;
2fa11da0 1163 }
d5bfda33
JK
1164 banks[2] = params->addr;
1165 banks[3] = params->misc;
1166 banks[1] = params->status;
2fa11da0 1167 } else {
79c4f6b0 1168 banks[1] |= MCI_STATUS_OVER;
2fa11da0 1169 }
79c4f6b0 1170}
b3cd24e0 1171
8c5cf3b6 1172void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
316378e4 1173 uint64_t status, uint64_t mcg_status, uint64_t addr,
747461c7 1174 uint64_t misc, int flags)
b3cd24e0 1175{
8c5cf3b6 1176 CPUX86State *cenv = &cpu->env;
d5bfda33
JK
1177 MCEInjectionParams params = {
1178 .mon = mon,
55e5c285 1179 .cpu = cpu,
d5bfda33
JK
1180 .bank = bank,
1181 .status = status,
1182 .mcg_status = mcg_status,
1183 .addr = addr,
1184 .misc = misc,
1185 .flags = flags,
1186 };
b3cd24e0 1187 unsigned bank_num = cenv->mcg_cap & 0xff;
317ac620 1188 CPUX86State *env;
b3cd24e0 1189
316378e4
JK
1190 if (!cenv->mcg_cap) {
1191 monitor_printf(mon, "MCE injection not supported\n");
b3cd24e0
JD
1192 return;
1193 }
316378e4
JK
1194 if (bank >= bank_num) {
1195 monitor_printf(mon, "Invalid MCE bank number\n");
1196 return;
1197 }
1198 if (!(status & MCI_STATUS_VAL)) {
1199 monitor_printf(mon, "Invalid MCE status code\n");
1200 return;
1201 }
747461c7
JK
1202 if ((flags & MCE_INJECT_BROADCAST)
1203 && !cpu_x86_support_mca_broadcast(cenv)) {
316378e4
JK
1204 monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
1205 return;
2bd3e04c
JD
1206 }
1207
f100f0b3 1208 run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
c34d440a
JK
1209 if (flags & MCE_INJECT_BROADCAST) {
1210 params.bank = 1;
1211 params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
1212 params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
1213 params.addr = 0;
1214 params.misc = 0;
1215 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1216 if (cenv == env) {
1217 continue;
31ce5e0c 1218 }
55e5c285 1219 params.cpu = x86_env_get_cpu(env);
f100f0b3 1220 run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
31ce5e0c 1221 }
b3cd24e0
JD
1222 }
1223}
d362e757 1224
317ac620 1225void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
d362e757 1226{
d362e757
JK
1227 if (kvm_enabled()) {
1228 env->tpr_access_type = access;
1229
1230 cpu_interrupt(env, CPU_INTERRUPT_TPR);
1231 } else {
a8a826a3 1232 cpu_restore_state(env, env->mem_io_pc);
d362e757
JK
1233
1234 apic_handle_tpr_access_report(env->apic_state, env->eip, access);
1235 }
1236}
74ce674f 1237#endif /* !CONFIG_USER_ONLY */
6fd805e1 1238
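/*
 * Added note (not in the upstream source): the function below decodes a
 * standard GDT/LDT descriptor: the 32-bit base is scattered over
 * e1[31:16], e2[7:0] and e2[31:24]; the 20-bit limit lives in e1[15:0]
 * and e2[19:16] and is scaled to 4 KB granularity when the G bit
 * (DESC_G_MASK) is set; bit 2 of the selector chooses the LDT instead of
 * the GDT, and the low three selector bits (RPL and table indicator) are
 * masked off to form the table offset.
 */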
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

X86CPU *cpu_x86_init(const char *cpu_model)
{
    X86CPU *cpu;
    CPUX86State *env;
    Error *error = NULL;

    cpu = X86_CPU(object_new(TYPE_X86_CPU));
    env = &cpu->env;
    env->cpu_model_str = cpu_model;

    if (cpu_x86_register(cpu, cpu_model) < 0) {
        object_delete(OBJECT(cpu));
        return NULL;
    }

    x86_cpu_realize(OBJECT(cpu), &error);
    if (error) {
        error_free(error);
        object_delete(OBJECT(cpu));
        return NULL;
    }
    return cpu;
}
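
/*
 * Usage sketch (added comment, not in the upstream source): callers such
 * as the board initialisation code create a CPU from a model string and
 * must handle the NULL return on failure, e.g.:
 *
 *     X86CPU *cpu = cpu_x86_init("qemu64");
 *     if (cpu == NULL) {
 *         fprintf(stderr, "Unable to find x86 CPU definition\n");
 *         exit(1);
 *     }
 *
 * "qemu64" is one of the built-in model names; any name accepted by
 * cpu_x86_register() works here.
 */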

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(CPU(cpu));
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif