/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

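    /* CPUID leaf 1 EAX layout: stepping in bits 3:0, model in 7:4,
       family in 11:8, extended model in 19:16.  The extended model
       nibble becomes the high nibble of the model reported here;
       the extended family field is ignored. */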
    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
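/* That is: family 06H with model EH (14) or above, or any later family,
   which is what the check below implements. */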
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

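/* This table is indexed directly by env->cc_op, so its order must stay
   in sync with the CC_OP_* enumeration in cpu.h. */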
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(cs);

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
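        /* FSW is displayed with the TOP-of-stack field (bits 13:11)
           rebuilt from fpstt; FTW is condensed to one "valid" bit per
           register rather than the architectural two-bit tags. */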
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
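    /* CR0.MP/EM/TS sit in consecutive bits starting at bit 1, so a single
       shift by (HF_MP_SHIFT - 1) lines all three up with the contiguous
       HF_MP/HF_EM/HF_TS bits in hflags. */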
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
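    /* is_write1 follows QEMU's access-type convention: 0 = data read,
       1 = data write, 2 = instruction fetch.  Only the low bit matters
       for the write/dirty checks below. */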
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
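            /* NX accumulation trick: each level's NX bit is inverted
               before being ANDed into ptep, so the inverted bit survives
               only if every level allows execution; an XOR at the leaf
               restores the architectural polarity. */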
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

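    /* DR7 encodes a 2-bit type and 2-bit length for each of the four debug
       registers; instruction breakpoints become TCG breakpoints, data
       breakpoints become watchpoints. */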
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

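    /* Recompute the B0-B3 hit bits (DR6[3:0]) from scratch; the rest of
       DR6 is preserved. */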
    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
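    /* Each MCE bank occupies four consecutive uint64_t slots in mce_banks:
       MCi_CTL, MCi_STATUS, MCi_ADDR and MCi_MISC, in that order. */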

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
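        /* For broadcast, every other CPU is handed a generic valid,
           uncorrected status in bank 1 with MCIP/RIPV set, which appears
           intended to mirror what hardware MCA broadcast delivers. */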
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.cpu = x86_env_get_cpu(env);
            run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

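    /* Reassemble the fields the descriptor format splits up: the base comes
       from bytes 2-4 plus byte 7, the 20-bit limit from bytes 0-1 plus the
       low nibble of byte 6, scaled by 4K when the granularity bit is set. */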
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif