/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

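/* Decode the CPU family and model from the CPUID version dword
 * (CPUID leaf 1, EAX): the family lives in bits 11:8 and the model
 * in bits 7:4, extended by the extended-model field in bits 19:16.
 * Note that the extended-family field (bits 27:20) is not folded in.
 */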
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH (family 6, model 14)
   and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK)) {
        goto done;
    }

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

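/* Print the CPU state to f.  When CPU_DUMP_CODE is set in flags, also
 * dump DUMP_CODE_BYTES_TOTAL bytes of code around EIP, starting up to
 * DUMP_CODE_BYTES_BACKWARD bytes before the current instruction.
 */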
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(cs);

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB) {
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        } else {
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1) {
                cpu_fprintf(f, "\n");
            } else {
                cpu_fprintf(f, " ");
            }
        }
        if (env->hflags & HF_CS64_MASK) {
            nb = 16;
        } else {
            nb = 8;
        }
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1) {
                cpu_fprintf(f, "\n");
            } else {
                cpu_fprintf(f, " ");
            }
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* If the CPU is currently executing code, we must unlink it and
           all potentially executing TBs. */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* When A20 is changed, all the MMU mappings become invalid, so
           we must flush everything. */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK)) {
            return;
        }
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
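    /* CR0.MP, CR0.EM and CR0.TS occupy bits 1..3, which after a single
       left shift line up with HF_MP_MASK, HF_EM_MASK and HF_TS_MASK, so
       all three flags are copied in one operation. */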
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
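/* is_write1 encodes the access type: 0 = data read, 1 = data write,
   2 = instruction fetch (used for the NX and SMEP checks below). */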
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

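            /* Four-level IA-32e walk: PML4E -> PDPE -> PDE -> PTE, each
               level indexed by 9 bits of the virtual address (bits 47:39,
               38:30, 29:21 and 20:12 respectively). */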
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty) {
                    pde |= PG_DIRTY_MASK;
                }
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty) {
                    pte |= PG_DIRTY_MASK;
                }
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty) {
                    pde |= PG_DIRTY_MASK;
                }
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty) {
                    pte |= PG_DIRTY_MASK;
                }
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK)) {
        prot |= PAGE_EXEC;
    }
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK) {
                prot |= PAGE_WRITE;
            }
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK)) {
                prot |= PAGE_WRITE;
            }
        }
    }
do_mapping:
    pte = pte & env->a20_mask;

    /* Even with 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too quickly */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
do_fault_protect:
    error_code = PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

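/* Debug-mode page walk: translate a virtual address to a physical one
   without side effects (no accessed/dirty updates, no faults raised).
   Returns -1 if the address is not mapped. */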
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK)) {
                return -1;
            }
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK)) {
                    return -1;
                }
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

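/* Map one of the four DR7 hardware breakpoint slots onto QEMU's generic
   breakpoint/watchpoint machinery.  I/O breakpoints are not supported. */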
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index]) {
        return;
    }
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

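/* Recompute DR6 from the current breakpoint/watchpoint state.  Returns
   true if an enabled debug breakpoint matched; DR6 is written back when
   a hit occurred or when force_dr6_update is set. */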
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
        }
    }
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * If MSR_MCG_CTL is not all 1s, then uncorrected error
         * reporting is disabled.
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * If MSR_MCi_CTL is not all 1s, then uncorrected error
         * reporting is disabled for the bank.
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

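        /* On a broadcast, the other CPUs receive a generic valid,
           uncorrected-error record in bank 1 rather than the original
           record injected on the first CPU. */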
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (other_cs = first_cpu; other_cs != NULL;
             other_cs = other_cs->next_cpu) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

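/* Read a segment descriptor for debugging: bit 2 of the selector picks
   the LDT or GDT, the 8-byte descriptor is fetched with
   cpu_memory_rw_debug(), and base/limit/flags are decoded from the two
   descriptor words.  Returns 1 on success, 0 if it cannot be read. */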
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr + 4, (uint8_t *)&e2, sizeof(e2), 0) != 0) {
        return 0;
    }

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        *limit = (*limit << 12) | 0xfff;
    }
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif