target-i386/helper.c — QEMU (mirror_qemu.git), at commit "cpu: Move watchpoint fields from CPU_COMMON to CPUState"
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

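/*
 * Decode family/model from CPUID leaf 1 (env->cpuid_version), whose EAX
 * layout is: stepping in bits 3:0, model in bits 7:4, family in bits 11:8,
 * extended model in bits 19:16 (the high nibble of the effective model).
 * For example, cpuid_version 0x000306a9 yields family 6 and
 * model (3 << 4) | 0xa = 0x3a.
 */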
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast an MCA signal to all CPUs for processor version 06H_EH
   (family 6, model 14) and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

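/* Printable names for the lazy condition-code states, indexed by the
   CC_OP_* enum values; used only by the register dump below. */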
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

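/* Window of code bytes dumped around EIP when CPU_DUMP_CODE is set:
   DUMP_CODE_BYTES_TOTAL bytes in all, starting up to
   DUMP_CODE_BYTES_BACKWARD bytes before EIP, with the byte at EIP
   shown bracketed as <xx>. */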
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* If the CPU is currently executing code, we must unlink it and
           all the potentially executing TBs. */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* When A20 changes, all the MMU mappings become invalid, so
           we must flush everything. */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
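
/*
 * Note: a20_mask is ANDed into every physical address computed by the
 * MMU code below.  With the gate disabled, bit 20 is cleared, so e.g.
 * a real-mode access to 0xffff0 + 0x10 = 0x100000 wraps back to 0,
 * matching the historical 8086 behaviour.
 */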

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: CR0.MP/EM/TS sit in bits 1-3 and map onto
       HF_MP/HF_EM/HF_TS with a single shift */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and the one used in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
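/*
 * is_write1 encodes the access type: 0 = data read, 1 = data write,
 * 2 = instruction fetch (hence the "is_write1 == 2" checks against NX
 * and SMEP below).  is_write keeps only the write bit for the
 * dirty-tracking and protection logic.
 */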
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

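    /* CR4.PAE set: 64-bit page-table entries; a three-level walk in
       legacy PAE mode, extended to four levels (PML4) in long mode. */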
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even with 2/4 MB pages, we map only one 4 KB page at a time in
       the TLB, to avoid filling it too quickly. */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
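    /*
     * Page-fault error code assembly: P marks a protection fault (the
     * page was present), W a write access, U a user-mode access, RSVD
     * a reserved-bit violation, and I/D an instruction fetch when NX
     * (with PAE) or SMEP makes fetch permissions meaningful.
     */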
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* CR2 is not modified when the exception is intercepted (SVM);
           the faulting address is reported in EXIT_INFO_2 instead */
        stq_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

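/*
 * Debug/monitor translation: walk the page tables read-only, without
 * setting accessed/dirty bits and without raising faults.  Returns the
 * physical address, or -1 if the address is unmapped.
 */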
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(cs->as, pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

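/*
 * Map the x86 debug registers onto QEMU's generic breakpoint and
 * watchpoint machinery: DR7 instruction breakpoints become CPU
 * breakpoints on DRn, the data-write and data-access types become
 * watchpoints of length hw_breakpoint_len(), and I/O breakpoints are
 * not implemented.
 */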
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

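/*
 * Recompute the DR6 breakpoint-hit bits.  As on real hardware, B0-B3
 * are set for any matching condition even if that breakpoint is
 * disabled in DR7; only a hit on an enabled breakpoint makes the
 * function return true (and thus raise #DB).
 */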
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

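/*
 * MCE injection.  The parameters are marshalled into a struct because
 * do_inject_x86_mce() is dispatched through run_on_cpu(), i.e. it runs
 * on the target VCPU's own thread, where it can safely update that
 * CPU's MCE banks.
 */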
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

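/*
 * Decode a segment descriptor for the debug stub: fetch the raw
 * descriptor words from the GDT or LDT and unpack base, limit (scaled
 * by the granularity bit) and flags.  Returns 1 on success, 0 if the
 * selector lies outside the table or the memory read fails.
 */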
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif