/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU

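/*
 * Reset note: the segment/EIP values loaded below implement the
 * architectural reset state: CS selector 0xf000 with base 0xffff0000
 * plus EIP 0xfff0 puts the first fetch at the reset vector 0xfffffff0,
 * and EDX holds the CPUID version word so early boot code can identify
 * the processor before executing CPUID.
 */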
/* NOTE: must be called outside the CPU execute loop */
void cpu_state_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

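/*
 * CPUID version word layout (EAX of CPUID leaf 1): bits 3..0 stepping,
 * bits 7..4 model, bits 11..8 family, bits 19..16 extended model.
 * cpu_x86_version() folds the extended model in as the high nibble of
 * *model, e.g. version 0x000206a7 yields family 6, model 0x2a.
 */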
/* Broadcast MCA signal for processor family 06H, model EH (14) and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

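/*
 * Names for the lazy condition-code states: TCG does not compute EFLAGS
 * on every instruction; instead cc_op records which operation (with its
 * operand-width suffix B/W/L/Q) last set cc_src/cc_dst, so the flags can
 * be derived on demand.  The order here mirrors the CC_OP_* enum.
 */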
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

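/*
 * The A20 gate masks physical address line 20 for 8086 compatibility:
 * when disabled, a20_mask clears bit 20 so addresses wrap at 1MB as they
 * did on the 8086.  Flipping it invalidates every cached translation,
 * hence the full TLB flush and TB unlink below.
 */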
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: shifting CR0 left by (HF_MP_SHIFT - 1) lines the
       CR0.MP/EM/TS bits (bits 1-3) up with the corresponding hflags bits */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

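/*
 * Software page-table walk, mirroring the hardware: in long mode the
 * levels are PML4E -> PDPE -> PDE -> PTE (with an optional 2MB stop at
 * the PDE when PG_PSE_MASK is set); 32-bit PAE skips the PML4; legacy
 * paging walks PDE -> PTE with optional 4MB PSE pages.  Access rights
 * ('ptep') are ANDed down the levels, and accessed/dirty bits are set
 * as a side effect, as real hardware does.
 */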
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* is_write1 == 2 denotes an instruction fetch */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* NX is inverted while accumulating ptep so that the
               "allowed" bits can simply be ANDed across the levels */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

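/*
 * Debug-only translation: the same walk as cpu_x86_handle_mmu_fault()
 * but with no side effects (no faults raised, no accessed/dirty bit
 * updates).  Returns the guest physical address for 'addr', or -1 if
 * there is no valid mapping.  Used by the gdbstub and the monitor.
 */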
target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

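/*
 * Map DR7 breakpoint slots onto QEMU's generic breakpoint/watchpoint
 * lists.  The per-slot type field in DR7 encodes: 0 = instruction
 * breakpoint, 1 = data write watchpoint, 2 = I/O breakpoint
 * (unsupported here), 3 = data read/write watchpoint.
 */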
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

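/*
 * Recompute the DR6 status bits after a debug event: set the Bn bit for
 * every slot whose condition matched, and report whether any *enabled*
 * slot was hit.  DR6 is also updated for disabled matches when
 * force_dr6_update is set.
 */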
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

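/*
 * Debug exception handler, chained in front of any previously installed
 * handler (see cpu_x86_init below): decide whether a stopped watchpoint
 * or breakpoint belongs to the guest's DR registers (BP_CPU) and, if so,
 * turn it into an architectural #DB exception.
 */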
static void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

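/*
 * Machine-check injection (driven by the monitor's MCE command).  Each
 * MCE bank occupies four consecutive MSR slots in mce_banks: CTL,
 * STATUS, ADDR and MISC, hence the '4 * bank' indexing below.
 * do_inject_x86_mce() is dispatched to the target vcpu thread via
 * run_on_cpu().
 */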
typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUX86State *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            /* run on the target CPU's thread, not the originating one */
            run_on_cpu(env, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    TranslationBlock *tb;

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(env, CPU_INTERRUPT_TPR);
    } else {
        tb = tb_find_pc(env->mem_io_pc);
        cpu_restore_state(tb, env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

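/*
 * Read a descriptor from the current GDT or LDT for debugging (no faults;
 * access goes through cpu_memory_rw_debug).  Descriptor layout: e1 is the
 * low dword (limit 15:0, base 15:0), e2 the high dword (base 23:16,
 * type/flags, limit 19:16, base 31:24); with the G bit set the limit is
 * in 4KB granularity.  Returns 1 on success, 0 on failure.
 */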
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    X86CPU *cpu;
    CPUX86State *env;
    static int inited;

    cpu = X86_CPU(object_new(TYPE_X86_CPU));
    env = &cpu->env;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        object_delete(OBJECT(cpu));
        return NULL;
    }
    env->cpuid_apic_id = env->cpu_index;
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
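/*
 * INIT handling: cpu_state_reset() clears the whole CPU state, so
 * do_cpu_init() saves and restores the PAT MSR and the pending-SIPI
 * flag, which INIT must preserve; afterwards only the bootstrap
 * processor keeps running, APs stay halted until they receive a SIPI.
 */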
void do_cpu_init(CPUX86State *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_state_reset(env);
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUX86State *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUX86State *env)
{
}
void do_cpu_sipi(CPUX86State *env)
{
}
#endif