/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "qemu-common.h"
#include "kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

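    /* CR0 reset value 0x60000010 is the architected power-on state:
       CD and NW set (caches disabled), ET set, PE and PG clear,
       i.e. real mode with paging off. */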
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

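    /* Reset vector: CS base 0xffff0000 plus EIP 0xfff0 places the first
       instruction fetch at physical address 0xfffffff0, as the
       architecture requires. */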
    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    env->pat = 0x0007040600070406ULL;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

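    /* CPUID.1:EAX encodes family in bits 11:8, model in bits 7:4 and
       extended model in bits 19:16; the extended nibble forms the high
       four bits of the effective model number. */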
    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH (family 6, model 14)
   and above */
int cpu_x86_support_mca_broadcast(CPUState *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
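
/* NOTE: the order of these strings must match the CC_OP_* enumeration
   in cpu.h; cpu_dump_state() indexes this table with env->cc_op. */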

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
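    /* a20_mask is all-ones while the A20 line is enabled; with A20
       disabled, bit 20 is forced to zero in every address (8086
       wrap-around compatibility). */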
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
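    /* CR0.MP/EM/TS occupy bits 1-3; shifting new_cr0 left by
       (HF_MP_SHIFT - 1) lines them up with the HF_MP/HF_EM/HF_TS
       hflag positions so all three can be copied with one mask. */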
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
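
/* PHYS_ADDR_MASK keeps bits 39:12 (40-bit physical addresses) on
   64-bit targets and bits 35:12 (36-bit, PAE) on 32-bit targets;
   the low 12 bits are the page-table entry flag bits. */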

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
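            /* PG_NX_MASK is a deny bit in the hardware entries; XORing
               it turns it into an allow bit, so the NX, user and rw
               permissions of all levels can be combined by ANDing. */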
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
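    /* Fault exit paths: the page-fault error code is assembled from
       PG_ERROR_* bits: P (protection violation vs. not present), W/R,
       U/S, RSVD, and I/D for instruction fetches (NX faults only). */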
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

964 case 0:
965 if (hw_breakpoint_enabled(env->dr[7], index))
966 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
967 &env->cpu_breakpoint[index]);
968 break;
969 case 1:
970 type = BP_CPU | BP_MEM_WRITE;
971 goto insert_wp;
972 case 2:
973 /* No support for I/O watchpoints yet */
974 break;
975 case 3:
976 type = BP_CPU | BP_MEM_ACCESS;
977 insert_wp:
978 err = cpu_watchpoint_insert(env, env->dr[index],
979 hw_breakpoint_len(env->dr[7], index),
980 type, &env->cpu_watchpoint[index]);
981 break;
982 }
983 if (err)
984 env->cpu_breakpoint[index] = NULL;
985 }
986
987 void hw_breakpoint_remove(CPUState *env, int index)
988 {
989 if (!env->cpu_breakpoint[index])
990 return;
991 switch (hw_breakpoint_type(env->dr[7], index)) {
992 case 0:
993 if (hw_breakpoint_enabled(env->dr[7], index))
994 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
995 break;
996 case 1:
997 case 3:
998 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
999 break;
1000 case 2:
1001 /* No support for I/O watchpoints yet */
1002 break;
1003 }
1004 }
1005
1006 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1007 {
1008 target_ulong dr6;
1009 int reg, type;
1010 int hit_enabled = 0;
1011
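    /* DR6 bits 0-3 report which of the four breakpoints hit; clear
       them before recomputing the hit status below. */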
    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUState *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUState *cenv = params->env;
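    /* Each MCE bank occupies four consecutive slots in mce_banks:
       MCi_CTL, MCi_STATUS, MCi_ADDR and MCi_MISC (indices 0-3). */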
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            /* run on the target VCPU, not the originating one */
            run_on_cpu(env, do_inject_x86_mce, &params);
        }
    }
}
#endif /* !CONFIG_USER_ONLY */

static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

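    /* Selector bit 2 is the table indicator: set selects the LDT,
       clear selects the GDT. */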
    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

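    /* Descriptor layout: the base is split across e1 bits 31:16 and
       e2 bits 7:0 and 31:24; the 20-bit limit across e1 bits 15:0 and
       e2 bits 19:16, scaled by 4K when the granularity bit is set. */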
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(env);
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif