/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
#include "kvm_x86.h"

//#define DEBUG_MMU

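/*
 * Everything up to the 'breakpoints' field of CPUX86State is cleared
 * on reset, so the breakpoint/watchpoint lists and the fields placed
 * after them survive a CPU reset.
 */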
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

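/* Decode the family and model fields from cpuid_version (CPUID leaf 1
   EAX layout): family is bits 11:8, model combines the extended model
   in bits 19:16 with the base model in bits 7:4. */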
static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUState *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

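/* Printable names for the lazy condition code states; the order must
   stay in sync with the CC_OP_* enumeration. */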
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

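/* Pretty-print one segment cache entry (selector, base, limit, flags)
   and, in protected mode, decode the descriptor type. */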
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

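/* Dump the CPU state to 'f'.  X86_DUMP_FPU and X86_DUMP_CCOP add the
   FPU/SSE registers and the lazy condition code state; CPU_DUMP_CODE
   appends a hex dump of the code bytes around CS:EIP. */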
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

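/* Update the A20 line state.  A change invalidates every TLB entry and
   unlinks the translated code currently executing, since physical
   addresses are masked with a20_mask on each page walk. */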
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

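/* Install a new CR0 value: flush the TLB when the paging-related bits
   (PG, WP, PE) change, enter or leave long mode when PG is toggled
   while EFER.LME/LMA is set, and refresh the hidden flags mirroring
   PE, MP, EM and TS. */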
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

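/* Software TLB fill for the system emulator: walk the page tables for
   'addr' (legacy, PAE or long mode depending on CR0/CR4/EFER), check
   access rights, update the accessed/dirty bits and install the
   translation with tlb_set_page().  is_write1 is 0 for reads, 1 for
   writes and 2 for instruction fetches. */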
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
do_fault_protect:
    error_code = PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

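/* Translate a virtual address for the debugger: walk the page tables
   without touching the accessed/dirty bits and return the physical
   address, or -1 if the page is not mapped. */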
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

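/* Mirror DR7 slot 'index' into a QEMU breakpoint or watchpoint.  The
   DR7 type field encodes 0 = instruction breakpoint, 1 = data write,
   2 = I/O access (unsupported) and 3 = data read/write. */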
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

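/* Recompute the DR6 hit bits for the four debug registers.  Returns
   non-zero if an enabled breakpoint or watchpoint was hit; DR6 is
   written back in that case or when force_dr6_update is set. */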
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

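/* Debug exception hook: raise #DB for breakpoint/watchpoint hits that
   belong to the guest (BP_CPU flag), then chain to the previously
   installed handler. */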
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

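/* TCG model of machine check injection: record the error in the MCE
   bank registers and raise CPU_INTERRUPT_MCE for uncorrected errors.
   An uncorrected error that arrives while MCIP is still set (or while
   CR4.MCE is clear) is fatal and triggers a system reset. */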
static void qemu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                                uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    uint64_t *banks = cenv->mce_banks;

    /*
     * If MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * If MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injecting MCE exception while a previous "
                    "one is still in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}

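/* Public MCE injection entry point: hand the error to KVM when it is
   in charge, otherwise emulate the injection; with 'broadcast' set, an
   uncorrected error is signalled to all other CPUs as well. */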
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int broadcast)
{
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;
    int flag = 0;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL)) {
        return;
    }

    if (broadcast) {
        if (!cpu_x86_support_mca_broadcast(cenv)) {
            fprintf(stderr, "Current CPU does not support broadcast\n");
            return;
        }
    }

    if (kvm_enabled()) {
        if (broadcast) {
            flag |= MCE_BROADCAST;
        }

        kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc, flag);
    } else {
        qemu_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc);
        if (broadcast) {
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (cenv == env) {
                    continue;
                }
                qemu_inject_x86_mce(env, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
                                    MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0);
            }
        }
    }
}
#endif /* !CONFIG_USER_ONLY */

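/* Enable the machine check architecture for CPUs of family >= 6 that
   advertise both MCE and MCA in CPUID: expose the default bank count
   and set MCG_CTL and every MCi_CTL to all 1s (all errors enabled). */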
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA))
           == (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = MCE_BANKS_DEF;
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}

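/* Read a segment descriptor from the GDT or LDT on behalf of the
   debugger.  Returns 1 and fills base/limit/flags on success, 0 if
   the selector is out of range or the table cannot be read. */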
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

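/* Create and initialize an x86 CPU.  The first call also initializes
   the translator's static tables and installs the debug exception
   handler.  Returns NULL if 'cpu_model' is unknown. */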
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}

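/* INIT/SIPI handling: an INIT resets the CPU and the local APIC but
   preserves a pending SIPI; only the bootstrap processor continues
   running afterwards, the APs stay halted until they receive a SIPI. */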
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif