/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
#include "kvm_x86.h"

//#define DEBUG_MMU

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

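/*
 * Decode the CPUID version dword (EAX of CPUID leaf 1): the family is
 * in bits 11:8 and the model in bits 7:4, with the extended model
 * field in bits 19:16 supplying the high nibble of the model.  The
 * extended family field is not consulted here.
 */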
static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH (family 6, model 14)
   and above */
int cpu_x86_support_mca_broadcast(CPUState *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

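/*
 * Print one cached segment descriptor: selector, base, limit and the
 * raw attribute bits, followed in protected mode by the DPL and the
 * decoded type (code/data segments by their C/R/E/W/A bits, system
 * segments by name from sys_type_name[]).
 */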
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

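/*
 * A20 gate emulation: when the gate is disabled, physical address line
 * 20 is masked to zero for real-mode compatibility.  Toggling it makes
 * every cached translation stale, so we unlink any currently executing
 * translated code and flush the whole TLB.
 */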
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

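/*
 * Walk the guest page tables, honouring the A20 mask: two levels in
 * legacy 32-bit mode, three with PAE and four in long mode.  Accessed
 * and dirty bits are set as the hardware would set them, and on
 * success a TLB entry is installed for the 4KB page containing 'addr'.
 */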
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

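/*
 * For each of the four debug registers, DR7 encodes a 2-bit type
 * (00: instruction breakpoint, 01: data write, 10: I/O, 11: data
 * read/write) and a 2-bit length.  Instruction breakpoints are mapped
 * to CPU breakpoints, data breakpoints to CPU watchpoints; I/O
 * breakpoints are not supported.
 */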
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

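/*
 * Recompute the low four status bits of DR6 from the current
 * breakpoint/watchpoint hit state.  Returns non-zero when a hit
 * breakpoint is actually enabled in DR7, i.e. when a #DB exception
 * must be raised; DR6 itself is only written back on such a hit or
 * when force_dr6_update is set.
 */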
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

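/*
 * Debug exception handling is chained: breakpoint_handler() consumes
 * hits caused by the guest's own DR0-DR7 breakpoints and watchpoints
 * (raising #DB inside the guest) and passes everything else on to the
 * previously registered handler, if there is one.
 */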
static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

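/*
 * Software MCE injection for the non-KVM case.  An uncorrected error
 * (MCI_STATUS_UC) is delivered as a machine-check exception, unless
 * machine checks are disabled (CR4.MCE clear) or a previous MCE is
 * still in progress (MCG_STATUS_MCIP set), in which case the machine
 * is reset as on a real triple fault.  Corrected errors are only
 * latched into the bank registers; a still-valid bank sets
 * MCI_STATUS_OVER to signal overflow.
 */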
static void qemu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                                uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    uint64_t *banks = cenv->mce_banks;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injecting MCE exception while a previous "
                    "one is still in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int broadcast)
{
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;
    int flag = 0;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL)) {
        return;
    }

    if (broadcast) {
        if (!cpu_x86_support_mca_broadcast(cenv)) {
            fprintf(stderr, "Current CPU does not support broadcast\n");
            return;
        }
    }

    if (kvm_enabled()) {
        if (broadcast) {
            flag |= MCE_BROADCAST;
        }

        kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc, flag);
    } else {
        qemu_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc);
        if (broadcast) {
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (cenv == env) {
                    continue;
                }

                qemu_inject_x86_mce(env, 1, 0xa000000000000000, 0, 0, 0);
            }
        }
    }
}
#endif /* !CONFIG_USER_ONLY */

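/*
 * Expose MCE capabilities only on family >= 6 CPUs that advertise both
 * CPUID.MCE and CPUID.MCA; every implemented bank is enabled by
 * setting its MCi_CTL MSR to all ones.
 */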
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = MCE_BANKS_DEF;
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}

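/*
 * Fetch and decode a segment descriptor from the GDT or LDT (bit 2 of
 * the selector chooses the table) on behalf of the debugger.  The two
 * descriptor words are read with cpu_memory_rw_debug(); base, limit
 * (scaled up when the granularity bit is set) and the raw flags are
 * returned.  Returns 1 on success, 0 if the selector is outside the
 * table limit or the table memory is unreadable.
 */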
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif