/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif

static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
/* Broadcast MCA signal for processor version 06H_EH (family 6,
   model >= 14) and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
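
/*
 * Illustrative sketch (not part of the original code): how the CPUID
 * version dword decodes into the family/model checked above.  A
 * cpuid_version of 0x000306c3, for example, has family 6 in bits 11:8,
 * model 0xc in bits 7:4 and extended model 3 in bits 19:16, giving
 * model 0x3c, which satisfies the broadcast condition.
 */
#if 0
static void mca_broadcast_example(void)
{
    CPUX86State env = { .cpuid_version = 0x000306c3 };
    int family, model;

    cpu_x86_version(&env, &family, &model);
    assert(family == 6 && model == 0x3c);
    assert(cpu_x86_support_mca_broadcast(&env));
}
#endif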

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY

/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) has a size of 3 bits.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}

static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}

/* ARRAY_SIZE check is not required because
 * destination shorthand has a size of 2 bits.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}

static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}
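
/*
 * Illustrative sketch (not part of the original code): the APIC timer
 * divide configuration register packs a 3-bit value into bits 0, 1
 * and 3; divider_conf() reassembles it, where 0b111 means divide-by-1
 * and any other value n means divide-by-2^(n+1).
 */
#if 0
static void divider_conf_example(void)
{
    assert(divider_conf(0x0) == 2);   /* 0b000 -> divide by 2 */
    assert(divider_conf(0x9) == 64);  /* 0b101 -> divide by 64 */
    assert(divider_conf(0xb) == 1);   /* 0b111 -> divide by 1 */
}
#endif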

static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}

#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16

static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}

static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}

void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    if (!s) {
        cpu_fprintf(f, "local apic state not available\n");
        return;
    }
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
#else
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
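
/*
 * Illustrative note (an assumption derived from the CPU_DUMP_CODE loop
 * above, not upstream documentation): the dump prints up to 50 bytes
 * around EIP, wraps the current byte in angle brackets and substitutes
 * "??" for unreadable memory, e.g. "Code=55 48 89 e5 <8b> 45 fc ...".
 * A caller would request it with the CPU_DUMP_CODE flag:
 */
#if 0
cpu_dump_state(cs, stderr, fprintf, CPU_DUMP_CODE | CPU_DUMP_FPU | CPU_DUMP_CCOP);
#endif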

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
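
/*
 * Illustrative sketch (not part of the original code): with the A20
 * gate disabled the mask clears bit 20 of every physical address,
 * reproducing the 8086-style 1 MB wrap-around, so 0x100000 aliases 0.
 */
#if 0
static void a20_mask_example(void)
{
    int a20_mask = ~(1 << 20);                  /* A20 disabled */
    assert((0x100000 & a20_mask) == 0x000000);
    a20_mask |= 1 << 20;                        /* A20 enabled */
    assert((0x100000 & a20_mask) == 0x100000);
}
#endif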

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
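
/*
 * Illustrative sketch (not part of the original code) of the FPU-flag
 * shift above: CR0.MP/EM/TS live in CR0 bits 1..3 and hflags keeps
 * them in the consecutive bits HF_MP_SHIFT..HF_TS_SHIFT, so a single
 * shift by HF_MP_SHIFT - 1 moves all three into place at once.
 */
#if 0
static uint32_t cr0_fpu_hflags(uint32_t new_cr0)
{
    /* CR0 bit 1 (MP) lands on hflags bit HF_MP_SHIFT, and so on */
    return (new_cr0 << (HF_MP_SHIFT - 1)) &
           (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
}
#endif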

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(CPU(cpu));
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(CPU(cpu));
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                              (((addr >> 48) & 0x1ff) << 3)) & env->a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                          (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even with 4 MB pages, we map only one 4 KB page in the TLB to
       avoid filling it too quickly */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
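
/*
 * Illustrative sketch (not part of the original code): the #PF error
 * code assembled in the fault paths above is a bitwise OR of the
 * architectural bits; a user-mode write to a present, read-only page
 * yields P|W|U.
 */
#if 0
static int pf_error_code_example(void)
{
    int error_code = 0;

    error_code |= PG_ERROR_P_MASK;       /* protection, not non-present */
    error_code |= 1 << PG_ERROR_W_BIT;   /* the access was a write */
    error_code |= PG_ERROR_U_MASK;       /* it came from user mode */
    return error_code;                   /* == 0x7 */
}
#endif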

hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                              (((addr >> 48) & 0x1ff) << 3)) & env->a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                          (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cs);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cs->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}
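
/*
 * Illustrative sketch (not part of the original code): injecting an
 * uncorrected machine-check into bank 1 and broadcasting it, roughly
 * what the HMP "mce" monitor command ends up requesting.
 */
#if 0
static void inject_mce_example(Monitor *mon, X86CPU *cpu)
{
    cpu_x86_inject_mce(mon, cpu, /* bank */ 1,
                       MCI_STATUS_VAL | MCI_STATUS_UC,
                       MCG_STATUS_MCIP | MCG_STATUS_RIPV,
                       /* addr */ 0, /* misc */ 0,
                       MCE_INJECT_BROADCAST);
}
#endif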

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
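
/*
 * Illustrative sketch (not part of the original code): decoding the
 * two descriptor words the same way cpu_x86_get_descr_debug() does.
 * A flat 4 GB code segment (e1 = 0x0000ffff, e2 = 0x00cf9b00) yields
 * base 0 and, with the granularity bit set, a byte limit of
 * 0xffffffff.
 */
#if 0
static void descr_decode_example(void)
{
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9b00;
    uint32_t base = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    unsigned int limit = (e1 & 0xffff) | (e2 & 0x000f0000);

    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    assert(base == 0 && limit == 0xffffffff);
}
#endif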

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format.  */

void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}
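
/*
 * Illustrative sketch (not part of the original code): the enter/exit
 * pair above is a lossless round trip; entering splits the arithmetic
 * flags out of env->eflags into CC_SRC/CC_OP plus env->df, and exiting
 * folds them back with cpu_compute_eflags().
 */
#if 0
static void eflags_roundtrip_example(CPUState *cs, CPUX86State *env)
{
    target_ulong before = env->eflags;

    x86_cpu_exec_enter(cs);   /* split into CC_SRC/CC_OP/df */
    x86_cpu_exec_exit(cs);    /* recombine */
    assert(env->eflags == before);
}
#endif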

#ifndef CONFIG_USER_ONLY
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldub(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_lduw(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldl(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldq(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stb(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl_notdirty(cs->as, addr, val,
                               cpu_get_mem_attrs(env),
                               NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stw(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stq(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}
#endif