git.proxmox.com Git - qemu.git/blob - target-i386/helper.c
Fix pmovsx* / pmovzx* SSE instructions (original fix by Frank Mehnert).
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
27
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
31 #include "qemu-common.h"
32 #include "kvm.h"
33 #include "helper.h"
34
35 //#define DEBUG_MMU
36
37 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
38 uint32_t *ext_features,
39 uint32_t *ext2_features,
40 uint32_t *ext3_features)
41 {
42 int i;
43 /* feature flags taken from "Intel Processor Identification and the CPUID
44 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
45 * about feature names, the Linux name is used. */
46 static const char *feature_name[] = {
47 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
48 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
49 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
50 "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
51 };
52 static const char *ext_feature_name[] = {
53 "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
54 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
55 NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
56 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
57 };
58 static const char *ext2_feature_name[] = {
59 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
60         "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
61 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
62 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
63 };
64 static const char *ext3_feature_name[] = {
65 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
66 "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
67 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
68 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
69 };
70
71     for (i = 0; i < 32; i++)
72         if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
73             *features |= 1 << i;
74             return;
75         }
76     for (i = 0; i < 32; i++)
77         if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
78             *ext_features |= 1 << i;
79             return;
80         }
81     for (i = 0; i < 32; i++)
82         if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
83             *ext2_features |= 1 << i;
84             return;
85         }
86     for (i = 0; i < 32; i++)
87         if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
88             *ext3_features |= 1 << i;
89             return;
90         }
91 fprintf(stderr, "CPU feature %s not found\n", flagname);
92 }
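/*
 * Illustrative usage sketch (disabled, not part of the build): how a
 * "+flag" string from a -cpu option lands in one of the four bitmaps
 * above.  The index values follow the tables above.
 */
#if 0
static void example_parse_flags(void)
{
    uint32_t features = 0, ext = 0, ext2 = 0, ext3 = 0;

    add_flagname_to_bitmaps("sse2", &features, &ext, &ext2, &ext3);
    /* features == (1 << 26): "sse2" is entry 26 of feature_name[] */

    add_flagname_to_bitmaps("pni", &features, &ext, &ext2, &ext3);
    /* ext == (1 << 0): "pni" (SSE3) is entry 0 of ext_feature_name[] */
}
#endif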
93
94 typedef struct x86_def_t {
95 const char *name;
96 uint32_t level;
97 uint32_t vendor1, vendor2, vendor3;
98 int family;
99 int model;
100 int stepping;
101 uint32_t features, ext_features, ext2_features, ext3_features;
102 uint32_t xlevel;
103 char model_id[48];
104 } x86_def_t;
105
106 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
107 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
108 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
109 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
110 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
111 CPUID_PSE36 | CPUID_FXSR)
112 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
113 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
114 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
115 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
116 CPUID_PAE | CPUID_SEP | CPUID_APIC)
117 static x86_def_t x86_defs[] = {
118 #ifdef TARGET_X86_64
119 {
120 .name = "qemu64",
121 .level = 2,
122 .vendor1 = CPUID_VENDOR_AMD_1,
123 .vendor2 = CPUID_VENDOR_AMD_2,
124 .vendor3 = CPUID_VENDOR_AMD_3,
125 .family = 6,
126 .model = 2,
127 .stepping = 3,
128 .features = PPRO_FEATURES |
129 /* these features are needed for Win64 and aren't fully implemented */
130 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
131 /* this feature is needed for Solaris and isn't fully implemented */
132 CPUID_PSE36,
133 .ext_features = CPUID_EXT_SSE3,
134 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
135 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
136 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
137 .ext3_features = CPUID_EXT3_SVM,
138 .xlevel = 0x8000000A,
139 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
140 },
141 {
142 .name = "core2duo",
143 .level = 10,
144 .family = 6,
145 .model = 15,
146 .stepping = 11,
147 /* The original CPU also implements these features:
148 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
149 CPUID_TM, CPUID_PBE */
150 .features = PPRO_FEATURES |
151 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
152 CPUID_PSE36,
153 /* The original CPU also implements these ext features:
154 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
155 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
156 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
157 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
158 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
159 .xlevel = 0x80000008,
160 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
161 },
162 #endif
163 {
164 .name = "qemu32",
165 .level = 2,
166 .family = 6,
167 .model = 3,
168 .stepping = 3,
169 .features = PPRO_FEATURES,
170 .ext_features = CPUID_EXT_SSE3,
171 .xlevel = 0,
172 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
173 },
174 {
175 .name = "coreduo",
176 .level = 10,
177 .family = 6,
178 .model = 14,
179 .stepping = 8,
180 /* The original CPU also implements these features:
181 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
182 CPUID_TM, CPUID_PBE */
183 .features = PPRO_FEATURES | CPUID_VME |
184 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
185 /* The original CPU also implements these ext features:
186 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
187 CPUID_EXT_PDCM */
188 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
189 .ext2_features = CPUID_EXT2_NX,
190 .xlevel = 0x80000008,
191 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
192 },
193 {
194 .name = "486",
195 .level = 0,
196 .family = 4,
197 .model = 0,
198 .stepping = 0,
199 .features = I486_FEATURES,
200 .xlevel = 0,
201 },
202 {
203 .name = "pentium",
204 .level = 1,
205 .family = 5,
206 .model = 4,
207 .stepping = 3,
208 .features = PENTIUM_FEATURES,
209 .xlevel = 0,
210 },
211 {
212 .name = "pentium2",
213 .level = 2,
214 .family = 6,
215 .model = 5,
216 .stepping = 2,
217 .features = PENTIUM2_FEATURES,
218 .xlevel = 0,
219 },
220 {
221 .name = "pentium3",
222 .level = 2,
223 .family = 6,
224 .model = 7,
225 .stepping = 3,
226 .features = PENTIUM3_FEATURES,
227 .xlevel = 0,
228 },
229 {
230 .name = "athlon",
231 .level = 2,
232 .vendor1 = 0x68747541, /* "Auth" */
233 .vendor2 = 0x69746e65, /* "enti" */
234 .vendor3 = 0x444d4163, /* "cAMD" */
235 .family = 6,
236 .model = 2,
237 .stepping = 3,
238 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
239 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
240 .xlevel = 0x80000008,
241 /* XXX: put another string ? */
242 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
243 },
244 {
245 .name = "n270",
246         /* the original CPU reports CPUID level 10 */
247 .level = 5,
248 .family = 6,
249 .model = 28,
250 .stepping = 2,
251 .features = PPRO_FEATURES |
252 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
253 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
254 * CPUID_HT | CPUID_TM | CPUID_PBE */
255         /* Some CPUs lack CPUID_SEP */
256 .ext_features = CPUID_EXT_MONITOR |
257 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
258 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
259 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
260 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
261 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
262 .xlevel = 0x8000000A,
263 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
264 },
265 };
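/*
 * Hypothetical sketch (disabled): a new built-in model is just one more
 * initializer appended inside x86_defs[] above.  The "myvcpu" name and
 * the values below are invented purely for illustration.
 */
#if 0
static const x86_def_t example_def = {
    .name = "myvcpu",
    .level = 2,
    .family = 6,
    .model = 3,
    .stepping = 1,
    .features = PPRO_FEATURES,
    .ext_features = CPUID_EXT_SSE3,
    .xlevel = 0,
    .model_id = "Example Virtual CPU",
};
#endif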
266
267 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
268 {
269 unsigned int i;
270 x86_def_t *def;
271
272 char *s = strdup(cpu_model);
273 char *featurestr, *name = strtok(s, ",");
274 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
275 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
276 int family = -1, model = -1, stepping = -1;
277
278 def = NULL;
279 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
280 if (strcmp(name, x86_defs[i].name) == 0) {
281 def = &x86_defs[i];
282 break;
283 }
284 }
285 if (!def)
286 goto error;
287 memcpy(x86_cpu_def, def, sizeof(*def));
288
289 featurestr = strtok(NULL, ",");
290
291 while (featurestr) {
292 char *val;
293 if (featurestr[0] == '+') {
294 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
295 } else if (featurestr[0] == '-') {
296 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
297 } else if ((val = strchr(featurestr, '='))) {
298 *val = 0; val++;
299 if (!strcmp(featurestr, "family")) {
300 char *err;
301 family = strtol(val, &err, 10);
302 if (!*val || *err || family < 0) {
303 fprintf(stderr, "bad numerical value %s\n", val);
304 goto error;
305 }
306 x86_cpu_def->family = family;
307 } else if (!strcmp(featurestr, "model")) {
308 char *err;
309 model = strtol(val, &err, 10);
310 if (!*val || *err || model < 0 || model > 0xff) {
311 fprintf(stderr, "bad numerical value %s\n", val);
312 goto error;
313 }
314 x86_cpu_def->model = model;
315 } else if (!strcmp(featurestr, "stepping")) {
316 char *err;
317 stepping = strtol(val, &err, 10);
318 if (!*val || *err || stepping < 0 || stepping > 0xf) {
319 fprintf(stderr, "bad numerical value %s\n", val);
320 goto error;
321 }
322 x86_cpu_def->stepping = stepping;
323 } else if (!strcmp(featurestr, "vendor")) {
324 if (strlen(val) != 12) {
325 fprintf(stderr, "vendor string must be 12 chars long\n");
326 goto error;
327 }
328 x86_cpu_def->vendor1 = 0;
329 x86_cpu_def->vendor2 = 0;
330 x86_cpu_def->vendor3 = 0;
331 for(i = 0; i < 4; i++) {
332 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
333 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
334 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
335 }
336 } else if (!strcmp(featurestr, "model_id")) {
337 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
338 val);
339 } else {
340 fprintf(stderr, "unrecognized feature %s\n", featurestr);
341 goto error;
342 }
343 } else {
344 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
345 goto error;
346 }
347 featurestr = strtok(NULL, ",");
348 }
349 x86_cpu_def->features |= plus_features;
350 x86_cpu_def->ext_features |= plus_ext_features;
351 x86_cpu_def->ext2_features |= plus_ext2_features;
352 x86_cpu_def->ext3_features |= plus_ext3_features;
353 x86_cpu_def->features &= ~minus_features;
354 x86_cpu_def->ext_features &= ~minus_ext_features;
355 x86_cpu_def->ext2_features &= ~minus_ext2_features;
356 x86_cpu_def->ext3_features &= ~minus_ext3_features;
357 free(s);
358 return 0;
359
360 error:
361 free(s);
362 return -1;
363 }
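/*
 * Illustrative usage sketch (disabled): the accepted syntax is a model
 * name followed by comma-separated "+flag", "-flag" or "key=value"
 * modifiers, exactly as parsed above.
 */
#if 0
static void example_find_by_name(void)
{
    x86_def_t def;

    /* start from "qemu64", add SSSE3, drop NX, override the family */
    if (cpu_x86_find_by_name(&def, "qemu64,+ssse3,-nx,family=15") < 0)
        fprintf(stderr, "unknown CPU model\n");
}
#endif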
364
365 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
366 {
367 unsigned int i;
368
369 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
370 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
371 }
372
373 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
374 {
375 x86_def_t def1, *def = &def1;
376
377 if (cpu_x86_find_by_name(def, cpu_model) < 0)
378 return -1;
379 if (def->vendor1) {
380 env->cpuid_vendor1 = def->vendor1;
381 env->cpuid_vendor2 = def->vendor2;
382 env->cpuid_vendor3 = def->vendor3;
383 } else {
384 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
385 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
386 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
387 }
388 env->cpuid_level = def->level;
389 if (def->family > 0x0f)
390 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
391 else
392 env->cpuid_version = def->family << 8;
393 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
394 env->cpuid_version |= def->stepping;
395 env->cpuid_features = def->features;
396 env->pat = 0x0007040600070406ULL;
397 env->cpuid_ext_features = def->ext_features;
398 env->cpuid_ext2_features = def->ext2_features;
399 env->cpuid_xlevel = def->xlevel;
400 env->cpuid_ext3_features = def->ext3_features;
401 {
402 const char *model_id = def->model_id;
403 int c, len, i;
404 if (!model_id)
405 model_id = "";
406 len = strlen(model_id);
407 for(i = 0; i < 48; i++) {
408 if (i >= len)
409 c = '\0';
410 else
411 c = (uint8_t)model_id[i];
412 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
413 }
414 }
415 return 0;
416 }
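/*
 * Worked example of the CPUID version encoding above, using the "qemu64"
 * values: family 6, model 2, stepping 3 give
 *     cpuid_version = (6 << 8) | (2 << 4) | 3 = 0x623.
 * A family above 0x0f spills into the extended family field instead:
 * family 0x10 yields 0xf00 | (1 << 20) before the model and stepping
 * bits are ORed in.
 */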
417
418 /* NOTE: must be called outside the CPU execute loop */
419 void cpu_reset(CPUX86State *env)
420 {
421 int i;
422
423 memset(env, 0, offsetof(CPUX86State, breakpoints));
424
425 tlb_flush(env, 1);
426
427 env->old_exception = -1;
428
429 /* init to reset state */
430
431 #ifdef CONFIG_SOFTMMU
432 env->hflags |= HF_SOFTMMU_MASK;
433 #endif
434 env->hflags2 |= HF2_GIF_MASK;
435
436 cpu_x86_update_cr0(env, 0x60000010);
437 env->a20_mask = ~0x0;
438 env->smbase = 0x30000;
439
440 env->idt.limit = 0xffff;
441 env->gdt.limit = 0xffff;
442 env->ldt.limit = 0xffff;
443 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
444 env->tr.limit = 0xffff;
445 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
446
447 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
448 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
449 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
450 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
451 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
452 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
453 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
454 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
455 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
456 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
457 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
458 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
459
460 env->eip = 0xfff0;
461 env->regs[R_EDX] = env->cpuid_version;
462
463 env->eflags = 0x2;
464
465 /* FPU init */
466 for(i = 0;i < 8; i++)
467 env->fptags[i] = 1;
468 env->fpuc = 0x37f;
469
470 env->mxcsr = 0x1f80;
471
472 memset(env->dr, 0, sizeof(env->dr));
473 env->dr[6] = DR6_FIXED_1;
474 env->dr[7] = DR7_FIXED_1;
475 cpu_breakpoint_remove_all(env, BP_CPU);
476 cpu_watchpoint_remove_all(env, BP_CPU);
477 }
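/*
 * Note on the reset state above: CS.base 0xffff0000 plus EIP 0xfff0
 * places the first instruction fetch at the architectural reset vector
 * 0xfffffff0, while the 0xf000 selector keeps real-mode CS:IP
 * arithmetic consistent after the first far jump.
 */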
478
479 void cpu_x86_close(CPUX86State *env)
480 {
481 qemu_free(env);
482 }
483
484 /***********************************************************/
485 /* x86 debug */
486
487 static const char *cc_op_str[] = {
488 "DYNAMIC",
489 "EFLAGS",
490
491 "MULB",
492 "MULW",
493 "MULL",
494 "MULQ",
495
496 "ADDB",
497 "ADDW",
498 "ADDL",
499 "ADDQ",
500
501 "ADCB",
502 "ADCW",
503 "ADCL",
504 "ADCQ",
505
506 "SUBB",
507 "SUBW",
508 "SUBL",
509 "SUBQ",
510
511 "SBBB",
512 "SBBW",
513 "SBBL",
514 "SBBQ",
515
516 "LOGICB",
517 "LOGICW",
518 "LOGICL",
519 "LOGICQ",
520
521 "INCB",
522 "INCW",
523 "INCL",
524 "INCQ",
525
526 "DECB",
527 "DECW",
528 "DECL",
529 "DECQ",
530
531 "SHLB",
532 "SHLW",
533 "SHLL",
534 "SHLQ",
535
536 "SARB",
537 "SARW",
538 "SARL",
539 "SARQ",
540 };
541
542 void cpu_dump_state(CPUState *env, FILE *f,
543 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
544 int flags)
545 {
546 int eflags, i, nb;
547 char cc_op_name[32];
548 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
549
550 eflags = env->eflags;
551 #ifdef TARGET_X86_64
552 if (env->hflags & HF_CS64_MASK) {
553 cpu_fprintf(f,
554 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
555 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
556 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
557 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
558 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
559 env->regs[R_EAX],
560 env->regs[R_EBX],
561 env->regs[R_ECX],
562 env->regs[R_EDX],
563 env->regs[R_ESI],
564 env->regs[R_EDI],
565 env->regs[R_EBP],
566 env->regs[R_ESP],
567 env->regs[8],
568 env->regs[9],
569 env->regs[10],
570 env->regs[11],
571 env->regs[12],
572 env->regs[13],
573 env->regs[14],
574 env->regs[15],
575 env->eip, eflags,
576 eflags & DF_MASK ? 'D' : '-',
577 eflags & CC_O ? 'O' : '-',
578 eflags & CC_S ? 'S' : '-',
579 eflags & CC_Z ? 'Z' : '-',
580 eflags & CC_A ? 'A' : '-',
581 eflags & CC_P ? 'P' : '-',
582 eflags & CC_C ? 'C' : '-',
583 env->hflags & HF_CPL_MASK,
584 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
585 (int)(env->a20_mask >> 20) & 1,
586 (env->hflags >> HF_SMM_SHIFT) & 1,
587 env->halted);
588 } else
589 #endif
590 {
591 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
592 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
593 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
594 (uint32_t)env->regs[R_EAX],
595 (uint32_t)env->regs[R_EBX],
596 (uint32_t)env->regs[R_ECX],
597 (uint32_t)env->regs[R_EDX],
598 (uint32_t)env->regs[R_ESI],
599 (uint32_t)env->regs[R_EDI],
600 (uint32_t)env->regs[R_EBP],
601 (uint32_t)env->regs[R_ESP],
602 (uint32_t)env->eip, eflags,
603 eflags & DF_MASK ? 'D' : '-',
604 eflags & CC_O ? 'O' : '-',
605 eflags & CC_S ? 'S' : '-',
606 eflags & CC_Z ? 'Z' : '-',
607 eflags & CC_A ? 'A' : '-',
608 eflags & CC_P ? 'P' : '-',
609 eflags & CC_C ? 'C' : '-',
610 env->hflags & HF_CPL_MASK,
611 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
612 (int)(env->a20_mask >> 20) & 1,
613 (env->hflags >> HF_SMM_SHIFT) & 1,
614 env->halted);
615 }
616
617 #ifdef TARGET_X86_64
618 if (env->hflags & HF_LMA_MASK) {
619 for(i = 0; i < 6; i++) {
620 SegmentCache *sc = &env->segs[i];
621 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
622 seg_name[i],
623 sc->selector,
624 sc->base,
625 sc->limit,
626 sc->flags);
627 }
628 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
629 env->ldt.selector,
630 env->ldt.base,
631 env->ldt.limit,
632 env->ldt.flags);
633 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
634 env->tr.selector,
635 env->tr.base,
636 env->tr.limit,
637 env->tr.flags);
638 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
639 env->gdt.base, env->gdt.limit);
640 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
641 env->idt.base, env->idt.limit);
642 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
643 (uint32_t)env->cr[0],
644 env->cr[2],
645 env->cr[3],
646 (uint32_t)env->cr[4]);
647 for(i = 0; i < 4; i++)
648 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
649 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
650                     env->dr[6], env->dr[7]);
651 } else
652 #endif
653 {
654 for(i = 0; i < 6; i++) {
655 SegmentCache *sc = &env->segs[i];
656 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
657 seg_name[i],
658 sc->selector,
659 (uint32_t)sc->base,
660 sc->limit,
661 sc->flags);
662 }
663 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
664 env->ldt.selector,
665 (uint32_t)env->ldt.base,
666 env->ldt.limit,
667 env->ldt.flags);
668 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
669 env->tr.selector,
670 (uint32_t)env->tr.base,
671 env->tr.limit,
672 env->tr.flags);
673 cpu_fprintf(f, "GDT= %08x %08x\n",
674 (uint32_t)env->gdt.base, env->gdt.limit);
675 cpu_fprintf(f, "IDT= %08x %08x\n",
676 (uint32_t)env->idt.base, env->idt.limit);
677 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
678 (uint32_t)env->cr[0],
679 (uint32_t)env->cr[2],
680 (uint32_t)env->cr[3],
681 (uint32_t)env->cr[4]);
682 for(i = 0; i < 4; i++)
683             cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
684         cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
685 }
686 if (flags & X86_DUMP_CCOP) {
687 if ((unsigned)env->cc_op < CC_OP_NB)
688 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
689 else
690 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
691 #ifdef TARGET_X86_64
692 if (env->hflags & HF_CS64_MASK) {
693 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
694 env->cc_src, env->cc_dst,
695 cc_op_name);
696 } else
697 #endif
698 {
699 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
700 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
701 cc_op_name);
702 }
703 }
704 if (flags & X86_DUMP_FPU) {
705 int fptag;
706 fptag = 0;
707 for(i = 0; i < 8; i++) {
708 fptag |= ((!env->fptags[i]) << i);
709 }
710 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
711 env->fpuc,
712 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
713 env->fpstt,
714 fptag,
715 env->mxcsr);
716 for(i=0;i<8;i++) {
717 #if defined(USE_X86LDOUBLE)
718 union {
719 long double d;
720 struct {
721 uint64_t lower;
722 uint16_t upper;
723 } l;
724 } tmp;
725 tmp.d = env->fpregs[i].d;
726 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
727 i, tmp.l.lower, tmp.l.upper);
728 #else
729 cpu_fprintf(f, "FPR%d=%016" PRIx64,
730 i, env->fpregs[i].mmx.q);
731 #endif
732 if ((i & 1) == 1)
733 cpu_fprintf(f, "\n");
734 else
735 cpu_fprintf(f, " ");
736 }
737 if (env->hflags & HF_CS64_MASK)
738 nb = 16;
739 else
740 nb = 8;
741 for(i=0;i<nb;i++) {
742 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
743 i,
744 env->xmm_regs[i].XMM_L(3),
745 env->xmm_regs[i].XMM_L(2),
746 env->xmm_regs[i].XMM_L(1),
747 env->xmm_regs[i].XMM_L(0));
748 if ((i & 1) == 1)
749 cpu_fprintf(f, "\n");
750 else
751 cpu_fprintf(f, " ");
752 }
753 }
754 }
755
756 /***********************************************************/
757 /* x86 mmu */
758 /* XXX: add PGE support */
759
760 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
761 {
762 a20_state = (a20_state != 0);
763 if (a20_state != ((env->a20_mask >> 20) & 1)) {
764 #if defined(DEBUG_MMU)
765 printf("A20 update: a20=%d\n", a20_state);
766 #endif
767         /* if the cpu is currently executing code, we must unlink it and
768            all the potentially executing TBs */
769 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
770
771 /* when a20 is changed, all the MMU mappings are invalid, so
772 we must flush everything */
773 tlb_flush(env, 1);
774 env->a20_mask = (~0x100000) | (a20_state << 20);
775 }
776 }
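/*
 * Illustrative sketch (disabled): with A20 masked off, physical
 * addresses wrap at 1MB just as on legacy hardware.
 */
#if 0
static void example_a20(CPUX86State *env)
{
    cpu_x86_set_a20(env, 0);
    /* bit 20 is forced to zero: 0x100000 now aliases 0x000000 */
    assert((0x100000 & env->a20_mask) == 0);

    cpu_x86_set_a20(env, 1);
    assert((0x100000 & env->a20_mask) == 0x100000);
}
#endif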
777
778 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
779 {
780 int pe_state;
781
782 #if defined(DEBUG_MMU)
783 printf("CR0 update: CR0=0x%08x\n", new_cr0);
784 #endif
785 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
786 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
787 tlb_flush(env, 1);
788 }
789
790 #ifdef TARGET_X86_64
791 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
792 (env->efer & MSR_EFER_LME)) {
793         /* enter long mode */
794 /* XXX: generate an exception */
795 if (!(env->cr[4] & CR4_PAE_MASK))
796 return;
797 env->efer |= MSR_EFER_LMA;
798 env->hflags |= HF_LMA_MASK;
799 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
800 (env->efer & MSR_EFER_LMA)) {
801 /* exit long mode */
802 env->efer &= ~MSR_EFER_LMA;
803 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
804 env->eip &= 0xffffffff;
805 }
806 #endif
807 env->cr[0] = new_cr0 | CR0_ET_MASK;
808
809 /* update PE flag in hidden flags */
810 pe_state = (env->cr[0] & CR0_PE_MASK);
811 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
812 /* ensure that ADDSEG is always set in real mode */
813 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
814 /* update FPU flags */
815 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
816 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
817 }
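/*
 * Note on the final statement above: CR0.MP/EM/TS are bits 1..3 and
 * HF_MP_MASK/HF_EM_MASK/HF_TS_MASK are three consecutive hflags bits
 * starting at HF_MP_SHIFT (see cpu.h), so shifting new_cr0 left by
 * (HF_MP_SHIFT - 1) lines the two groups up and a single mask-and-or
 * copies all three flags at once.
 */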
818
819 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
820 the PDPT */
821 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
822 {
823 env->cr[3] = new_cr3;
824 if (env->cr[0] & CR0_PG_MASK) {
825 #if defined(DEBUG_MMU)
826 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
827 #endif
828 tlb_flush(env, 0);
829 }
830 }
831
832 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
833 {
834 #if defined(DEBUG_MMU)
835     printf("CR4 update: CR4=%08x\n", new_cr4);
836 #endif
837 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
838 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
839 tlb_flush(env, 1);
840 }
841 /* SSE handling */
842 if (!(env->cpuid_features & CPUID_SSE))
843 new_cr4 &= ~CR4_OSFXSR_MASK;
844 if (new_cr4 & CR4_OSFXSR_MASK)
845 env->hflags |= HF_OSFXSR_MASK;
846 else
847 env->hflags &= ~HF_OSFXSR_MASK;
848
849 env->cr[4] = new_cr4;
850 }
851
852 /* XXX: also flush 4MB pages */
853 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
854 {
855 tlb_flush_page(env, addr);
856 }
857
858 #if defined(CONFIG_USER_ONLY)
859
860 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
861 int is_write, int mmu_idx, int is_softmmu)
862 {
863 /* user mode only emulation */
864 is_write &= 1;
865 env->cr[2] = addr;
866 env->error_code = (is_write << PG_ERROR_W_BIT);
867 env->error_code |= PG_ERROR_U_MASK;
868 env->exception_index = EXCP0E_PAGE;
869 return 1;
870 }
871
872 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
873 {
874 return addr;
875 }
876
877 #else
878
879 /* XXX: This value should match the one returned by CPUID
880 * and in exec.c */
881 #if defined(USE_KQEMU)
882 #define PHYS_ADDR_MASK 0xfffff000LL
883 #else
884 # if defined(TARGET_X86_64)
885 # define PHYS_ADDR_MASK 0xfffffff000LL
886 # else
887 # define PHYS_ADDR_MASK 0xffffff000LL
888 # endif
889 #endif
890
891 /* return value:
892 -1 = cannot handle fault
893 0 = nothing more to do
894 1 = generate PF fault
895 2 = soft MMU activation required for this block
896 */
897 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
898 int is_write1, int mmu_idx, int is_softmmu)
899 {
900 uint64_t ptep, pte;
901 target_ulong pde_addr, pte_addr;
902 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
903 target_phys_addr_t paddr;
904 uint32_t page_offset;
905 target_ulong vaddr, virt_addr;
906
907 is_user = mmu_idx == MMU_USER_IDX;
908 #if defined(DEBUG_MMU)
909 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
910 addr, is_write1, is_user, env->eip);
911 #endif
912 is_write = is_write1 & 1;
913
914 if (!(env->cr[0] & CR0_PG_MASK)) {
915 pte = addr;
916 virt_addr = addr & TARGET_PAGE_MASK;
917 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
918 page_size = 4096;
919 goto do_mapping;
920 }
921
922 if (env->cr[4] & CR4_PAE_MASK) {
923 uint64_t pde, pdpe;
924 target_ulong pdpe_addr;
925
926 #ifdef TARGET_X86_64
927 if (env->hflags & HF_LMA_MASK) {
928 uint64_t pml4e_addr, pml4e;
929 int32_t sext;
930
931 /* test virtual address sign extension */
932 sext = (int64_t)addr >> 47;
933 if (sext != 0 && sext != -1) {
934 env->error_code = 0;
935 env->exception_index = EXCP0D_GPF;
936 return 1;
937 }
938
939 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
940 env->a20_mask;
941 pml4e = ldq_phys(pml4e_addr);
942 if (!(pml4e & PG_PRESENT_MASK)) {
943 error_code = 0;
944 goto do_fault;
945 }
946 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
947 error_code = PG_ERROR_RSVD_MASK;
948 goto do_fault;
949 }
950 if (!(pml4e & PG_ACCESSED_MASK)) {
951 pml4e |= PG_ACCESSED_MASK;
952 stl_phys_notdirty(pml4e_addr, pml4e);
953 }
954 ptep = pml4e ^ PG_NX_MASK;
955 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
956 env->a20_mask;
957 pdpe = ldq_phys(pdpe_addr);
958 if (!(pdpe & PG_PRESENT_MASK)) {
959 error_code = 0;
960 goto do_fault;
961 }
962 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
963 error_code = PG_ERROR_RSVD_MASK;
964 goto do_fault;
965 }
966 ptep &= pdpe ^ PG_NX_MASK;
967 if (!(pdpe & PG_ACCESSED_MASK)) {
968 pdpe |= PG_ACCESSED_MASK;
969 stl_phys_notdirty(pdpe_addr, pdpe);
970 }
971 } else
972 #endif
973 {
974 /* XXX: load them when cr3 is loaded ? */
975 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
976 env->a20_mask;
977 pdpe = ldq_phys(pdpe_addr);
978 if (!(pdpe & PG_PRESENT_MASK)) {
979 error_code = 0;
980 goto do_fault;
981 }
982 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
983 }
984
985 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
986 env->a20_mask;
987 pde = ldq_phys(pde_addr);
988 if (!(pde & PG_PRESENT_MASK)) {
989 error_code = 0;
990 goto do_fault;
991 }
992 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
993 error_code = PG_ERROR_RSVD_MASK;
994 goto do_fault;
995 }
996 ptep &= pde ^ PG_NX_MASK;
997 if (pde & PG_PSE_MASK) {
998 /* 2 MB page */
999 page_size = 2048 * 1024;
1000 ptep ^= PG_NX_MASK;
1001 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1002 goto do_fault_protect;
1003 if (is_user) {
1004 if (!(ptep & PG_USER_MASK))
1005 goto do_fault_protect;
1006 if (is_write && !(ptep & PG_RW_MASK))
1007 goto do_fault_protect;
1008 } else {
1009 if ((env->cr[0] & CR0_WP_MASK) &&
1010 is_write && !(ptep & PG_RW_MASK))
1011 goto do_fault_protect;
1012 }
1013 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1014 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1015 pde |= PG_ACCESSED_MASK;
1016 if (is_dirty)
1017 pde |= PG_DIRTY_MASK;
1018 stl_phys_notdirty(pde_addr, pde);
1019 }
1020 /* align to page_size */
1021 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1022 virt_addr = addr & ~(page_size - 1);
1023 } else {
1024 /* 4 KB page */
1025 if (!(pde & PG_ACCESSED_MASK)) {
1026 pde |= PG_ACCESSED_MASK;
1027 stl_phys_notdirty(pde_addr, pde);
1028 }
1029 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1030 env->a20_mask;
1031 pte = ldq_phys(pte_addr);
1032 if (!(pte & PG_PRESENT_MASK)) {
1033 error_code = 0;
1034 goto do_fault;
1035 }
1036 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1037 error_code = PG_ERROR_RSVD_MASK;
1038 goto do_fault;
1039 }
1040 /* combine pde and pte nx, user and rw protections */
1041 ptep &= pte ^ PG_NX_MASK;
1042 ptep ^= PG_NX_MASK;
1043 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1044 goto do_fault_protect;
1045 if (is_user) {
1046 if (!(ptep & PG_USER_MASK))
1047 goto do_fault_protect;
1048 if (is_write && !(ptep & PG_RW_MASK))
1049 goto do_fault_protect;
1050 } else {
1051 if ((env->cr[0] & CR0_WP_MASK) &&
1052 is_write && !(ptep & PG_RW_MASK))
1053 goto do_fault_protect;
1054 }
1055 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1056 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1057 pte |= PG_ACCESSED_MASK;
1058 if (is_dirty)
1059 pte |= PG_DIRTY_MASK;
1060 stl_phys_notdirty(pte_addr, pte);
1061 }
1062 page_size = 4096;
1063 virt_addr = addr & ~0xfff;
1064 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1065 }
1066 } else {
1067 uint32_t pde;
1068
1069 /* page directory entry */
1070 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1071 env->a20_mask;
1072 pde = ldl_phys(pde_addr);
1073 if (!(pde & PG_PRESENT_MASK)) {
1074 error_code = 0;
1075 goto do_fault;
1076 }
1077 /* if PSE bit is set, then we use a 4MB page */
1078 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1079 page_size = 4096 * 1024;
1080 if (is_user) {
1081 if (!(pde & PG_USER_MASK))
1082 goto do_fault_protect;
1083 if (is_write && !(pde & PG_RW_MASK))
1084 goto do_fault_protect;
1085 } else {
1086 if ((env->cr[0] & CR0_WP_MASK) &&
1087 is_write && !(pde & PG_RW_MASK))
1088 goto do_fault_protect;
1089 }
1090 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1091 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1092 pde |= PG_ACCESSED_MASK;
1093 if (is_dirty)
1094 pde |= PG_DIRTY_MASK;
1095 stl_phys_notdirty(pde_addr, pde);
1096 }
1097
1098 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1099 ptep = pte;
1100 virt_addr = addr & ~(page_size - 1);
1101 } else {
1102 if (!(pde & PG_ACCESSED_MASK)) {
1103 pde |= PG_ACCESSED_MASK;
1104 stl_phys_notdirty(pde_addr, pde);
1105 }
1106
1107             /* page table entry */
1108 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1109 env->a20_mask;
1110 pte = ldl_phys(pte_addr);
1111 if (!(pte & PG_PRESENT_MASK)) {
1112 error_code = 0;
1113 goto do_fault;
1114 }
1115 /* combine pde and pte user and rw protections */
1116 ptep = pte & pde;
1117 if (is_user) {
1118 if (!(ptep & PG_USER_MASK))
1119 goto do_fault_protect;
1120 if (is_write && !(ptep & PG_RW_MASK))
1121 goto do_fault_protect;
1122 } else {
1123 if ((env->cr[0] & CR0_WP_MASK) &&
1124 is_write && !(ptep & PG_RW_MASK))
1125 goto do_fault_protect;
1126 }
1127 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1128 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1129 pte |= PG_ACCESSED_MASK;
1130 if (is_dirty)
1131 pte |= PG_DIRTY_MASK;
1132 stl_phys_notdirty(pte_addr, pte);
1133 }
1134 page_size = 4096;
1135 virt_addr = addr & ~0xfff;
1136 }
1137 }
1138 /* the page can be put in the TLB */
1139 prot = PAGE_READ;
1140 if (!(ptep & PG_NX_MASK))
1141 prot |= PAGE_EXEC;
1142 if (pte & PG_DIRTY_MASK) {
1143 /* only set write access if already dirty... otherwise wait
1144 for dirty access */
1145 if (is_user) {
1146 if (ptep & PG_RW_MASK)
1147 prot |= PAGE_WRITE;
1148 } else {
1149 if (!(env->cr[0] & CR0_WP_MASK) ||
1150 (ptep & PG_RW_MASK))
1151 prot |= PAGE_WRITE;
1152 }
1153 }
1154 do_mapping:
1155 pte = pte & env->a20_mask;
1156
1157     /* Even for 4MB pages, we map only one 4KB page in the TLB to
1158        avoid filling it too quickly */
1159 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1160 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1161 vaddr = virt_addr + page_offset;
1162
1163 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1164 return ret;
1165 do_fault_protect:
1166 error_code = PG_ERROR_P_MASK;
1167 do_fault:
1168 error_code |= (is_write << PG_ERROR_W_BIT);
1169 if (is_user)
1170 error_code |= PG_ERROR_U_MASK;
1171 if (is_write1 == 2 &&
1172 (env->efer & MSR_EFER_NXE) &&
1173 (env->cr[4] & CR4_PAE_MASK))
1174 error_code |= PG_ERROR_I_D_MASK;
1175 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1176 /* cr2 is not modified in case of exceptions */
1177 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1178 addr);
1179 } else {
1180 env->cr[2] = addr;
1181 }
1182 env->error_code = error_code;
1183 env->exception_index = EXCP0E_PAGE;
1184 return 1;
1185 }
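/*
 * Illustrative sketch (disabled): the non-PAE walk above splits a linear
 * address into a directory index (bits 31..22), a table index
 * (bits 21..12) and a page offset (bits 11..0).  The expressions used in
 * the code, (addr >> 20) & 0xffc and (addr >> 10) & 0xffc, are the same
 * indices pre-scaled by the 4-byte entry size.
 */
#if 0
static void example_walk_indices(uint32_t addr)
{
    uint32_t pde_index = addr >> 22;            /* one of 1024 PDEs */
    uint32_t pte_index = (addr >> 12) & 0x3ff;  /* one of 1024 PTEs */

    assert((pde_index << 2) == ((addr >> 20) & 0xffc));
    assert((pte_index << 2) == ((addr >> 10) & 0xffc));
}
#endif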
1186
1187 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1188 {
1189 target_ulong pde_addr, pte_addr;
1190 uint64_t pte;
1191 target_phys_addr_t paddr;
1192 uint32_t page_offset;
1193 int page_size;
1194
1195 if (env->cr[4] & CR4_PAE_MASK) {
1196 target_ulong pdpe_addr;
1197 uint64_t pde, pdpe;
1198
1199 #ifdef TARGET_X86_64
1200 if (env->hflags & HF_LMA_MASK) {
1201 uint64_t pml4e_addr, pml4e;
1202 int32_t sext;
1203
1204 /* test virtual address sign extension */
1205 sext = (int64_t)addr >> 47;
1206 if (sext != 0 && sext != -1)
1207 return -1;
1208
1209 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1210 env->a20_mask;
1211 pml4e = ldq_phys(pml4e_addr);
1212 if (!(pml4e & PG_PRESENT_MASK))
1213 return -1;
1214
1215 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1216 env->a20_mask;
1217 pdpe = ldq_phys(pdpe_addr);
1218 if (!(pdpe & PG_PRESENT_MASK))
1219 return -1;
1220 } else
1221 #endif
1222 {
1223 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1224 env->a20_mask;
1225 pdpe = ldq_phys(pdpe_addr);
1226 if (!(pdpe & PG_PRESENT_MASK))
1227 return -1;
1228 }
1229
1230 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1231 env->a20_mask;
1232 pde = ldq_phys(pde_addr);
1233 if (!(pde & PG_PRESENT_MASK)) {
1234 return -1;
1235 }
1236 if (pde & PG_PSE_MASK) {
1237 /* 2 MB page */
1238 page_size = 2048 * 1024;
1239 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1240 } else {
1241 /* 4 KB page */
1242 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1243 env->a20_mask;
1244 page_size = 4096;
1245 pte = ldq_phys(pte_addr);
1246 }
1247 if (!(pte & PG_PRESENT_MASK))
1248 return -1;
1249 } else {
1250 uint32_t pde;
1251
1252 if (!(env->cr[0] & CR0_PG_MASK)) {
1253 pte = addr;
1254 page_size = 4096;
1255 } else {
1256 /* page directory entry */
1257 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1258 pde = ldl_phys(pde_addr);
1259 if (!(pde & PG_PRESENT_MASK))
1260 return -1;
1261 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1262 pte = pde & ~0x003ff000; /* align to 4MB */
1263 page_size = 4096 * 1024;
1264 } else {
1265             /* page table entry */
1266 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1267 pte = ldl_phys(pte_addr);
1268 if (!(pte & PG_PRESENT_MASK))
1269 return -1;
1270 page_size = 4096;
1271 }
1272 }
1273 pte = pte & env->a20_mask;
1274 }
1275
1276 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1277 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1278 return paddr;
1279 }
1280
1281 void hw_breakpoint_insert(CPUState *env, int index)
1282 {
1283 int type, err = 0;
1284
1285 switch (hw_breakpoint_type(env->dr[7], index)) {
1286 case 0:
1287 if (hw_breakpoint_enabled(env->dr[7], index))
1288 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1289 &env->cpu_breakpoint[index]);
1290 break;
1291 case 1:
1292 type = BP_CPU | BP_MEM_WRITE;
1293 goto insert_wp;
1294 case 2:
1295 /* No support for I/O watchpoints yet */
1296 break;
1297 case 3:
1298 type = BP_CPU | BP_MEM_ACCESS;
1299 insert_wp:
1300 err = cpu_watchpoint_insert(env, env->dr[index],
1301 hw_breakpoint_len(env->dr[7], index),
1302 type, &env->cpu_watchpoint[index]);
1303 break;
1304 }
1305 if (err)
1306 env->cpu_breakpoint[index] = NULL;
1307 }
1308
1309 void hw_breakpoint_remove(CPUState *env, int index)
1310 {
1311 if (!env->cpu_breakpoint[index])
1312 return;
1313 switch (hw_breakpoint_type(env->dr[7], index)) {
1314 case 0:
1315 if (hw_breakpoint_enabled(env->dr[7], index))
1316 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1317 break;
1318 case 1:
1319 case 3:
1320 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1321 break;
1322 case 2:
1323 /* No support for I/O watchpoints yet */
1324 break;
1325 }
1326 }
1327
1328 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1329 {
1330 target_ulong dr6;
1331 int reg, type;
1332 int hit_enabled = 0;
1333
1334 dr6 = env->dr[6] & ~0xf;
1335 for (reg = 0; reg < 4; reg++) {
1336 type = hw_breakpoint_type(env->dr[7], reg);
1337 if ((type == 0 && env->dr[reg] == env->eip) ||
1338 ((type & 1) && env->cpu_watchpoint[reg] &&
1339 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1340 dr6 |= 1 << reg;
1341 if (hw_breakpoint_enabled(env->dr[7], reg))
1342 hit_enabled = 1;
1343 }
1344 }
1345 if (hit_enabled || force_dr6_update)
1346 env->dr[6] = dr6;
1347 return hit_enabled;
1348 }
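/*
 * Note on the DR7 decoding used above: DR7 stores four control bits per
 * breakpoint starting at bit 16, a 2-bit R/W type (00 instruction, 01
 * data write, 10 I/O, 11 data read/write) and a 2-bit LEN field, which
 * is what the hw_breakpoint_type()/hw_breakpoint_len() helpers extract.
 */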
1349
1350 static CPUDebugExcpHandler *prev_debug_excp_handler;
1351
1352 void raise_exception(int exception_index);
1353
1354 static void breakpoint_handler(CPUState *env)
1355 {
1356 CPUBreakpoint *bp;
1357
1358 if (env->watchpoint_hit) {
1359 if (env->watchpoint_hit->flags & BP_CPU) {
1360 env->watchpoint_hit = NULL;
1361 if (check_hw_breakpoints(env, 0))
1362 raise_exception(EXCP01_DB);
1363 else
1364 cpu_resume_from_signal(env, NULL);
1365 }
1366 } else {
1367 TAILQ_FOREACH(bp, &env->breakpoints, entry)
1368 if (bp->pc == env->eip) {
1369 if (bp->flags & BP_CPU) {
1370 check_hw_breakpoints(env, 1);
1371 raise_exception(EXCP01_DB);
1372 }
1373 break;
1374 }
1375 }
1376 if (prev_debug_excp_handler)
1377 prev_debug_excp_handler(env);
1378 }
1379 #endif /* !CONFIG_USER_ONLY */
1380
1381 static void host_cpuid(uint32_t function, uint32_t *eax, uint32_t *ebx,
1382 uint32_t *ecx, uint32_t *edx)
1383 {
1384 #if defined(CONFIG_KVM)
1385 uint32_t vec[4];
1386
1387 #ifdef __x86_64__
1388 asm volatile("cpuid"
1389 : "=a"(vec[0]), "=b"(vec[1]),
1390 "=c"(vec[2]), "=d"(vec[3])
1391 : "0"(function) : "cc");
1392 #else
1393 asm volatile("pusha \n\t"
1394 "cpuid \n\t"
1395 "mov %%eax, 0(%1) \n\t"
1396 "mov %%ebx, 4(%1) \n\t"
1397 "mov %%ecx, 8(%1) \n\t"
1398 "mov %%edx, 12(%1) \n\t"
1399 "popa"
1400 : : "a"(function), "S"(vec)
1401 : "memory", "cc");
1402 #endif
1403
1404 if (eax)
1405 *eax = vec[0];
1406 if (ebx)
1407 *ebx = vec[1];
1408 if (ecx)
1409 *ecx = vec[2];
1410 if (edx)
1411 *edx = vec[3];
1412 #endif
1413 }
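/*
 * Illustrative usage sketch (disabled): leaf 0 returns the host vendor
 * string in EBX/EDX/ECX order, which is how host_cpuid() is used by
 * cpu_x86_cpuid() below.
 */
#if 0
static void example_host_vendor(void)
{
    uint32_t ebx, ecx, edx;
    char vendor[13];

    host_cpuid(0, NULL, &ebx, &ecx, &edx);
    memcpy(vendor + 0, &ebx, 4);
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);
    vendor[12] = '\0';  /* e.g. "GenuineIntel" or "AuthenticAMD" */
}
#endif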
1414
1415 void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
1416 uint32_t *eax, uint32_t *ebx,
1417 uint32_t *ecx, uint32_t *edx)
1418 {
1419 /* test if maximum index reached */
1420 if (index & 0x80000000) {
1421 if (index > env->cpuid_xlevel)
1422 index = env->cpuid_level;
1423 } else {
1424 if (index > env->cpuid_level)
1425 index = env->cpuid_level;
1426 }
1427
1428 switch(index) {
1429 case 0:
1430 *eax = env->cpuid_level;
1431 *ebx = env->cpuid_vendor1;
1432 *edx = env->cpuid_vendor2;
1433 *ecx = env->cpuid_vendor3;
1434
1435         /* sysenter isn't supported in compatibility mode on AMD, and syscall
1436          * isn't supported in compatibility mode on Intel, so advertise the
1437          * actual CPU, and say goodbye to migration between different vendors
1438          * if you use compatibility mode. */
1439 if (kvm_enabled())
1440 host_cpuid(0, NULL, ebx, ecx, edx);
1441 break;
1442 case 1:
1443 *eax = env->cpuid_version;
1444 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1445 *ecx = env->cpuid_ext_features;
1446 *edx = env->cpuid_features;
1447
1448 /* "Hypervisor present" bit required for Microsoft SVVP */
1449 if (kvm_enabled())
1450 *ecx |= (1 << 31);
1451 break;
1452 case 2:
1453 /* cache info: needed for Pentium Pro compatibility */
1454 *eax = 1;
1455 *ebx = 0;
1456 *ecx = 0;
1457 *edx = 0x2c307d;
1458 break;
1459 case 4:
1460 /* cache info: needed for Core compatibility */
1461 switch (*ecx) {
1462 case 0: /* L1 dcache info */
1463 *eax = 0x0000121;
1464 *ebx = 0x1c0003f;
1465 *ecx = 0x000003f;
1466 *edx = 0x0000001;
1467 break;
1468 case 1: /* L1 icache info */
1469 *eax = 0x0000122;
1470 *ebx = 0x1c0003f;
1471 *ecx = 0x000003f;
1472 *edx = 0x0000001;
1473 break;
1474 case 2: /* L2 cache info */
1475 *eax = 0x0000143;
1476 *ebx = 0x3c0003f;
1477 *ecx = 0x0000fff;
1478 *edx = 0x0000001;
1479 break;
1480 default: /* end of info */
1481 *eax = 0;
1482 *ebx = 0;
1483 *ecx = 0;
1484 *edx = 0;
1485 break;
1486 }
1487
1488 break;
1489 case 5:
1490 /* mwait info: needed for Core compatibility */
1491 *eax = 0; /* Smallest monitor-line size in bytes */
1492 *ebx = 0; /* Largest monitor-line size in bytes */
1493 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1494 *edx = 0;
1495 break;
1496 case 6:
1497 /* Thermal and Power Leaf */
1498 *eax = 0;
1499 *ebx = 0;
1500 *ecx = 0;
1501 *edx = 0;
1502 break;
1503 case 9:
1504 /* Direct Cache Access Information Leaf */
1505 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1506 *ebx = 0;
1507 *ecx = 0;
1508 *edx = 0;
1509 break;
1510 case 0xA:
1511 /* Architectural Performance Monitoring Leaf */
1512 *eax = 0;
1513 *ebx = 0;
1514 *ecx = 0;
1515 *edx = 0;
1516 break;
1517 case 0x80000000:
1518 *eax = env->cpuid_xlevel;
1519 *ebx = env->cpuid_vendor1;
1520 *edx = env->cpuid_vendor2;
1521 *ecx = env->cpuid_vendor3;
1522 break;
1523 case 0x80000001:
1524 *eax = env->cpuid_features;
1525 *ebx = 0;
1526 *ecx = env->cpuid_ext3_features;
1527 *edx = env->cpuid_ext2_features;
1528
1529 if (kvm_enabled()) {
1530 uint32_t h_eax, h_edx;
1531
1532 host_cpuid(0x80000001, &h_eax, NULL, NULL, &h_edx);
1533
1534 /* disable CPU features that the host does not support */
1535
1536 /* long mode */
1537 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
1538 *edx &= ~0x20000000;
1539 /* syscall */
1540 if ((h_edx & 0x00000800) == 0)
1541 *edx &= ~0x00000800;
1542 /* nx */
1543 if ((h_edx & 0x00100000) == 0)
1544 *edx &= ~0x00100000;
1545
1546 /* disable CPU features that KVM cannot support */
1547
1548 /* svm */
1549 *ecx &= ~4UL;
1550 /* 3dnow */
1551 *edx &= ~0xc0000000;
1552 }
1553 break;
1554 case 0x80000002:
1555 case 0x80000003:
1556 case 0x80000004:
1557 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1558 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1559 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1560 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1561 break;
1562 case 0x80000005:
1563 /* cache info (L1 cache) */
1564 *eax = 0x01ff01ff;
1565 *ebx = 0x01ff01ff;
1566 *ecx = 0x40020140;
1567 *edx = 0x40020140;
1568 break;
1569 case 0x80000006:
1570 /* cache info (L2 cache) */
1571 *eax = 0;
1572 *ebx = 0x42004200;
1573 *ecx = 0x02008140;
1574 *edx = 0;
1575 break;
1576 case 0x80000008:
1577 /* virtual & phys address size in low 2 bytes. */
1578 /* XXX: This value must match the one used in the MMU code. */
1579 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1580 /* 64 bit processor */
1581 #if defined(USE_KQEMU)
1582 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
1583 #else
1584 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1585 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1586 #endif
1587 } else {
1588 #if defined(USE_KQEMU)
1589 *eax = 0x00000020; /* 32 bits physical */
1590 #else
1591 if (env->cpuid_features & CPUID_PSE36)
1592 *eax = 0x00000024; /* 36 bits physical */
1593 else
1594 *eax = 0x00000020; /* 32 bits physical */
1595 #endif
1596 }
1597 *ebx = 0;
1598 *ecx = 0;
1599 *edx = 0;
1600 break;
1601 case 0x8000000A:
1602 *eax = 0x00000001; /* SVM Revision */
1603 *ebx = 0x00000010; /* nr of ASIDs */
1604 *ecx = 0;
1605 *edx = 0; /* optional features */
1606 break;
1607 default:
1608 /* reserved values: zero */
1609 *eax = 0;
1610 *ebx = 0;
1611 *ecx = 0;
1612 *edx = 0;
1613 break;
1614 }
1615 }
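/*
 * Illustrative sketch (disabled): recovering family/model/stepping from
 * the leaf 1 EAX value assembled in cpu_x86_register().
 */
#if 0
static void example_decode_version(CPUX86State *env)
{
    uint32_t eax, ebx, ecx, edx;

    cpu_x86_cpuid(env, 1, &eax, &ebx, &ecx, &edx);
    (void)ebx; (void)ecx; (void)edx;
    /* with the "qemu64" defaults this prints 6 / 2 / 3 (eax == 0x623) */
    printf("family=%d model=%d stepping=%d\n",
           (int)((eax >> 8) & 0xf), (int)((eax >> 4) & 0xf),
           (int)(eax & 0xf));
}
#endif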
1616
1617 CPUX86State *cpu_x86_init(const char *cpu_model)
1618 {
1619 CPUX86State *env;
1620 static int inited;
1621
1622 env = qemu_mallocz(sizeof(CPUX86State));
1623 if (!env)
1624 return NULL;
1625 cpu_exec_init(env);
1626 env->cpu_model_str = cpu_model;
1627
1628 /* init various static tables */
1629 if (!inited) {
1630 inited = 1;
1631 optimize_flags_init();
1632 #ifndef CONFIG_USER_ONLY
1633 prev_debug_excp_handler =
1634 cpu_set_debug_excp_handler(breakpoint_handler);
1635 #endif
1636 }
1637 if (cpu_x86_register(env, cpu_model) < 0) {
1638 cpu_x86_close(env);
1639 return NULL;
1640 }
1641 cpu_reset(env);
1642 #ifdef USE_KQEMU
1643 kqemu_init(env);
1644 #endif
1645 if (kvm_enabled())
1646 kvm_init_vcpu(env);
1647 return env;
1648 }
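/*
 * Illustrative usage sketch (disabled): the call a machine model makes,
 * with an optional feature string appended to the model name.
 */
#if 0
static CPUX86State *example_create_cpu(void)
{
    CPUX86State *env = cpu_x86_init("qemu64,+ssse3");

    if (!env)
        fprintf(stderr, "unable to create x86 CPU\n");
    return env;
}
#endif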