git.proxmox.com Git - qemu.git/blob - target-i386/helper.c
Split CPUID from op_helper
[qemu.git] / target-i386 / helper.c
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
27
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
31 #include "qemu-common.h"
32
33 //#define DEBUG_MMU
34
35 static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
36
/* Translate a single CPUID feature-flag name (e.g. "sse2", "syscall")
 * into the corresponding bit of one of the four feature words:
 *
 *   features      - CPUID leaf 0000_0001, EDX
 *   ext_features  - CPUID leaf 0000_0001, ECX
 *   ext2_features - CPUID leaf 8000_0001, EDX
 *   ext3_features - CPUID leaf 8000_0001, ECX
 *
 * The tables are scanned in the order above and the first match wins,
 * so names present in several tables (e.g. "mtrr") set the standard
 * feature word.  Unknown names are reported on stderr and ignored. */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    /* BUG FIX: bit 12 was previously misspelled "mttr"; the correct
     * CPUID 8000_0001:EDX bit 12 name is "mtrr". */
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
93
/* Allocate and initialise a new x86 CPU state for the given model
 * string (e.g. "qemu64" or "pentium3,+sse2,-pse36").  Returns NULL if
 * allocation fails or the model string cannot be parsed.  The caller
 * owns the returned state and releases it with cpu_x86_close().
 * NOTE(review): env->cpu_model_str aliases the caller's string, it is
 * not copied -- the caller must keep it alive. */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;   /* one-time translator table setup guard */

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    /* parse the model string and fill in the CPUID fields */
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
120
/* Static description of one built-in CPU model: CPUID vendor and basic
 * level, family/model/stepping, the four CPUID feature words, the
 * maximum extended CPUID level (xlevel) and the brand string. */
typedef struct x86_def_t {
    const char *name;
    uint32_t level;                 /* max basic CPUID leaf (EAX=0) */
    uint32_t vendor1, vendor2, vendor3;   /* CPUID vendor id (EBX/EDX/ECX) */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                /* max extended CPUID leaf (EAX=0x80000000) */
    char model_id[48];              /* brand string, leaves 0x80000002..4 */
} x86_def_t;
132
/* Cumulative CPUID leaf-1 EDX feature sets for the historical Intel
 * models below; each generation extends the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* Baseline feature word used by the "qemu*" and modern Intel models. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Built-in CPU model table.  cpu_x86_find_by_name() selects an entry
 * by name and then applies the user's +flag/-flag/key=value overrides
 * on top of the copied definition. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
293
/* Parse a "-cpu" model string of the form
 *     name[,+flag][,-flag][,key=value]...
 * Looks up 'name' in x86_defs, copies the definition into *x86_cpu_def,
 * then applies the overrides:
 *   +flag     set a CPUID feature bit
 *   -flag     clear a CPUID feature bit ('-' wins over '+')
 *   family=, model=, stepping=   numeric overrides
 *   vendor=   12-character CPUID vendor string
 *   model_id= brand string
 * Returns 0 on success, -1 on any parse error (message on stderr).
 * NOTE(review): uses strtok(), so this is not re-entrant and must not
 * run concurrently with other strtok() users. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* find the base model by name */
    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;   /* split "key=value" in place */
            if (!strcmp(featurestr, "family")) {
                char *err;
                /* NOTE(review): no upper bound is enforced here even
                 * though cpuid_version packs family into 4 bits --
                 * confirm intended range. */
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                /* model/stepping are limited to 0xf because they occupy
                 * 4-bit fields in cpuid_version */
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* pack the 12 characters into the three CPUID vendor
                 * registers, little-endian within each word */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* apply '+' flags first, then '-' flags, so '-' always wins */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
391
392 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
393 {
394 unsigned int i;
395
396 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
397 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
398 }
399
/* Fill the CPUID-related fields of 'env' from the given model string.
 * Returns 0 on success, -1 if the model string cannot be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* models without an explicit vendor default to GenuineIntel */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* pack family/model/stepping into the CPUID leaf-1 EAX signature.
     * NOTE(review): model and stepping get 4-bit fields, so a model > 15
     * (e.g. n270's model 28) spills into the family bits -- extended
     * family/model encoding is not implemented here; confirm. */
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;   /* power-on default PAT MSR value */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* Pack the brand string, NUL-padded to 48 bytes, into the twelve
         * 32-bit cpuid_model words.  This ORs bytes into cpuid_model, so
         * it presumably relies on env having been zero-initialised by the
         * caller (cpu_x86_init uses qemu_mallocz) -- verify if reused. */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
439
/* NOTE: must be called outside the CPU execute loop */
/* Reset 'env' to the architectural power-on state: real mode with CS
 * pointing at the reset vector (F000:FFF0), 16-bit segment limits, and
 * FPU/SSE defaults.  All state up to the 'breakpoints' field is cleared
 * first so debugger state survives the reset. */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);   /* CR0 reset value: CD, NW and ET set */
    env->a20_mask = ~0x0;                  /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 = the reset vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;   /* EDX holds the CPU signature at reset */

    env->eflags = 0x2;   /* only the always-set reserved bit 1 */

    /* FPU init: all stack slots tagged empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;   /* SSE default: all exceptions masked */
}
494
/* Release a CPU state previously allocated by cpu_x86_init(). */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
499
500 /***********************************************************/
501 /* x86 debug */
502
/* Printable names for the lazy condition-code operations, indexed by
 * the CC_OP_* enum; used only by cpu_dump_state() below.  Each group of
 * four covers the B/W/L/Q operand sizes. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
557
/* Dump the architectural state of 'env' (general registers, segments,
 * descriptor tables, control registers, and -- depending on 'flags' --
 * the lazy condition-code state (X86_DUMP_CCOP) and the FPU/SSE state
 * (X86_DUMP_FPU)) to 'f' via the fprintf-like callback. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: print the full 64-bit register file */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* segment registers and descriptor tables; 64-bit bases need the
       wide format when long mode is active */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* show the lazy condition-code state (source, dest, operation) */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* rebuild the packed 8-bit tag word from the per-register tags */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* only 64-bit code can see XMM8..15 */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
764
765 /***********************************************************/
766 /* x86 mmu */
767 /* XXX: add PGE support */
768
769 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
770 {
771 a20_state = (a20_state != 0);
772 if (a20_state != ((env->a20_mask >> 20) & 1)) {
773 #if defined(DEBUG_MMU)
774 printf("A20 update: a20=%d\n", a20_state);
775 #endif
776 /* if the cpu is currently executing code, we must unlink it and
777 all the potentially executing TB */
778 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
779
780 /* when a20 is changed, all the MMU mappings are invalid, so
781 we must flush everything */
782 tlb_flush(env, 1);
783 env->a20_mask = (~0x100000) | (a20_state << 20);
784 }
785 }
786
/* Install a new CR0 value.  Handles the side effects: TLB flush when a
 * paging-related bit changes, entering/leaving long mode when PG is
 * toggled under EFER.LME/LMA, and mirroring PE/MP/EM/TS into hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG, WP or PE invalidates the cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;   /* ET is hardwired to 1 */

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
827
828 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
829 the PDPT */
830 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
831 {
832 env->cr[3] = new_cr3;
833 if (env->cr[0] & CR0_PG_MASK) {
834 #if defined(DEBUG_MMU)
835 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
836 #endif
837 tlb_flush(env, 0);
838 }
839 }
840
841 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
842 {
843 #if defined(DEBUG_MMU)
844 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
845 #endif
846 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
847 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
848 tlb_flush(env, 1);
849 }
850 /* SSE handling */
851 if (!(env->cpuid_features & CPUID_SSE))
852 new_cr4 &= ~CR4_OSFXSR_MASK;
853 if (new_cr4 & CR4_OSFXSR_MASK)
854 env->hflags |= HF_OSFXSR_MASK;
855 else
856 env->hflags &= ~HF_OSFXSR_MASK;
857
858 env->cr[4] = new_cr4;
859 }
860
/* XXX: also flush 4MB pages */
/* Invalidate the TLB entry for a single guest virtual page. */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
866
867 #if defined(CONFIG_USER_ONLY)
868
869 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
870 int is_write, int mmu_idx, int is_softmmu)
871 {
872 /* user mode only emulation */
873 is_write &= 1;
874 env->cr[2] = addr;
875 env->error_code = (is_write << PG_ERROR_W_BIT);
876 env->error_code |= PG_ERROR_U_MASK;
877 env->exception_index = EXCP0E_PAGE;
878 return 1;
879 }
880
/* User-mode emulation: guest virtual addresses map 1:1 to "physical"
 * addresses, so the debug translation is the identity. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
885
886 #else
887
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
/* Mask selecting the physical-address bits (above the 4K page offset)
 * of a PAE/long-mode page-table entry. */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
899
900 /* return value:
901 -1 = cannot handle fault
902 0 = nothing more to do
903 1 = generate PF fault
904 2 = soft MMU activation required for this block
905 */
906 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
907 int is_write1, int mmu_idx, int is_softmmu)
908 {
909 uint64_t ptep, pte;
910 target_ulong pde_addr, pte_addr;
911 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
912 target_phys_addr_t paddr;
913 uint32_t page_offset;
914 target_ulong vaddr, virt_addr;
915
916 is_user = mmu_idx == MMU_USER_IDX;
917 #if defined(DEBUG_MMU)
918 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
919 addr, is_write1, is_user, env->eip);
920 #endif
921 is_write = is_write1 & 1;
922
923 if (!(env->cr[0] & CR0_PG_MASK)) {
924 pte = addr;
925 virt_addr = addr & TARGET_PAGE_MASK;
926 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
927 page_size = 4096;
928 goto do_mapping;
929 }
930
931 if (env->cr[4] & CR4_PAE_MASK) {
932 uint64_t pde, pdpe;
933 target_ulong pdpe_addr;
934
935 #ifdef TARGET_X86_64
936 if (env->hflags & HF_LMA_MASK) {
937 uint64_t pml4e_addr, pml4e;
938 int32_t sext;
939
940 /* test virtual address sign extension */
941 sext = (int64_t)addr >> 47;
942 if (sext != 0 && sext != -1) {
943 env->error_code = 0;
944 env->exception_index = EXCP0D_GPF;
945 return 1;
946 }
947
948 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
949 env->a20_mask;
950 pml4e = ldq_phys(pml4e_addr);
951 if (!(pml4e & PG_PRESENT_MASK)) {
952 error_code = 0;
953 goto do_fault;
954 }
955 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
956 error_code = PG_ERROR_RSVD_MASK;
957 goto do_fault;
958 }
959 if (!(pml4e & PG_ACCESSED_MASK)) {
960 pml4e |= PG_ACCESSED_MASK;
961 stl_phys_notdirty(pml4e_addr, pml4e);
962 }
963 ptep = pml4e ^ PG_NX_MASK;
964 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
965 env->a20_mask;
966 pdpe = ldq_phys(pdpe_addr);
967 if (!(pdpe & PG_PRESENT_MASK)) {
968 error_code = 0;
969 goto do_fault;
970 }
971 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
972 error_code = PG_ERROR_RSVD_MASK;
973 goto do_fault;
974 }
975 ptep &= pdpe ^ PG_NX_MASK;
976 if (!(pdpe & PG_ACCESSED_MASK)) {
977 pdpe |= PG_ACCESSED_MASK;
978 stl_phys_notdirty(pdpe_addr, pdpe);
979 }
980 } else
981 #endif
982 {
983 /* XXX: load them when cr3 is loaded ? */
984 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
985 env->a20_mask;
986 pdpe = ldq_phys(pdpe_addr);
987 if (!(pdpe & PG_PRESENT_MASK)) {
988 error_code = 0;
989 goto do_fault;
990 }
991 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
992 }
993
994 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
995 env->a20_mask;
996 pde = ldq_phys(pde_addr);
997 if (!(pde & PG_PRESENT_MASK)) {
998 error_code = 0;
999 goto do_fault;
1000 }
1001 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1002 error_code = PG_ERROR_RSVD_MASK;
1003 goto do_fault;
1004 }
1005 ptep &= pde ^ PG_NX_MASK;
1006 if (pde & PG_PSE_MASK) {
1007 /* 2 MB page */
1008 page_size = 2048 * 1024;
1009 ptep ^= PG_NX_MASK;
1010 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1011 goto do_fault_protect;
1012 if (is_user) {
1013 if (!(ptep & PG_USER_MASK))
1014 goto do_fault_protect;
1015 if (is_write && !(ptep & PG_RW_MASK))
1016 goto do_fault_protect;
1017 } else {
1018 if ((env->cr[0] & CR0_WP_MASK) &&
1019 is_write && !(ptep & PG_RW_MASK))
1020 goto do_fault_protect;
1021 }
1022 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1023 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1024 pde |= PG_ACCESSED_MASK;
1025 if (is_dirty)
1026 pde |= PG_DIRTY_MASK;
1027 stl_phys_notdirty(pde_addr, pde);
1028 }
1029 /* align to page_size */
1030 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1031 virt_addr = addr & ~(page_size - 1);
1032 } else {
1033 /* 4 KB page */
1034 if (!(pde & PG_ACCESSED_MASK)) {
1035 pde |= PG_ACCESSED_MASK;
1036 stl_phys_notdirty(pde_addr, pde);
1037 }
1038 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1039 env->a20_mask;
1040 pte = ldq_phys(pte_addr);
1041 if (!(pte & PG_PRESENT_MASK)) {
1042 error_code = 0;
1043 goto do_fault;
1044 }
1045 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1046 error_code = PG_ERROR_RSVD_MASK;
1047 goto do_fault;
1048 }
1049 /* combine pde and pte nx, user and rw protections */
1050 ptep &= pte ^ PG_NX_MASK;
1051 ptep ^= PG_NX_MASK;
1052 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1053 goto do_fault_protect;
1054 if (is_user) {
1055 if (!(ptep & PG_USER_MASK))
1056 goto do_fault_protect;
1057 if (is_write && !(ptep & PG_RW_MASK))
1058 goto do_fault_protect;
1059 } else {
1060 if ((env->cr[0] & CR0_WP_MASK) &&
1061 is_write && !(ptep & PG_RW_MASK))
1062 goto do_fault_protect;
1063 }
1064 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1065 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1066 pte |= PG_ACCESSED_MASK;
1067 if (is_dirty)
1068 pte |= PG_DIRTY_MASK;
1069 stl_phys_notdirty(pte_addr, pte);
1070 }
1071 page_size = 4096;
1072 virt_addr = addr & ~0xfff;
1073 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1074 }
1075 } else {
1076 uint32_t pde;
1077
1078 /* page directory entry */
1079 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1080 env->a20_mask;
1081 pde = ldl_phys(pde_addr);
1082 if (!(pde & PG_PRESENT_MASK)) {
1083 error_code = 0;
1084 goto do_fault;
1085 }
1086 /* if PSE bit is set, then we use a 4MB page */
1087 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1088 page_size = 4096 * 1024;
1089 if (is_user) {
1090 if (!(pde & PG_USER_MASK))
1091 goto do_fault_protect;
1092 if (is_write && !(pde & PG_RW_MASK))
1093 goto do_fault_protect;
1094 } else {
1095 if ((env->cr[0] & CR0_WP_MASK) &&
1096 is_write && !(pde & PG_RW_MASK))
1097 goto do_fault_protect;
1098 }
1099 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1100 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1101 pde |= PG_ACCESSED_MASK;
1102 if (is_dirty)
1103 pde |= PG_DIRTY_MASK;
1104 stl_phys_notdirty(pde_addr, pde);
1105 }
1106
1107 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1108 ptep = pte;
1109 virt_addr = addr & ~(page_size - 1);
1110 } else {
1111 if (!(pde & PG_ACCESSED_MASK)) {
1112 pde |= PG_ACCESSED_MASK;
1113 stl_phys_notdirty(pde_addr, pde);
1114 }
1115
1116 /* page directory entry */
1117 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1118 env->a20_mask;
1119 pte = ldl_phys(pte_addr);
1120 if (!(pte & PG_PRESENT_MASK)) {
1121 error_code = 0;
1122 goto do_fault;
1123 }
1124 /* combine pde and pte user and rw protections */
1125 ptep = pte & pde;
1126 if (is_user) {
1127 if (!(ptep & PG_USER_MASK))
1128 goto do_fault_protect;
1129 if (is_write && !(ptep & PG_RW_MASK))
1130 goto do_fault_protect;
1131 } else {
1132 if ((env->cr[0] & CR0_WP_MASK) &&
1133 is_write && !(ptep & PG_RW_MASK))
1134 goto do_fault_protect;
1135 }
1136 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1137 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1138 pte |= PG_ACCESSED_MASK;
1139 if (is_dirty)
1140 pte |= PG_DIRTY_MASK;
1141 stl_phys_notdirty(pte_addr, pte);
1142 }
1143 page_size = 4096;
1144 virt_addr = addr & ~0xfff;
1145 }
1146 }
1147 /* the page can be put in the TLB */
1148 prot = PAGE_READ;
1149 if (!(ptep & PG_NX_MASK))
1150 prot |= PAGE_EXEC;
1151 if (pte & PG_DIRTY_MASK) {
1152 /* only set write access if already dirty... otherwise wait
1153 for dirty access */
1154 if (is_user) {
1155 if (ptep & PG_RW_MASK)
1156 prot |= PAGE_WRITE;
1157 } else {
1158 if (!(env->cr[0] & CR0_WP_MASK) ||
1159 (ptep & PG_RW_MASK))
1160 prot |= PAGE_WRITE;
1161 }
1162 }
1163 do_mapping:
1164 pte = pte & env->a20_mask;
1165
1166 /* Even if 4MB pages, we map only one 4KB page in the cache to
1167 avoid filling it too fast */
1168 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1169 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1170 vaddr = virt_addr + page_offset;
1171
1172 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1173 return ret;
1174 do_fault_protect:
1175 error_code = PG_ERROR_P_MASK;
1176 do_fault:
1177 error_code |= (is_write << PG_ERROR_W_BIT);
1178 if (is_user)
1179 error_code |= PG_ERROR_U_MASK;
1180 if (is_write1 == 2 &&
1181 (env->efer & MSR_EFER_NXE) &&
1182 (env->cr[4] & CR4_PAE_MASK))
1183 error_code |= PG_ERROR_I_D_MASK;
1184 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1185 /* cr2 is not modified in case of exceptions */
1186 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1187 addr);
1188 } else {
1189 env->cr[2] = addr;
1190 }
1191 env->error_code = error_code;
1192 env->exception_index = EXCP0E_PAGE;
1193 return 1;
1194 }
1195
/* Translate a guest virtual address to a guest physical address by
 * walking the page tables in software, for debug accesses: no page
 * fault is raised and no accessed/dirty bits are written back.
 *
 * Returns the physical address, or -1 if translation fails (a
 * non-present entry at any level, or a non-canonical address in
 * long mode).
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (or long mode): 64-bit page table entries */
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form:
               bits 63..47 must all equal bit 47) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: CR3 points at the 4-entry, 32-byte-aligned
               page directory pointer table */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* legacy 2-level walk with 32-bit entries */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: physical address == virtual address */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page (PSE) */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* combine the frame base with the in-page offset of 'addr' */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1289 #endif /* !CONFIG_USER_ONLY */
1290
1291 void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
1292 uint32_t *eax, uint32_t *ebx,
1293 uint32_t *ecx, uint32_t *edx)
1294 {
1295 /* test if maximum index reached */
1296 if (index & 0x80000000) {
1297 if (index > env->cpuid_xlevel)
1298 index = env->cpuid_level;
1299 } else {
1300 if (index > env->cpuid_level)
1301 index = env->cpuid_level;
1302 }
1303
1304 switch(index) {
1305 case 0:
1306 *eax = env->cpuid_level;
1307 *ebx = env->cpuid_vendor1;
1308 *edx = env->cpuid_vendor2;
1309 *ecx = env->cpuid_vendor3;
1310 break;
1311 case 1:
1312 *eax = env->cpuid_version;
1313 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1314 *ecx = env->cpuid_ext_features;
1315 *edx = env->cpuid_features;
1316 break;
1317 case 2:
1318 /* cache info: needed for Pentium Pro compatibility */
1319 *eax = 1;
1320 *ebx = 0;
1321 *ecx = 0;
1322 *edx = 0x2c307d;
1323 break;
1324 case 4:
1325 /* cache info: needed for Core compatibility */
1326 switch (*ecx) {
1327 case 0: /* L1 dcache info */
1328 *eax = 0x0000121;
1329 *ebx = 0x1c0003f;
1330 *ecx = 0x000003f;
1331 *edx = 0x0000001;
1332 break;
1333 case 1: /* L1 icache info */
1334 *eax = 0x0000122;
1335 *ebx = 0x1c0003f;
1336 *ecx = 0x000003f;
1337 *edx = 0x0000001;
1338 break;
1339 case 2: /* L2 cache info */
1340 *eax = 0x0000143;
1341 *ebx = 0x3c0003f;
1342 *ecx = 0x0000fff;
1343 *edx = 0x0000001;
1344 break;
1345 default: /* end of info */
1346 *eax = 0;
1347 *ebx = 0;
1348 *ecx = 0;
1349 *edx = 0;
1350 break;
1351 }
1352
1353 break;
1354 case 5:
1355 /* mwait info: needed for Core compatibility */
1356 *eax = 0; /* Smallest monitor-line size in bytes */
1357 *ebx = 0; /* Largest monitor-line size in bytes */
1358 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1359 *edx = 0;
1360 break;
1361 case 6:
1362 /* Thermal and Power Leaf */
1363 *eax = 0;
1364 *ebx = 0;
1365 *ecx = 0;
1366 *edx = 0;
1367 break;
1368 case 9:
1369 /* Direct Cache Access Information Leaf */
1370 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1371 *ebx = 0;
1372 *ecx = 0;
1373 *edx = 0;
1374 break;
1375 case 0xA:
1376 /* Architectural Performance Monitoring Leaf */
1377 *eax = 0;
1378 *ebx = 0;
1379 *ecx = 0;
1380 *edx = 0;
1381 break;
1382 case 0x80000000:
1383 *eax = env->cpuid_xlevel;
1384 *ebx = env->cpuid_vendor1;
1385 *edx = env->cpuid_vendor2;
1386 *ecx = env->cpuid_vendor3;
1387 break;
1388 case 0x80000001:
1389 *eax = env->cpuid_features;
1390 *ebx = 0;
1391 *ecx = env->cpuid_ext3_features;
1392 *edx = env->cpuid_ext2_features;
1393 break;
1394 case 0x80000002:
1395 case 0x80000003:
1396 case 0x80000004:
1397 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1398 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1399 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1400 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1401 break;
1402 case 0x80000005:
1403 /* cache info (L1 cache) */
1404 *eax = 0x01ff01ff;
1405 *ebx = 0x01ff01ff;
1406 *ecx = 0x40020140;
1407 *edx = 0x40020140;
1408 break;
1409 case 0x80000006:
1410 /* cache info (L2 cache) */
1411 *eax = 0;
1412 *ebx = 0x42004200;
1413 *ecx = 0x02008140;
1414 *edx = 0;
1415 break;
1416 case 0x80000008:
1417 /* virtual & phys address size in low 2 bytes. */
1418 /* XXX: This value must match the one used in the MMU code. */
1419 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1420 /* 64 bit processor */
1421 #if defined(USE_KQEMU)
1422 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
1423 #else
1424 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1425 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1426 #endif
1427 } else {
1428 #if defined(USE_KQEMU)
1429 *eax = 0x00000020; /* 32 bits physical */
1430 #else
1431 if (env->cpuid_features & CPUID_PSE36)
1432 *eax = 0x00000024; /* 36 bits physical */
1433 else
1434 *eax = 0x00000020; /* 32 bits physical */
1435 #endif
1436 }
1437 *ebx = 0;
1438 *ecx = 0;
1439 *edx = 0;
1440 break;
1441 case 0x8000000A:
1442 *eax = 0x00000001; /* SVM Revision */
1443 *ebx = 0x00000010; /* nr of ASIDs */
1444 *ecx = 0;
1445 *edx = 0; /* optional features */
1446 break;
1447 default:
1448 /* reserved values: zero */
1449 *eax = 0;
1450 *ebx = 0;
1451 *ecx = 0;
1452 *edx = 0;
1453 break;
1454 }
1455 }