/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
33 static void print_pte(Monitor
*mon
, CPUArchState
*env
, hwaddr addr
,
34 hwaddr pte
, hwaddr mask
)
37 if (env
->cr
[4] & CR4_LA57_MASK
) {
38 if (addr
& (1ULL << 56)) {
42 if (addr
& (1ULL << 47)) {
47 monitor_printf(mon
, TARGET_FMT_plx
": " TARGET_FMT_plx
48 " %c%c%c%c%c%c%c%c%c\n",
51 pte
& PG_NX_MASK
? 'X' : '-',
52 pte
& PG_GLOBAL_MASK
? 'G' : '-',
53 pte
& PG_PSE_MASK
? 'P' : '-',
54 pte
& PG_DIRTY_MASK
? 'D' : '-',
55 pte
& PG_ACCESSED_MASK
? 'A' : '-',
56 pte
& PG_PCD_MASK
? 'C' : '-',
57 pte
& PG_PWT_MASK
? 'T' : '-',
58 pte
& PG_USER_MASK
? 'U' : '-',
59 pte
& PG_RW_MASK
? 'W' : '-');
62 static void tlb_info_32(Monitor
*mon
, CPUArchState
*env
)
65 uint32_t pgd
, pde
, pte
;
67 pgd
= env
->cr
[3] & ~0xfff;
68 for(l1
= 0; l1
< 1024; l1
++) {
69 cpu_physical_memory_read(pgd
+ l1
* 4, &pde
, 4);
70 pde
= le32_to_cpu(pde
);
71 if (pde
& PG_PRESENT_MASK
) {
72 if ((pde
& PG_PSE_MASK
) && (env
->cr
[4] & CR4_PSE_MASK
)) {
74 print_pte(mon
, env
, (l1
<< 22), pde
, ~((1 << 21) - 1));
76 for(l2
= 0; l2
< 1024; l2
++) {
77 cpu_physical_memory_read((pde
& ~0xfff) + l2
* 4, &pte
, 4);
78 pte
= le32_to_cpu(pte
);
79 if (pte
& PG_PRESENT_MASK
) {
80 print_pte(mon
, env
, (l1
<< 22) + (l2
<< 12),
90 static void tlb_info_pae32(Monitor
*mon
, CPUArchState
*env
)
92 unsigned int l1
, l2
, l3
;
93 uint64_t pdpe
, pde
, pte
;
94 uint64_t pdp_addr
, pd_addr
, pt_addr
;
96 pdp_addr
= env
->cr
[3] & ~0x1f;
97 for (l1
= 0; l1
< 4; l1
++) {
98 cpu_physical_memory_read(pdp_addr
+ l1
* 8, &pdpe
, 8);
99 pdpe
= le64_to_cpu(pdpe
);
100 if (pdpe
& PG_PRESENT_MASK
) {
101 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
102 for (l2
= 0; l2
< 512; l2
++) {
103 cpu_physical_memory_read(pd_addr
+ l2
* 8, &pde
, 8);
104 pde
= le64_to_cpu(pde
);
105 if (pde
& PG_PRESENT_MASK
) {
106 if (pde
& PG_PSE_MASK
) {
107 /* 2M pages with PAE, CR4.PSE is ignored */
108 print_pte(mon
, env
, (l1
<< 30) + (l2
<< 21), pde
,
109 ~((hwaddr
)(1 << 20) - 1));
111 pt_addr
= pde
& 0x3fffffffff000ULL
;
112 for (l3
= 0; l3
< 512; l3
++) {
113 cpu_physical_memory_read(pt_addr
+ l3
* 8, &pte
, 8);
114 pte
= le64_to_cpu(pte
);
115 if (pte
& PG_PRESENT_MASK
) {
116 print_pte(mon
, env
, (l1
<< 30) + (l2
<< 21)
130 static void tlb_info_la48(Monitor
*mon
, CPUArchState
*env
,
131 uint64_t l0
, uint64_t pml4_addr
)
133 uint64_t l1
, l2
, l3
, l4
;
134 uint64_t pml4e
, pdpe
, pde
, pte
;
135 uint64_t pdp_addr
, pd_addr
, pt_addr
;
137 for (l1
= 0; l1
< 512; l1
++) {
138 cpu_physical_memory_read(pml4_addr
+ l1
* 8, &pml4e
, 8);
139 pml4e
= le64_to_cpu(pml4e
);
140 if (!(pml4e
& PG_PRESENT_MASK
)) {
144 pdp_addr
= pml4e
& 0x3fffffffff000ULL
;
145 for (l2
= 0; l2
< 512; l2
++) {
146 cpu_physical_memory_read(pdp_addr
+ l2
* 8, &pdpe
, 8);
147 pdpe
= le64_to_cpu(pdpe
);
148 if (!(pdpe
& PG_PRESENT_MASK
)) {
152 if (pdpe
& PG_PSE_MASK
) {
153 /* 1G pages, CR4.PSE is ignored */
154 print_pte(mon
, env
, (l0
<< 48) + (l1
<< 39) + (l2
<< 30),
155 pdpe
, 0x3ffffc0000000ULL
);
159 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
160 for (l3
= 0; l3
< 512; l3
++) {
161 cpu_physical_memory_read(pd_addr
+ l3
* 8, &pde
, 8);
162 pde
= le64_to_cpu(pde
);
163 if (!(pde
& PG_PRESENT_MASK
)) {
167 if (pde
& PG_PSE_MASK
) {
168 /* 2M pages, CR4.PSE is ignored */
169 print_pte(mon
, env
, (l0
<< 48) + (l1
<< 39) + (l2
<< 30) +
170 (l3
<< 21), pde
, 0x3ffffffe00000ULL
);
174 pt_addr
= pde
& 0x3fffffffff000ULL
;
175 for (l4
= 0; l4
< 512; l4
++) {
176 cpu_physical_memory_read(pt_addr
179 pte
= le64_to_cpu(pte
);
180 if (pte
& PG_PRESENT_MASK
) {
181 print_pte(mon
, env
, (l0
<< 48) + (l1
<< 39) +
182 (l2
<< 30) + (l3
<< 21) + (l4
<< 12),
183 pte
& ~PG_PSE_MASK
, 0x3fffffffff000ULL
);
191 static void tlb_info_la57(Monitor
*mon
, CPUArchState
*env
)
197 pml5_addr
= env
->cr
[3] & 0x3fffffffff000ULL
;
198 for (l0
= 0; l0
< 512; l0
++) {
199 cpu_physical_memory_read(pml5_addr
+ l0
* 8, &pml5e
, 8);
200 pml5e
= le64_to_cpu(pml5e
);
201 if (pml5e
& PG_PRESENT_MASK
) {
202 tlb_info_la48(mon
, env
, l0
, pml5e
& 0x3fffffffff000ULL
);
206 #endif /* TARGET_X86_64 */
208 void hmp_info_tlb(Monitor
*mon
, const QDict
*qdict
)
212 env
= mon_get_cpu_env();
214 if (!(env
->cr
[0] & CR0_PG_MASK
)) {
215 monitor_printf(mon
, "PG disabled\n");
218 if (env
->cr
[4] & CR4_PAE_MASK
) {
220 if (env
->hflags
& HF_LMA_MASK
) {
221 if (env
->cr
[4] & CR4_LA57_MASK
) {
222 tlb_info_la57(mon
, env
);
224 tlb_info_la48(mon
, env
, 0, env
->cr
[3] & 0x3fffffffff000ULL
);
229 tlb_info_pae32(mon
, env
);
232 tlb_info_32(mon
, env
);
236 static void mem_print(Monitor
*mon
, hwaddr
*pstart
,
238 hwaddr end
, int prot
)
244 monitor_printf(mon
, TARGET_FMT_plx
"-" TARGET_FMT_plx
" "
245 TARGET_FMT_plx
" %c%c%c\n",
246 *pstart
, end
, end
- *pstart
,
247 prot1
& PG_USER_MASK
? 'u' : '-',
249 prot1
& PG_RW_MASK
? 'w' : '-');
259 static void mem_info_32(Monitor
*mon
, CPUArchState
*env
)
263 uint32_t pgd
, pde
, pte
;
266 pgd
= env
->cr
[3] & ~0xfff;
269 for(l1
= 0; l1
< 1024; l1
++) {
270 cpu_physical_memory_read(pgd
+ l1
* 4, &pde
, 4);
271 pde
= le32_to_cpu(pde
);
273 if (pde
& PG_PRESENT_MASK
) {
274 if ((pde
& PG_PSE_MASK
) && (env
->cr
[4] & CR4_PSE_MASK
)) {
275 prot
= pde
& (PG_USER_MASK
| PG_RW_MASK
| PG_PRESENT_MASK
);
276 mem_print(mon
, &start
, &last_prot
, end
, prot
);
278 for(l2
= 0; l2
< 1024; l2
++) {
279 cpu_physical_memory_read((pde
& ~0xfff) + l2
* 4, &pte
, 4);
280 pte
= le32_to_cpu(pte
);
281 end
= (l1
<< 22) + (l2
<< 12);
282 if (pte
& PG_PRESENT_MASK
) {
284 (PG_USER_MASK
| PG_RW_MASK
| PG_PRESENT_MASK
);
288 mem_print(mon
, &start
, &last_prot
, end
, prot
);
293 mem_print(mon
, &start
, &last_prot
, end
, prot
);
296 /* Flush last range */
297 mem_print(mon
, &start
, &last_prot
, (hwaddr
)1 << 32, 0);
300 static void mem_info_pae32(Monitor
*mon
, CPUArchState
*env
)
302 unsigned int l1
, l2
, l3
;
304 uint64_t pdpe
, pde
, pte
;
305 uint64_t pdp_addr
, pd_addr
, pt_addr
;
308 pdp_addr
= env
->cr
[3] & ~0x1f;
311 for (l1
= 0; l1
< 4; l1
++) {
312 cpu_physical_memory_read(pdp_addr
+ l1
* 8, &pdpe
, 8);
313 pdpe
= le64_to_cpu(pdpe
);
315 if (pdpe
& PG_PRESENT_MASK
) {
316 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
317 for (l2
= 0; l2
< 512; l2
++) {
318 cpu_physical_memory_read(pd_addr
+ l2
* 8, &pde
, 8);
319 pde
= le64_to_cpu(pde
);
320 end
= (l1
<< 30) + (l2
<< 21);
321 if (pde
& PG_PRESENT_MASK
) {
322 if (pde
& PG_PSE_MASK
) {
323 prot
= pde
& (PG_USER_MASK
| PG_RW_MASK
|
325 mem_print(mon
, &start
, &last_prot
, end
, prot
);
327 pt_addr
= pde
& 0x3fffffffff000ULL
;
328 for (l3
= 0; l3
< 512; l3
++) {
329 cpu_physical_memory_read(pt_addr
+ l3
* 8, &pte
, 8);
330 pte
= le64_to_cpu(pte
);
331 end
= (l1
<< 30) + (l2
<< 21) + (l3
<< 12);
332 if (pte
& PG_PRESENT_MASK
) {
333 prot
= pte
& pde
& (PG_USER_MASK
| PG_RW_MASK
|
338 mem_print(mon
, &start
, &last_prot
, end
, prot
);
343 mem_print(mon
, &start
, &last_prot
, end
, prot
);
348 mem_print(mon
, &start
, &last_prot
, end
, prot
);
351 /* Flush last range */
352 mem_print(mon
, &start
, &last_prot
, (hwaddr
)1 << 32, 0);
357 static void mem_info_la48(Monitor
*mon
, CPUArchState
*env
)
360 uint64_t l1
, l2
, l3
, l4
;
361 uint64_t pml4e
, pdpe
, pde
, pte
;
362 uint64_t pml4_addr
, pdp_addr
, pd_addr
, pt_addr
, start
, end
;
364 pml4_addr
= env
->cr
[3] & 0x3fffffffff000ULL
;
367 for (l1
= 0; l1
< 512; l1
++) {
368 cpu_physical_memory_read(pml4_addr
+ l1
* 8, &pml4e
, 8);
369 pml4e
= le64_to_cpu(pml4e
);
371 if (pml4e
& PG_PRESENT_MASK
) {
372 pdp_addr
= pml4e
& 0x3fffffffff000ULL
;
373 for (l2
= 0; l2
< 512; l2
++) {
374 cpu_physical_memory_read(pdp_addr
+ l2
* 8, &pdpe
, 8);
375 pdpe
= le64_to_cpu(pdpe
);
376 end
= (l1
<< 39) + (l2
<< 30);
377 if (pdpe
& PG_PRESENT_MASK
) {
378 if (pdpe
& PG_PSE_MASK
) {
379 prot
= pdpe
& (PG_USER_MASK
| PG_RW_MASK
|
382 mem_print(mon
, &start
, &last_prot
, end
, prot
);
384 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
385 for (l3
= 0; l3
< 512; l3
++) {
386 cpu_physical_memory_read(pd_addr
+ l3
* 8, &pde
, 8);
387 pde
= le64_to_cpu(pde
);
388 end
= (l1
<< 39) + (l2
<< 30) + (l3
<< 21);
389 if (pde
& PG_PRESENT_MASK
) {
390 if (pde
& PG_PSE_MASK
) {
391 prot
= pde
& (PG_USER_MASK
| PG_RW_MASK
|
393 prot
&= pml4e
& pdpe
;
394 mem_print(mon
, &start
, &last_prot
, end
, prot
);
396 pt_addr
= pde
& 0x3fffffffff000ULL
;
397 for (l4
= 0; l4
< 512; l4
++) {
398 cpu_physical_memory_read(pt_addr
401 pte
= le64_to_cpu(pte
);
402 end
= (l1
<< 39) + (l2
<< 30) +
403 (l3
<< 21) + (l4
<< 12);
404 if (pte
& PG_PRESENT_MASK
) {
405 prot
= pte
& (PG_USER_MASK
| PG_RW_MASK
|
407 prot
&= pml4e
& pdpe
& pde
;
411 mem_print(mon
, &start
, &last_prot
, end
, prot
);
416 mem_print(mon
, &start
, &last_prot
, end
, prot
);
422 mem_print(mon
, &start
, &last_prot
, end
, prot
);
427 mem_print(mon
, &start
, &last_prot
, end
, prot
);
430 /* Flush last range */
431 mem_print(mon
, &start
, &last_prot
, (hwaddr
)1 << 48, 0);
434 static void mem_info_la57(Monitor
*mon
, CPUArchState
*env
)
437 uint64_t l0
, l1
, l2
, l3
, l4
;
438 uint64_t pml5e
, pml4e
, pdpe
, pde
, pte
;
439 uint64_t pml5_addr
, pml4_addr
, pdp_addr
, pd_addr
, pt_addr
, start
, end
;
441 pml5_addr
= env
->cr
[3] & 0x3fffffffff000ULL
;
444 for (l0
= 0; l0
< 512; l0
++) {
445 cpu_physical_memory_read(pml5_addr
+ l0
* 8, &pml5e
, 8);
446 pml4e
= le64_to_cpu(pml5e
);
448 if (!(pml5e
& PG_PRESENT_MASK
)) {
450 mem_print(mon
, &start
, &last_prot
, end
, prot
);
454 pml4_addr
= pml5e
& 0x3fffffffff000ULL
;
455 for (l1
= 0; l1
< 512; l1
++) {
456 cpu_physical_memory_read(pml4_addr
+ l1
* 8, &pml4e
, 8);
457 pml4e
= le64_to_cpu(pml4e
);
458 end
= (l0
<< 48) + (l1
<< 39);
459 if (!(pml4e
& PG_PRESENT_MASK
)) {
461 mem_print(mon
, &start
, &last_prot
, end
, prot
);
465 pdp_addr
= pml4e
& 0x3fffffffff000ULL
;
466 for (l2
= 0; l2
< 512; l2
++) {
467 cpu_physical_memory_read(pdp_addr
+ l2
* 8, &pdpe
, 8);
468 pdpe
= le64_to_cpu(pdpe
);
469 end
= (l0
<< 48) + (l1
<< 39) + (l2
<< 30);
470 if (pdpe
& PG_PRESENT_MASK
) {
472 mem_print(mon
, &start
, &last_prot
, end
, prot
);
476 if (pdpe
& PG_PSE_MASK
) {
477 prot
= pdpe
& (PG_USER_MASK
| PG_RW_MASK
|
480 mem_print(mon
, &start
, &last_prot
, end
, prot
);
484 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
485 for (l3
= 0; l3
< 512; l3
++) {
486 cpu_physical_memory_read(pd_addr
+ l3
* 8, &pde
, 8);
487 pde
= le64_to_cpu(pde
);
488 end
= (l0
<< 48) + (l1
<< 39) + (l2
<< 30) + (l3
<< 21);
489 if (pde
& PG_PRESENT_MASK
) {
491 mem_print(mon
, &start
, &last_prot
, end
, prot
);
495 if (pde
& PG_PSE_MASK
) {
496 prot
= pde
& (PG_USER_MASK
| PG_RW_MASK
|
498 prot
&= pml4e
& pdpe
;
499 mem_print(mon
, &start
, &last_prot
, end
, prot
);
503 pt_addr
= pde
& 0x3fffffffff000ULL
;
504 for (l4
= 0; l4
< 512; l4
++) {
505 cpu_physical_memory_read(pt_addr
+ l4
* 8, &pte
, 8);
506 pte
= le64_to_cpu(pte
);
507 end
= (l0
<< 48) + (l1
<< 39) + (l2
<< 30) +
508 (l3
<< 21) + (l4
<< 12);
509 if (pte
& PG_PRESENT_MASK
) {
510 prot
= pte
& (PG_USER_MASK
| PG_RW_MASK
|
512 prot
&= pml4e
& pdpe
& pde
;
516 mem_print(mon
, &start
, &last_prot
, end
, prot
);
522 /* Flush last range */
523 mem_print(mon
, &start
, &last_prot
, (hwaddr
)1 << 57, 0);
525 #endif /* TARGET_X86_64 */
527 void hmp_info_mem(Monitor
*mon
, const QDict
*qdict
)
531 env
= mon_get_cpu_env();
533 if (!(env
->cr
[0] & CR0_PG_MASK
)) {
534 monitor_printf(mon
, "PG disabled\n");
537 if (env
->cr
[4] & CR4_PAE_MASK
) {
539 if (env
->hflags
& HF_LMA_MASK
) {
540 if (env
->cr
[4] & CR4_LA57_MASK
) {
541 mem_info_la57(mon
, env
);
543 mem_info_la48(mon
, env
);
548 mem_info_pae32(mon
, env
);
551 mem_info_32(mon
, env
);
555 void hmp_mce(Monitor
*mon
, const QDict
*qdict
)
559 int cpu_index
= qdict_get_int(qdict
, "cpu_index");
560 int bank
= qdict_get_int(qdict
, "bank");
561 uint64_t status
= qdict_get_int(qdict
, "status");
562 uint64_t mcg_status
= qdict_get_int(qdict
, "mcg_status");
563 uint64_t addr
= qdict_get_int(qdict
, "addr");
564 uint64_t misc
= qdict_get_int(qdict
, "misc");
565 int flags
= MCE_INJECT_UNCOND_AO
;
567 if (qdict_get_try_bool(qdict
, "broadcast", false)) {
568 flags
|= MCE_INJECT_BROADCAST
;
570 cs
= qemu_get_cpu(cpu_index
);
573 cpu_x86_inject_mce(mon
, cpu
, bank
, status
, mcg_status
, addr
, misc
,
578 static target_long
monitor_get_pc(const struct MonitorDef
*md
, int val
)
580 CPUArchState
*env
= mon_get_cpu_env();
581 return env
->eip
+ env
->segs
[R_CS
].base
;
584 const MonitorDef monitor_defs
[] = {
585 #define SEG(name, seg) \
586 { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
587 { name ".base", offsetof(CPUX86State, segs[seg].base) },\
588 { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
590 { "eax", offsetof(CPUX86State
, regs
[0]) },
591 { "ecx", offsetof(CPUX86State
, regs
[1]) },
592 { "edx", offsetof(CPUX86State
, regs
[2]) },
593 { "ebx", offsetof(CPUX86State
, regs
[3]) },
594 { "esp|sp", offsetof(CPUX86State
, regs
[4]) },
595 { "ebp|fp", offsetof(CPUX86State
, regs
[5]) },
596 { "esi", offsetof(CPUX86State
, regs
[6]) },
597 { "edi", offsetof(CPUX86State
, regs
[7]) },
599 { "r8", offsetof(CPUX86State
, regs
[8]) },
600 { "r9", offsetof(CPUX86State
, regs
[9]) },
601 { "r10", offsetof(CPUX86State
, regs
[10]) },
602 { "r11", offsetof(CPUX86State
, regs
[11]) },
603 { "r12", offsetof(CPUX86State
, regs
[12]) },
604 { "r13", offsetof(CPUX86State
, regs
[13]) },
605 { "r14", offsetof(CPUX86State
, regs
[14]) },
606 { "r15", offsetof(CPUX86State
, regs
[15]) },
608 { "eflags", offsetof(CPUX86State
, eflags
) },
609 { "eip", offsetof(CPUX86State
, eip
) },
616 { "pc", 0, monitor_get_pc
, },
620 const MonitorDef
*target_monitor_defs(void)
625 void hmp_info_local_apic(Monitor
*mon
, const QDict
*qdict
)
627 x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon
, monitor_fprintf
,
631 void hmp_info_io_apic(Monitor
*mon
, const QDict
*qdict
)
633 if (kvm_irqchip_in_kernel() &&
634 !kvm_irqchip_is_split()) {
635 kvm_ioapic_dump_state(mon
, qdict
);
637 ioapic_dump_state(mon
, qdict
);