/*
 * QEMU monitor (i386 target-specific HMP commands)
 */
/*
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "hmp.h"
#include "qapi/qmp/qdict.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
35 static void print_pte(Monitor
*mon
, CPUArchState
*env
, hwaddr addr
,
36 hwaddr pte
, hwaddr mask
)
39 if (env
->cr
[4] & CR4_LA57_MASK
) {
40 if (addr
& (1ULL << 56)) {
44 if (addr
& (1ULL << 47)) {
49 monitor_printf(mon
, TARGET_FMT_plx
": " TARGET_FMT_plx
50 " %c%c%c%c%c%c%c%c%c\n",
53 pte
& PG_NX_MASK
? 'X' : '-',
54 pte
& PG_GLOBAL_MASK
? 'G' : '-',
55 pte
& PG_PSE_MASK
? 'P' : '-',
56 pte
& PG_DIRTY_MASK
? 'D' : '-',
57 pte
& PG_ACCESSED_MASK
? 'A' : '-',
58 pte
& PG_PCD_MASK
? 'C' : '-',
59 pte
& PG_PWT_MASK
? 'T' : '-',
60 pte
& PG_USER_MASK
? 'U' : '-',
61 pte
& PG_RW_MASK
? 'W' : '-');
64 static void tlb_info_32(Monitor
*mon
, CPUArchState
*env
)
67 uint32_t pgd
, pde
, pte
;
69 pgd
= env
->cr
[3] & ~0xfff;
70 for(l1
= 0; l1
< 1024; l1
++) {
71 cpu_physical_memory_read(pgd
+ l1
* 4, &pde
, 4);
72 pde
= le32_to_cpu(pde
);
73 if (pde
& PG_PRESENT_MASK
) {
74 if ((pde
& PG_PSE_MASK
) && (env
->cr
[4] & CR4_PSE_MASK
)) {
76 print_pte(mon
, env
, (l1
<< 22), pde
, ~((1 << 21) - 1));
78 for(l2
= 0; l2
< 1024; l2
++) {
79 cpu_physical_memory_read((pde
& ~0xfff) + l2
* 4, &pte
, 4);
80 pte
= le32_to_cpu(pte
);
81 if (pte
& PG_PRESENT_MASK
) {
82 print_pte(mon
, env
, (l1
<< 22) + (l2
<< 12),
92 static void tlb_info_pae32(Monitor
*mon
, CPUArchState
*env
)
94 unsigned int l1
, l2
, l3
;
95 uint64_t pdpe
, pde
, pte
;
96 uint64_t pdp_addr
, pd_addr
, pt_addr
;
98 pdp_addr
= env
->cr
[3] & ~0x1f;
99 for (l1
= 0; l1
< 4; l1
++) {
100 cpu_physical_memory_read(pdp_addr
+ l1
* 8, &pdpe
, 8);
101 pdpe
= le64_to_cpu(pdpe
);
102 if (pdpe
& PG_PRESENT_MASK
) {
103 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
104 for (l2
= 0; l2
< 512; l2
++) {
105 cpu_physical_memory_read(pd_addr
+ l2
* 8, &pde
, 8);
106 pde
= le64_to_cpu(pde
);
107 if (pde
& PG_PRESENT_MASK
) {
108 if (pde
& PG_PSE_MASK
) {
109 /* 2M pages with PAE, CR4.PSE is ignored */
110 print_pte(mon
, env
, (l1
<< 30) + (l2
<< 21), pde
,
111 ~((hwaddr
)(1 << 20) - 1));
113 pt_addr
= pde
& 0x3fffffffff000ULL
;
114 for (l3
= 0; l3
< 512; l3
++) {
115 cpu_physical_memory_read(pt_addr
+ l3
* 8, &pte
, 8);
116 pte
= le64_to_cpu(pte
);
117 if (pte
& PG_PRESENT_MASK
) {
118 print_pte(mon
, env
, (l1
<< 30) + (l2
<< 21)
132 static void tlb_info_la48(Monitor
*mon
, CPUArchState
*env
,
133 uint64_t l0
, uint64_t pml4_addr
)
135 uint64_t l1
, l2
, l3
, l4
;
136 uint64_t pml4e
, pdpe
, pde
, pte
;
137 uint64_t pdp_addr
, pd_addr
, pt_addr
;
139 for (l1
= 0; l1
< 512; l1
++) {
140 cpu_physical_memory_read(pml4_addr
+ l1
* 8, &pml4e
, 8);
141 pml4e
= le64_to_cpu(pml4e
);
142 if (!(pml4e
& PG_PRESENT_MASK
)) {
146 pdp_addr
= pml4e
& 0x3fffffffff000ULL
;
147 for (l2
= 0; l2
< 512; l2
++) {
148 cpu_physical_memory_read(pdp_addr
+ l2
* 8, &pdpe
, 8);
149 pdpe
= le64_to_cpu(pdpe
);
150 if (!(pdpe
& PG_PRESENT_MASK
)) {
154 if (pdpe
& PG_PSE_MASK
) {
155 /* 1G pages, CR4.PSE is ignored */
156 print_pte(mon
, env
, (l0
<< 48) + (l1
<< 39) + (l2
<< 30),
157 pdpe
, 0x3ffffc0000000ULL
);
161 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
162 for (l3
= 0; l3
< 512; l3
++) {
163 cpu_physical_memory_read(pd_addr
+ l3
* 8, &pde
, 8);
164 pde
= le64_to_cpu(pde
);
165 if (!(pde
& PG_PRESENT_MASK
)) {
169 if (pde
& PG_PSE_MASK
) {
170 /* 2M pages, CR4.PSE is ignored */
171 print_pte(mon
, env
, (l0
<< 48) + (l1
<< 39) + (l2
<< 30) +
172 (l3
<< 21), pde
, 0x3ffffffe00000ULL
);
176 pt_addr
= pde
& 0x3fffffffff000ULL
;
177 for (l4
= 0; l4
< 512; l4
++) {
178 cpu_physical_memory_read(pt_addr
181 pte
= le64_to_cpu(pte
);
182 if (pte
& PG_PRESENT_MASK
) {
183 print_pte(mon
, env
, (l0
<< 48) + (l1
<< 39) +
184 (l2
<< 30) + (l3
<< 21) + (l4
<< 12),
185 pte
& ~PG_PSE_MASK
, 0x3fffffffff000ULL
);
193 static void tlb_info_la57(Monitor
*mon
, CPUArchState
*env
)
199 pml5_addr
= env
->cr
[3] & 0x3fffffffff000ULL
;
200 for (l0
= 0; l0
< 512; l0
++) {
201 cpu_physical_memory_read(pml5_addr
+ l0
* 8, &pml5e
, 8);
202 pml5e
= le64_to_cpu(pml5e
);
203 if (pml5e
& PG_PRESENT_MASK
) {
204 tlb_info_la48(mon
, env
, l0
, pml5e
& 0x3fffffffff000ULL
);
#endif /* TARGET_X86_64 */
210 void hmp_info_tlb(Monitor
*mon
, const QDict
*qdict
)
214 env
= mon_get_cpu_env();
216 monitor_printf(mon
, "No CPU available\n");
220 if (!(env
->cr
[0] & CR0_PG_MASK
)) {
221 monitor_printf(mon
, "PG disabled\n");
224 if (env
->cr
[4] & CR4_PAE_MASK
) {
226 if (env
->hflags
& HF_LMA_MASK
) {
227 if (env
->cr
[4] & CR4_LA57_MASK
) {
228 tlb_info_la57(mon
, env
);
230 tlb_info_la48(mon
, env
, 0, env
->cr
[3] & 0x3fffffffff000ULL
);
235 tlb_info_pae32(mon
, env
);
238 tlb_info_32(mon
, env
);
242 static void mem_print(Monitor
*mon
, hwaddr
*pstart
,
244 hwaddr end
, int prot
)
250 monitor_printf(mon
, TARGET_FMT_plx
"-" TARGET_FMT_plx
" "
251 TARGET_FMT_plx
" %c%c%c\n",
252 *pstart
, end
, end
- *pstart
,
253 prot1
& PG_USER_MASK
? 'u' : '-',
255 prot1
& PG_RW_MASK
? 'w' : '-');
265 static void mem_info_32(Monitor
*mon
, CPUArchState
*env
)
269 uint32_t pgd
, pde
, pte
;
272 pgd
= env
->cr
[3] & ~0xfff;
275 for(l1
= 0; l1
< 1024; l1
++) {
276 cpu_physical_memory_read(pgd
+ l1
* 4, &pde
, 4);
277 pde
= le32_to_cpu(pde
);
279 if (pde
& PG_PRESENT_MASK
) {
280 if ((pde
& PG_PSE_MASK
) && (env
->cr
[4] & CR4_PSE_MASK
)) {
281 prot
= pde
& (PG_USER_MASK
| PG_RW_MASK
| PG_PRESENT_MASK
);
282 mem_print(mon
, &start
, &last_prot
, end
, prot
);
284 for(l2
= 0; l2
< 1024; l2
++) {
285 cpu_physical_memory_read((pde
& ~0xfff) + l2
* 4, &pte
, 4);
286 pte
= le32_to_cpu(pte
);
287 end
= (l1
<< 22) + (l2
<< 12);
288 if (pte
& PG_PRESENT_MASK
) {
290 (PG_USER_MASK
| PG_RW_MASK
| PG_PRESENT_MASK
);
294 mem_print(mon
, &start
, &last_prot
, end
, prot
);
299 mem_print(mon
, &start
, &last_prot
, end
, prot
);
302 /* Flush last range */
303 mem_print(mon
, &start
, &last_prot
, (hwaddr
)1 << 32, 0);
306 static void mem_info_pae32(Monitor
*mon
, CPUArchState
*env
)
308 unsigned int l1
, l2
, l3
;
310 uint64_t pdpe
, pde
, pte
;
311 uint64_t pdp_addr
, pd_addr
, pt_addr
;
314 pdp_addr
= env
->cr
[3] & ~0x1f;
317 for (l1
= 0; l1
< 4; l1
++) {
318 cpu_physical_memory_read(pdp_addr
+ l1
* 8, &pdpe
, 8);
319 pdpe
= le64_to_cpu(pdpe
);
321 if (pdpe
& PG_PRESENT_MASK
) {
322 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
323 for (l2
= 0; l2
< 512; l2
++) {
324 cpu_physical_memory_read(pd_addr
+ l2
* 8, &pde
, 8);
325 pde
= le64_to_cpu(pde
);
326 end
= (l1
<< 30) + (l2
<< 21);
327 if (pde
& PG_PRESENT_MASK
) {
328 if (pde
& PG_PSE_MASK
) {
329 prot
= pde
& (PG_USER_MASK
| PG_RW_MASK
|
331 mem_print(mon
, &start
, &last_prot
, end
, prot
);
333 pt_addr
= pde
& 0x3fffffffff000ULL
;
334 for (l3
= 0; l3
< 512; l3
++) {
335 cpu_physical_memory_read(pt_addr
+ l3
* 8, &pte
, 8);
336 pte
= le64_to_cpu(pte
);
337 end
= (l1
<< 30) + (l2
<< 21) + (l3
<< 12);
338 if (pte
& PG_PRESENT_MASK
) {
339 prot
= pte
& pde
& (PG_USER_MASK
| PG_RW_MASK
|
344 mem_print(mon
, &start
, &last_prot
, end
, prot
);
349 mem_print(mon
, &start
, &last_prot
, end
, prot
);
354 mem_print(mon
, &start
, &last_prot
, end
, prot
);
357 /* Flush last range */
358 mem_print(mon
, &start
, &last_prot
, (hwaddr
)1 << 32, 0);
363 static void mem_info_la48(Monitor
*mon
, CPUArchState
*env
)
366 uint64_t l1
, l2
, l3
, l4
;
367 uint64_t pml4e
, pdpe
, pde
, pte
;
368 uint64_t pml4_addr
, pdp_addr
, pd_addr
, pt_addr
, start
, end
;
370 pml4_addr
= env
->cr
[3] & 0x3fffffffff000ULL
;
373 for (l1
= 0; l1
< 512; l1
++) {
374 cpu_physical_memory_read(pml4_addr
+ l1
* 8, &pml4e
, 8);
375 pml4e
= le64_to_cpu(pml4e
);
377 if (pml4e
& PG_PRESENT_MASK
) {
378 pdp_addr
= pml4e
& 0x3fffffffff000ULL
;
379 for (l2
= 0; l2
< 512; l2
++) {
380 cpu_physical_memory_read(pdp_addr
+ l2
* 8, &pdpe
, 8);
381 pdpe
= le64_to_cpu(pdpe
);
382 end
= (l1
<< 39) + (l2
<< 30);
383 if (pdpe
& PG_PRESENT_MASK
) {
384 if (pdpe
& PG_PSE_MASK
) {
385 prot
= pdpe
& (PG_USER_MASK
| PG_RW_MASK
|
388 mem_print(mon
, &start
, &last_prot
, end
, prot
);
390 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
391 for (l3
= 0; l3
< 512; l3
++) {
392 cpu_physical_memory_read(pd_addr
+ l3
* 8, &pde
, 8);
393 pde
= le64_to_cpu(pde
);
394 end
= (l1
<< 39) + (l2
<< 30) + (l3
<< 21);
395 if (pde
& PG_PRESENT_MASK
) {
396 if (pde
& PG_PSE_MASK
) {
397 prot
= pde
& (PG_USER_MASK
| PG_RW_MASK
|
399 prot
&= pml4e
& pdpe
;
400 mem_print(mon
, &start
, &last_prot
, end
, prot
);
402 pt_addr
= pde
& 0x3fffffffff000ULL
;
403 for (l4
= 0; l4
< 512; l4
++) {
404 cpu_physical_memory_read(pt_addr
407 pte
= le64_to_cpu(pte
);
408 end
= (l1
<< 39) + (l2
<< 30) +
409 (l3
<< 21) + (l4
<< 12);
410 if (pte
& PG_PRESENT_MASK
) {
411 prot
= pte
& (PG_USER_MASK
| PG_RW_MASK
|
413 prot
&= pml4e
& pdpe
& pde
;
417 mem_print(mon
, &start
, &last_prot
, end
, prot
);
422 mem_print(mon
, &start
, &last_prot
, end
, prot
);
428 mem_print(mon
, &start
, &last_prot
, end
, prot
);
433 mem_print(mon
, &start
, &last_prot
, end
, prot
);
436 /* Flush last range */
437 mem_print(mon
, &start
, &last_prot
, (hwaddr
)1 << 48, 0);
440 static void mem_info_la57(Monitor
*mon
, CPUArchState
*env
)
443 uint64_t l0
, l1
, l2
, l3
, l4
;
444 uint64_t pml5e
, pml4e
, pdpe
, pde
, pte
;
445 uint64_t pml5_addr
, pml4_addr
, pdp_addr
, pd_addr
, pt_addr
, start
, end
;
447 pml5_addr
= env
->cr
[3] & 0x3fffffffff000ULL
;
450 for (l0
= 0; l0
< 512; l0
++) {
451 cpu_physical_memory_read(pml5_addr
+ l0
* 8, &pml5e
, 8);
452 pml5e
= le64_to_cpu(pml5e
);
454 if (!(pml5e
& PG_PRESENT_MASK
)) {
456 mem_print(mon
, &start
, &last_prot
, end
, prot
);
460 pml4_addr
= pml5e
& 0x3fffffffff000ULL
;
461 for (l1
= 0; l1
< 512; l1
++) {
462 cpu_physical_memory_read(pml4_addr
+ l1
* 8, &pml4e
, 8);
463 pml4e
= le64_to_cpu(pml4e
);
464 end
= (l0
<< 48) + (l1
<< 39);
465 if (!(pml4e
& PG_PRESENT_MASK
)) {
467 mem_print(mon
, &start
, &last_prot
, end
, prot
);
471 pdp_addr
= pml4e
& 0x3fffffffff000ULL
;
472 for (l2
= 0; l2
< 512; l2
++) {
473 cpu_physical_memory_read(pdp_addr
+ l2
* 8, &pdpe
, 8);
474 pdpe
= le64_to_cpu(pdpe
);
475 end
= (l0
<< 48) + (l1
<< 39) + (l2
<< 30);
476 if (pdpe
& PG_PRESENT_MASK
) {
478 mem_print(mon
, &start
, &last_prot
, end
, prot
);
482 if (pdpe
& PG_PSE_MASK
) {
483 prot
= pdpe
& (PG_USER_MASK
| PG_RW_MASK
|
485 prot
&= pml5e
& pml4e
;
486 mem_print(mon
, &start
, &last_prot
, end
, prot
);
490 pd_addr
= pdpe
& 0x3fffffffff000ULL
;
491 for (l3
= 0; l3
< 512; l3
++) {
492 cpu_physical_memory_read(pd_addr
+ l3
* 8, &pde
, 8);
493 pde
= le64_to_cpu(pde
);
494 end
= (l0
<< 48) + (l1
<< 39) + (l2
<< 30) + (l3
<< 21);
495 if (pde
& PG_PRESENT_MASK
) {
497 mem_print(mon
, &start
, &last_prot
, end
, prot
);
501 if (pde
& PG_PSE_MASK
) {
502 prot
= pde
& (PG_USER_MASK
| PG_RW_MASK
|
504 prot
&= pml5e
& pml4e
& pdpe
;
505 mem_print(mon
, &start
, &last_prot
, end
, prot
);
509 pt_addr
= pde
& 0x3fffffffff000ULL
;
510 for (l4
= 0; l4
< 512; l4
++) {
511 cpu_physical_memory_read(pt_addr
+ l4
* 8, &pte
, 8);
512 pte
= le64_to_cpu(pte
);
513 end
= (l0
<< 48) + (l1
<< 39) + (l2
<< 30) +
514 (l3
<< 21) + (l4
<< 12);
515 if (pte
& PG_PRESENT_MASK
) {
516 prot
= pte
& (PG_USER_MASK
| PG_RW_MASK
|
518 prot
&= pml5e
& pml4e
& pdpe
& pde
;
522 mem_print(mon
, &start
, &last_prot
, end
, prot
);
528 /* Flush last range */
529 mem_print(mon
, &start
, &last_prot
, (hwaddr
)1 << 57, 0);
#endif /* TARGET_X86_64 */
533 void hmp_info_mem(Monitor
*mon
, const QDict
*qdict
)
537 env
= mon_get_cpu_env();
539 monitor_printf(mon
, "No CPU available\n");
543 if (!(env
->cr
[0] & CR0_PG_MASK
)) {
544 monitor_printf(mon
, "PG disabled\n");
547 if (env
->cr
[4] & CR4_PAE_MASK
) {
549 if (env
->hflags
& HF_LMA_MASK
) {
550 if (env
->cr
[4] & CR4_LA57_MASK
) {
551 mem_info_la57(mon
, env
);
553 mem_info_la48(mon
, env
);
558 mem_info_pae32(mon
, env
);
561 mem_info_32(mon
, env
);
565 void hmp_mce(Monitor
*mon
, const QDict
*qdict
)
569 int cpu_index
= qdict_get_int(qdict
, "cpu_index");
570 int bank
= qdict_get_int(qdict
, "bank");
571 uint64_t status
= qdict_get_int(qdict
, "status");
572 uint64_t mcg_status
= qdict_get_int(qdict
, "mcg_status");
573 uint64_t addr
= qdict_get_int(qdict
, "addr");
574 uint64_t misc
= qdict_get_int(qdict
, "misc");
575 int flags
= MCE_INJECT_UNCOND_AO
;
577 if (qdict_get_try_bool(qdict
, "broadcast", false)) {
578 flags
|= MCE_INJECT_BROADCAST
;
580 cs
= qemu_get_cpu(cpu_index
);
583 cpu_x86_inject_mce(mon
, cpu
, bank
, status
, mcg_status
, addr
, misc
,
588 static target_long
monitor_get_pc(const struct MonitorDef
*md
, int val
)
590 CPUArchState
*env
= mon_get_cpu_env();
591 return env
->eip
+ env
->segs
[R_CS
].base
;
594 const MonitorDef monitor_defs
[] = {
595 #define SEG(name, seg) \
596 { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
597 { name ".base", offsetof(CPUX86State, segs[seg].base) },\
598 { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
600 { "eax", offsetof(CPUX86State
, regs
[0]) },
601 { "ecx", offsetof(CPUX86State
, regs
[1]) },
602 { "edx", offsetof(CPUX86State
, regs
[2]) },
603 { "ebx", offsetof(CPUX86State
, regs
[3]) },
604 { "esp|sp", offsetof(CPUX86State
, regs
[4]) },
605 { "ebp|fp", offsetof(CPUX86State
, regs
[5]) },
606 { "esi", offsetof(CPUX86State
, regs
[6]) },
607 { "edi", offsetof(CPUX86State
, regs
[7]) },
609 { "r8", offsetof(CPUX86State
, regs
[8]) },
610 { "r9", offsetof(CPUX86State
, regs
[9]) },
611 { "r10", offsetof(CPUX86State
, regs
[10]) },
612 { "r11", offsetof(CPUX86State
, regs
[11]) },
613 { "r12", offsetof(CPUX86State
, regs
[12]) },
614 { "r13", offsetof(CPUX86State
, regs
[13]) },
615 { "r14", offsetof(CPUX86State
, regs
[14]) },
616 { "r15", offsetof(CPUX86State
, regs
[15]) },
618 { "eflags", offsetof(CPUX86State
, eflags
) },
619 { "eip", offsetof(CPUX86State
, eip
) },
626 { "pc", 0, monitor_get_pc
, },
630 const MonitorDef
*target_monitor_defs(void)
635 void hmp_info_local_apic(Monitor
*mon
, const QDict
*qdict
)
639 if (qdict_haskey(qdict
, "apic-id")) {
640 int id
= qdict_get_try_int(qdict
, "apic-id", 0);
641 cs
= cpu_by_arch_id(id
);
648 monitor_printf(mon
, "No CPU available\n");
651 x86_cpu_dump_local_apic_state(cs
, (FILE *)mon
, monitor_fprintf
,
655 void hmp_info_io_apic(Monitor
*mon
, const QDict
*qdict
)
657 if (kvm_irqchip_in_kernel() &&
658 !kvm_irqchip_is_split()) {
659 kvm_ioapic_dump_state(mon
, qdict
);
661 ioapic_dump_state(mon
, qdict
);