]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/monitor.c
Merge remote-tracking branch 'remotes/kraxel/tags/pull-vga-20170103-1' into staging
[mirror_qemu.git] / target / i386 / monitor.c
1 /*
2 * QEMU monitor
3 *
4 * Copyright (c) 2003-2004 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "qemu/osdep.h"
25 #include "cpu.h"
26 #include "monitor/monitor.h"
27 #include "monitor/hmp-target.h"
28 #include "hw/i386/pc.h"
29 #include "sysemu/kvm.h"
30 #include "hmp.h"
31
32
/*
 * print_pte: print one leaf page mapping for "info tlb".
 *
 * @addr: virtual address of the mapping (page-aligned).
 * @pte:  raw page-table entry for the leaf page.
 * @mask: mask selecting the physical-address bits of @pte for this
 *        page size (the caller picks it per level).
 *
 * Output line: "<vaddr>: <paddr> XGPDACTUW", with '-' for each clear bit.
 */
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
#ifdef TARGET_X86_64
    /* Sign-extend to a canonical virtual address: 57-bit when LA57
       (5-level paging) is enabled, 48-bit otherwise. */
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= -1LL << 57;
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= -1LL << 48;
        }
    }
#endif
    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',     /* no-execute */
                   pte & PG_GLOBAL_MASK ? 'G' : '-', /* global */
                   pte & PG_PSE_MASK ? 'P' : '-',    /* large-page bit */
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',    /* cache disable */
                   pte & PG_PWT_MASK ? 'T' : '-',    /* write-through */
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}
61
62 static void tlb_info_32(Monitor *mon, CPUArchState *env)
63 {
64 unsigned int l1, l2;
65 uint32_t pgd, pde, pte;
66
67 pgd = env->cr[3] & ~0xfff;
68 for(l1 = 0; l1 < 1024; l1++) {
69 cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
70 pde = le32_to_cpu(pde);
71 if (pde & PG_PRESENT_MASK) {
72 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
73 /* 4M pages */
74 print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
75 } else {
76 for(l2 = 0; l2 < 1024; l2++) {
77 cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
78 pte = le32_to_cpu(pte);
79 if (pte & PG_PRESENT_MASK) {
80 print_pte(mon, env, (l1 << 22) + (l2 << 12),
81 pte & ~PG_PSE_MASK,
82 ~0xfff);
83 }
84 }
85 }
86 }
87 }
88 }
89
90 static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
91 {
92 unsigned int l1, l2, l3;
93 uint64_t pdpe, pde, pte;
94 uint64_t pdp_addr, pd_addr, pt_addr;
95
96 pdp_addr = env->cr[3] & ~0x1f;
97 for (l1 = 0; l1 < 4; l1++) {
98 cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
99 pdpe = le64_to_cpu(pdpe);
100 if (pdpe & PG_PRESENT_MASK) {
101 pd_addr = pdpe & 0x3fffffffff000ULL;
102 for (l2 = 0; l2 < 512; l2++) {
103 cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
104 pde = le64_to_cpu(pde);
105 if (pde & PG_PRESENT_MASK) {
106 if (pde & PG_PSE_MASK) {
107 /* 2M pages with PAE, CR4.PSE is ignored */
108 print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
109 ~((hwaddr)(1 << 20) - 1));
110 } else {
111 pt_addr = pde & 0x3fffffffff000ULL;
112 for (l3 = 0; l3 < 512; l3++) {
113 cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
114 pte = le64_to_cpu(pte);
115 if (pte & PG_PRESENT_MASK) {
116 print_pte(mon, env, (l1 << 30) + (l2 << 21)
117 + (l3 << 12),
118 pte & ~PG_PSE_MASK,
119 ~(hwaddr)0xfff);
120 }
121 }
122 }
123 }
124 }
125 }
126 }
127 }
128
129 #ifdef TARGET_X86_64
/*
 * tlb_info_la48: walk a 4-level (48-bit VA) page-table hierarchy and
 * print every present leaf mapping.
 *
 * @l0:        PML5 index when called from the LA57 walker (contributes
 *             bits 48+ of the virtual address); 0 for plain 4-level mode.
 * @pml4_addr: physical address of the PML4 table to walk.
 */
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
        uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                        pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                /* 4 KB pages: walk the final-level page table */
                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr
                            + l4 * 8,
                            &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                (l2 << 30) + (l3 << 21) + (l4 << 12),
                                pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}
190
191 static void tlb_info_la57(Monitor *mon, CPUArchState *env)
192 {
193 uint64_t l0;
194 uint64_t pml5e;
195 uint64_t pml5_addr;
196
197 pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
198 for (l0 = 0; l0 < 512; l0++) {
199 cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
200 pml5e = le64_to_cpu(pml5e);
201 if (pml5e & PG_PRESENT_MASK) {
202 tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
203 }
204 }
205 }
206 #endif /* TARGET_X86_64 */
207
/*
 * hmp_info_tlb: HMP "info tlb" — dump the current CPU's page mappings
 * by walking the guest page tables, choosing the walker that matches
 * the active paging mode (32-bit, PAE, 4-level or 5-level long mode).
 */
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                /* 4-level walk starting directly from CR3 */
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}
235
236 static void mem_print(Monitor *mon, hwaddr *pstart,
237 int *plast_prot,
238 hwaddr end, int prot)
239 {
240 int prot1;
241 prot1 = *plast_prot;
242 if (prot != prot1) {
243 if (*pstart != -1) {
244 monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
245 TARGET_FMT_plx " %c%c%c\n",
246 *pstart, end, end - *pstart,
247 prot1 & PG_USER_MASK ? 'u' : '-',
248 'r',
249 prot1 & PG_RW_MASK ? 'w' : '-');
250 }
251 if (prot != 0)
252 *pstart = end;
253 else
254 *pstart = -1;
255 *plast_prot = prot;
256 }
257 }
258
/*
 * mem_info_32: dump virtual-memory ranges for "info mem" with plain
 * 32-bit (non-PAE) paging.  Adjacent pages with identical USER/RW
 * protection are coalesced by mem_print().
 */
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;   /* page-directory base from CR3 */
    last_prot = 0;
    start = -1;                  /* -1: no open range yet */
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page: protection comes from the PDE alone */
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        /* effective protection is the AND of both levels */
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}
299
/*
 * mem_info_pae32: dump virtual-memory ranges for "info mem" with
 * 32-bit PAE paging (4 PDPT entries, 2 MB or 4 KB leaf pages).
 * Ranges with identical USER/RW protection are coalesced by mem_print().
 */
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;   /* 32-byte aligned PDPT base */
    last_prot = 0;
    start = -1;                      /* -1: no open range yet */
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2 MB page: protection comes from the PDE */
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                /* effective protection ANDs PDE and PTE */
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}
354
355
356 #ifdef TARGET_X86_64
/*
 * mem_info_la48: dump virtual-memory ranges for "info mem" with 4-level
 * (48-bit VA) long-mode paging.  Walks PML4 -> PDP -> PD -> PT; 1G and
 * 2M leaf pages are handled at the PDP and PD levels.  The effective
 * protection of a page is the AND of the USER/RW bits of every level.
 * Ranges with identical protection are coalesced by mem_print().
 */
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;   /* -1: no open range yet */
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1 GB page */
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2 MB page */
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}
433
434 static void mem_info_la57(Monitor *mon, CPUArchState *env)
435 {
436 int prot, last_prot;
437 uint64_t l0, l1, l2, l3, l4;
438 uint64_t pml5e, pml4e, pdpe, pde, pte;
439 uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
440
441 pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
442 last_prot = 0;
443 start = -1;
444 for (l0 = 0; l0 < 512; l0++) {
445 cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
446 pml4e = le64_to_cpu(pml5e);
447 end = l0 << 48;
448 if (!(pml5e & PG_PRESENT_MASK)) {
449 prot = 0;
450 mem_print(mon, &start, &last_prot, end, prot);
451 continue;
452 }
453
454 pml4_addr = pml5e & 0x3fffffffff000ULL;
455 for (l1 = 0; l1 < 512; l1++) {
456 cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
457 pml4e = le64_to_cpu(pml4e);
458 end = (l0 << 48) + (l1 << 39);
459 if (!(pml4e & PG_PRESENT_MASK)) {
460 prot = 0;
461 mem_print(mon, &start, &last_prot, end, prot);
462 continue;
463 }
464
465 pdp_addr = pml4e & 0x3fffffffff000ULL;
466 for (l2 = 0; l2 < 512; l2++) {
467 cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
468 pdpe = le64_to_cpu(pdpe);
469 end = (l0 << 48) + (l1 << 39) + (l2 << 30);
470 if (pdpe & PG_PRESENT_MASK) {
471 prot = 0;
472 mem_print(mon, &start, &last_prot, end, prot);
473 continue;
474 }
475
476 if (pdpe & PG_PSE_MASK) {
477 prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
478 PG_PRESENT_MASK);
479 prot &= pml4e;
480 mem_print(mon, &start, &last_prot, end, prot);
481 continue;
482 }
483
484 pd_addr = pdpe & 0x3fffffffff000ULL;
485 for (l3 = 0; l3 < 512; l3++) {
486 cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
487 pde = le64_to_cpu(pde);
488 end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
489 if (pde & PG_PRESENT_MASK) {
490 prot = 0;
491 mem_print(mon, &start, &last_prot, end, prot);
492 continue;
493 }
494
495 if (pde & PG_PSE_MASK) {
496 prot = pde & (PG_USER_MASK | PG_RW_MASK |
497 PG_PRESENT_MASK);
498 prot &= pml4e & pdpe;
499 mem_print(mon, &start, &last_prot, end, prot);
500 continue;
501 }
502
503 pt_addr = pde & 0x3fffffffff000ULL;
504 for (l4 = 0; l4 < 512; l4++) {
505 cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
506 pte = le64_to_cpu(pte);
507 end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
508 (l3 << 21) + (l4 << 12);
509 if (pte & PG_PRESENT_MASK) {
510 prot = pte & (PG_USER_MASK | PG_RW_MASK |
511 PG_PRESENT_MASK);
512 prot &= pml4e & pdpe & pde;
513 } else {
514 prot = 0;
515 }
516 mem_print(mon, &start, &last_prot, end, prot);
517 }
518 }
519 }
520 }
521 }
522 /* Flush last range */
523 mem_print(mon, &start, &last_prot, (hwaddr)1 << 57, 0);
524 }
525 #endif /* TARGET_X86_64 */
526
/*
 * hmp_info_mem: HMP "info mem" — dump the current CPU's virtual-memory
 * ranges with their USER/RW protection, choosing the page-table walker
 * that matches the active paging mode (32-bit, PAE, 4- or 5-level).
 */
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}
554
555 void hmp_mce(Monitor *mon, const QDict *qdict)
556 {
557 X86CPU *cpu;
558 CPUState *cs;
559 int cpu_index = qdict_get_int(qdict, "cpu_index");
560 int bank = qdict_get_int(qdict, "bank");
561 uint64_t status = qdict_get_int(qdict, "status");
562 uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
563 uint64_t addr = qdict_get_int(qdict, "addr");
564 uint64_t misc = qdict_get_int(qdict, "misc");
565 int flags = MCE_INJECT_UNCOND_AO;
566
567 if (qdict_get_try_bool(qdict, "broadcast", false)) {
568 flags |= MCE_INJECT_BROADCAST;
569 }
570 cs = qemu_get_cpu(cpu_index);
571 if (cs != NULL) {
572 cpu = X86_CPU(cs);
573 cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
574 flags);
575 }
576 }
577
578 static target_long monitor_get_pc(const struct MonitorDef *md, int val)
579 {
580 CPUArchState *env = mon_get_cpu_env();
581 return env->eip + env->segs[R_CS].base;
582 }
583
/*
 * monitor_defs: register-name table for HMP expression evaluation
 * ($eax, $cs.base, $pc, ...).  Entries map a name (alternatives
 * separated by '|') to an offset into CPUX86State, an optional getter,
 * and an optional width override.  Terminated by a NULL name.
 */
const MonitorDef monitor_defs[] = {
/* SEG expands to three entries per segment register: selector,
   segment base, and segment limit. */
#define SEG(name, seg)                                       \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    /* "pc" is computed via a getter, not a plain offset */
    { "pc", 0, monitor_get_pc, },
    { NULL },
};
619
/* target_monitor_defs: hand the target-specific register table to the
   generic monitor code. */
const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}
624
/*
 * hmp_info_local_apic: HMP "info lapic" — dump the current CPU's local
 * APIC state through the monitor's fprintf adapter.
 */
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}
630
631 void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
632 {
633 if (kvm_irqchip_in_kernel() &&
634 !kvm_irqchip_is_split()) {
635 kvm_ioapic_dump_state(mon, qdict);
636 } else {
637 ioapic_dump_state(mon, qdict);
638 }
639 }