/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "qapi/qmp/qdict.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "hmp.h"

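/*
 * Print one page-table entry: sign-extend the virtual address to its
 * canonical form (48- or 57-bit depending on CR4.LA57), then show the
 * masked physical address and the decoded NX/G/PSE/D/A/PCD/PWT/U/W bits.
 */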
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= -1LL << 57;
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= -1LL << 48;
        }
    }
#endif
    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

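/*
 * Walk the two-level 32-bit (non-PAE) page tables and print every present
 * mapping; 4MB PSE pages are honoured only when CR4.PSE is set.
 */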
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

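/*
 * Walk the three-level PAE page tables (four PDPT entries) and print every
 * present mapping, including 2MB pages.
 */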
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

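/*
 * 64-bit walkers: tlb_info_la48() dumps a four-level (48-bit) page table
 * rooted at pml4_addr, with l0 supplying the PML5 index so that the printed
 * virtual addresses are complete (0 when called directly for 48-bit mode);
 * tlb_info_la57() iterates the PML5 entries and reuses it for each one.
 */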
#ifdef TARGET_X86_64
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
                          uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                          pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                              (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                  (l2 << 30) + (l3 << 21) + (l4 << 12),
                                  pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */

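/*
 * HMP "info tlb": dump the guest page tables of the current CPU, choosing
 * the walker that matches the active paging mode (32-bit, PAE, LA48, LA57).
 */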
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

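/*
 * Accumulate contiguous ranges with identical protection bits and print a
 * line (start-end, size, u/r/w flags) whenever the protection changes.
 */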
static void mem_print(Monitor *mon, hwaddr *pstart,
                      int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           *pstart, end, end - *pstart,
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0)
            *pstart = end;
        else
            *pstart = -1;
        *plast_prot = prot;
    }
}

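/*
 * HMP "info mem" helper for 32-bit (non-PAE) paging: merge the effective
 * user/write bits of PDE and PTE and report them range by range.
 */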
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

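/* Same as mem_info_32(), but for the three-level PAE layout with 2MB pages. */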
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

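/*
 * Four-level (LA48) variant: the effective protection of each mapping is the
 * AND of the user/write/present bits of every level above it.
 */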
#ifdef TARGET_X86_64
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}

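/* Five-level (LA57) variant of mem_info_la48(). */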
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                   PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                          PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */

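/*
 * HMP "info mem": print the guest's virtual memory map (ranges plus
 * user/read/write protection), selecting the walker for the active
 * paging mode.
 */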
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

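/* HMP "mce": inject a machine check exception into the selected vCPU. */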
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

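/* Pseudo-register "pc": the linear instruction address, CS.base + EIP. */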
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}

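/*
 * Register table for monitor expressions ($eax, $cs.base, ...): the general
 * purpose registers, eflags/eip, the segment registers via SEG(), and the
 * derived "pc" value.
 */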
const MonitorDef monitor_defs[] = {
#define SEG(name, seg)                                                  \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 }, \
    { name ".base", offsetof(CPUX86State, segs[seg].base) },           \
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

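/*
 * HMP "info lapic": dump the local APIC state of the CPU selected by
 * "apic-id" (if given) or of the current default CPU.
 */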
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu();
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}

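/*
 * HMP "info ioapic": use the in-kernel dump only when the whole irqchip is
 * in the kernel; with a split or userspace irqchip, dump the emulated IOAPIC.
 */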
void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    if (kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        kvm_ioapic_dump_state(mon, qdict);
    } else {
        ioapic_dump_state(mon, qdict);
    }
}