4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
33 #include "qemu-char.h"
38 #define MAX_PACKET_LENGTH 4096
41 #include "qemu_socket.h"
44 #ifndef TARGET_CPU_MEMORY_RW_DEBUG
45 static inline int target_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
46 uint8_t *buf
, int len
, int is_write
)
48 return cpu_memory_rw_debug(env
, addr
, buf
, len
, is_write
);
51 /* target_memory_rw_debug() defined in cpu.h */
63 GDB_SIGNAL_UNKNOWN
= 143
66 #ifdef CONFIG_USER_ONLY
68 /* Map target signal numbers to GDB protocol signal numbers and vice
69 * versa. For user emulation's currently supported systems, we can
70 * assume most signals are defined.
73 static int gdb_signal_table
[] = {
233 /* In system mode we only need SIGINT and SIGTRAP; other signals
234 are not yet supported. */
241 static int gdb_signal_table
[] = {
251 #ifdef CONFIG_USER_ONLY
252 static int target_signal_to_gdb (int sig
)
255 for (i
= 0; i
< ARRAY_SIZE (gdb_signal_table
); i
++)
256 if (gdb_signal_table
[i
] == sig
)
258 return GDB_SIGNAL_UNKNOWN
;
262 static int gdb_signal_to_target (int sig
)
264 if (sig
< ARRAY_SIZE (gdb_signal_table
))
265 return gdb_signal_table
[sig
];
272 typedef struct GDBRegisterState
{
278 struct GDBRegisterState
*next
;
288 typedef struct GDBState
{
289 CPUArchState
*c_cpu
; /* current CPU for step/continue ops */
290 CPUArchState
*g_cpu
; /* current CPU for other ops */
291 CPUArchState
*query_cpu
; /* for q{f|s}ThreadInfo */
292 enum RSState state
; /* parsing state */
293 char line_buf
[MAX_PACKET_LENGTH
];
296 uint8_t last_packet
[MAX_PACKET_LENGTH
+ 4];
299 #ifdef CONFIG_USER_ONLY
303 CharDriverState
*chr
;
304 CharDriverState
*mon_chr
;
306 char syscall_buf
[256];
307 gdb_syscall_complete_cb current_syscall_cb
;
310 /* By default use no IRQs and no timers while single stepping so as to
311 * make single stepping like an ICE HW step.
313 static int sstep_flags
= SSTEP_ENABLE
|SSTEP_NOIRQ
|SSTEP_NOTIMER
;
315 static GDBState
*gdbserver_state
;
317 /* This is an ugly hack to cope with both new and old gdb.
318 If gdb sends qXfer:features:read then assume we're talking to a newish
319 gdb that understands target descriptions. */
320 static int gdb_has_xml
;
322 #ifdef CONFIG_USER_ONLY
323 /* XXX: This is not thread safe. Do we care? */
324 static int gdbserver_fd
= -1;
326 static int get_char(GDBState
*s
)
332 ret
= qemu_recv(s
->fd
, &ch
, 1, 0);
334 if (errno
== ECONNRESET
)
336 if (errno
!= EINTR
&& errno
!= EAGAIN
)
338 } else if (ret
== 0) {
356 /* If gdb is connected when the first semihosting syscall occurs then use
357 remote gdb syscalls. Otherwise use native file IO. */
358 int use_gdb_syscalls(void)
360 if (gdb_syscall_mode
== GDB_SYS_UNKNOWN
) {
361 gdb_syscall_mode
= (gdbserver_state
? GDB_SYS_ENABLED
364 return gdb_syscall_mode
== GDB_SYS_ENABLED
;
367 /* Resume execution. */
368 static inline void gdb_continue(GDBState
*s
)
370 #ifdef CONFIG_USER_ONLY
371 s
->running_state
= 1;
377 static void put_buffer(GDBState
*s
, const uint8_t *buf
, int len
)
379 #ifdef CONFIG_USER_ONLY
383 ret
= send(s
->fd
, buf
, len
, 0);
385 if (errno
!= EINTR
&& errno
!= EAGAIN
)
393 qemu_chr_fe_write(s
->chr
, buf
, len
);
/* Convert a single ASCII hex digit to its numeric value (0-15).
 * Accepts '0'-'9', 'A'-'F' and 'a'-'f'; any other character yields 0.
 * No error is reported for bad digits -- the remote protocol's packet
 * checksum is what guards against corrupted input.
 */
static inline int fromhex(int v)
{
    if (v >= '0' && v <= '9')
        return v - '0';
    else if (v >= 'A' && v <= 'F')
        return v - 'A' + 10;
    else if (v >= 'a' && v <= 'f')
        return v - 'a' + 10;
    else
        return 0;
}
/* Convert a value in the range 0..15 to its ASCII hex digit,
 * using lowercase letters for 10..15 as the gdb remote protocol
 * conventionally emits.  Behavior for values outside 0..15 is
 * unspecified; callers mask with 0xf first.
 */
static inline int tohex(int v)
{
    if (v < 10)
        return v + '0';
    else
        return v - 10 + 'a';
}
/* Encode len bytes from mem as 2*len lowercase hex characters in buf,
 * most-significant nibble first, and NUL-terminate the result.
 * buf must have room for at least 2*len + 1 bytes.
 */
static void memtohex(char *buf, const uint8_t *mem, int len)
{
    int i, c;
    char *q;
    q = buf;
    for(i = 0; i < len; i++) {
        c = mem[i];
        *q++ = tohex(c >> 4);
        *q++ = tohex(c & 0xf);
    }
    *q = '\0';
}
/* Decode len bytes into mem from the 2*len hex characters at buf
 * (two characters per output byte, most-significant nibble first).
 * Input is not validated: invalid digits decode as 0 via fromhex().
 */
static void hextomem(uint8_t *mem, const char *buf, int len)
{
    int i;

    for(i = 0; i < len; i++) {
        mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
        buf += 2;
    }
}
440 /* return -1 if error, 0 if OK */
441 static int put_packet_binary(GDBState
*s
, const char *buf
, int len
)
452 for(i
= 0; i
< len
; i
++) {
456 *(p
++) = tohex((csum
>> 4) & 0xf);
457 *(p
++) = tohex((csum
) & 0xf);
459 s
->last_packet_len
= p
- s
->last_packet
;
460 put_buffer(s
, (uint8_t *)s
->last_packet
, s
->last_packet_len
);
462 #ifdef CONFIG_USER_ONLY
475 /* return -1 if error, 0 if OK */
476 static int put_packet(GDBState
*s
, const char *buf
)
479 printf("reply='%s'\n", buf
);
482 return put_packet_binary(s
, buf
, strlen(buf
));
485 /* The GDB remote protocol transfers values in target byte order. This means
486 we can use the raw memory access routines to access the value buffer.
487 Conveniently, these also handle the case where the buffer is mis-aligned.
489 #define GET_REG8(val) do { \
490 stb_p(mem_buf, val); \
493 #define GET_REG16(val) do { \
494 stw_p(mem_buf, val); \
497 #define GET_REG32(val) do { \
498 stl_p(mem_buf, val); \
501 #define GET_REG64(val) do { \
502 stq_p(mem_buf, val); \
506 #if TARGET_LONG_BITS == 64
507 #define GET_REGL(val) GET_REG64(val)
508 #define ldtul_p(addr) ldq_p(addr)
510 #define GET_REGL(val) GET_REG32(val)
511 #define ldtul_p(addr) ldl_p(addr)
514 #if defined(TARGET_I386)
517 static const int gpr_map
[16] = {
518 R_EAX
, R_EBX
, R_ECX
, R_EDX
, R_ESI
, R_EDI
, R_EBP
, R_ESP
,
519 8, 9, 10, 11, 12, 13, 14, 15
522 #define gpr_map gpr_map32
524 static const int gpr_map32
[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
526 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
528 #define IDX_IP_REG CPU_NB_REGS
529 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
530 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
531 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
532 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
533 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
535 static int cpu_gdb_read_register(CPUX86State
*env
, uint8_t *mem_buf
, int n
)
537 if (n
< CPU_NB_REGS
) {
538 if (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
) {
539 GET_REG64(env
->regs
[gpr_map
[n
]]);
540 } else if (n
< CPU_NB_REGS32
) {
541 GET_REG32(env
->regs
[gpr_map32
[n
]]);
543 } else if (n
>= IDX_FP_REGS
&& n
< IDX_FP_REGS
+ 8) {
544 #ifdef USE_X86LDOUBLE
545 /* FIXME: byteswap float values - after fixing fpregs layout. */
546 memcpy(mem_buf
, &env
->fpregs
[n
- IDX_FP_REGS
], 10);
548 memset(mem_buf
, 0, 10);
551 } else if (n
>= IDX_XMM_REGS
&& n
< IDX_XMM_REGS
+ CPU_NB_REGS
) {
553 if (n
< CPU_NB_REGS32
||
554 (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
)) {
555 stq_p(mem_buf
, env
->xmm_regs
[n
].XMM_Q(0));
556 stq_p(mem_buf
+ 8, env
->xmm_regs
[n
].XMM_Q(1));
562 if (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
) {
567 case IDX_FLAGS_REG
: GET_REG32(env
->eflags
);
569 case IDX_SEG_REGS
: GET_REG32(env
->segs
[R_CS
].selector
);
570 case IDX_SEG_REGS
+ 1: GET_REG32(env
->segs
[R_SS
].selector
);
571 case IDX_SEG_REGS
+ 2: GET_REG32(env
->segs
[R_DS
].selector
);
572 case IDX_SEG_REGS
+ 3: GET_REG32(env
->segs
[R_ES
].selector
);
573 case IDX_SEG_REGS
+ 4: GET_REG32(env
->segs
[R_FS
].selector
);
574 case IDX_SEG_REGS
+ 5: GET_REG32(env
->segs
[R_GS
].selector
);
576 case IDX_FP_REGS
+ 8: GET_REG32(env
->fpuc
);
577 case IDX_FP_REGS
+ 9: GET_REG32((env
->fpus
& ~0x3800) |
578 (env
->fpstt
& 0x7) << 11);
579 case IDX_FP_REGS
+ 10: GET_REG32(0); /* ftag */
580 case IDX_FP_REGS
+ 11: GET_REG32(0); /* fiseg */
581 case IDX_FP_REGS
+ 12: GET_REG32(0); /* fioff */
582 case IDX_FP_REGS
+ 13: GET_REG32(0); /* foseg */
583 case IDX_FP_REGS
+ 14: GET_REG32(0); /* fooff */
584 case IDX_FP_REGS
+ 15: GET_REG32(0); /* fop */
586 case IDX_MXCSR_REG
: GET_REG32(env
->mxcsr
);
592 static int cpu_x86_gdb_load_seg(CPUX86State
*env
, int sreg
, uint8_t *mem_buf
)
594 uint16_t selector
= ldl_p(mem_buf
);
596 if (selector
!= env
->segs
[sreg
].selector
) {
597 #if defined(CONFIG_USER_ONLY)
598 cpu_x86_load_seg(env
, sreg
, selector
);
600 unsigned int limit
, flags
;
603 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
604 base
= selector
<< 4;
608 if (!cpu_x86_get_descr_debug(env
, selector
, &base
, &limit
, &flags
))
611 cpu_x86_load_seg_cache(env
, sreg
, selector
, base
, limit
, flags
);
617 static int cpu_gdb_write_register(CPUX86State
*env
, uint8_t *mem_buf
, int n
)
621 if (n
< CPU_NB_REGS
) {
622 if (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
) {
623 env
->regs
[gpr_map
[n
]] = ldtul_p(mem_buf
);
624 return sizeof(target_ulong
);
625 } else if (n
< CPU_NB_REGS32
) {
627 env
->regs
[n
] &= ~0xffffffffUL
;
628 env
->regs
[n
] |= (uint32_t)ldl_p(mem_buf
);
631 } else if (n
>= IDX_FP_REGS
&& n
< IDX_FP_REGS
+ 8) {
632 #ifdef USE_X86LDOUBLE
633 /* FIXME: byteswap float values - after fixing fpregs layout. */
634 memcpy(&env
->fpregs
[n
- IDX_FP_REGS
], mem_buf
, 10);
637 } else if (n
>= IDX_XMM_REGS
&& n
< IDX_XMM_REGS
+ CPU_NB_REGS
) {
639 if (n
< CPU_NB_REGS32
||
640 (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
)) {
641 env
->xmm_regs
[n
].XMM_Q(0) = ldq_p(mem_buf
);
642 env
->xmm_regs
[n
].XMM_Q(1) = ldq_p(mem_buf
+ 8);
648 if (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
) {
649 env
->eip
= ldq_p(mem_buf
);
652 env
->eip
&= ~0xffffffffUL
;
653 env
->eip
|= (uint32_t)ldl_p(mem_buf
);
657 env
->eflags
= ldl_p(mem_buf
);
660 case IDX_SEG_REGS
: return cpu_x86_gdb_load_seg(env
, R_CS
, mem_buf
);
661 case IDX_SEG_REGS
+ 1: return cpu_x86_gdb_load_seg(env
, R_SS
, mem_buf
);
662 case IDX_SEG_REGS
+ 2: return cpu_x86_gdb_load_seg(env
, R_DS
, mem_buf
);
663 case IDX_SEG_REGS
+ 3: return cpu_x86_gdb_load_seg(env
, R_ES
, mem_buf
);
664 case IDX_SEG_REGS
+ 4: return cpu_x86_gdb_load_seg(env
, R_FS
, mem_buf
);
665 case IDX_SEG_REGS
+ 5: return cpu_x86_gdb_load_seg(env
, R_GS
, mem_buf
);
667 case IDX_FP_REGS
+ 8:
668 env
->fpuc
= ldl_p(mem_buf
);
670 case IDX_FP_REGS
+ 9:
671 tmp
= ldl_p(mem_buf
);
672 env
->fpstt
= (tmp
>> 11) & 7;
673 env
->fpus
= tmp
& ~0x3800;
675 case IDX_FP_REGS
+ 10: /* ftag */ return 4;
676 case IDX_FP_REGS
+ 11: /* fiseg */ return 4;
677 case IDX_FP_REGS
+ 12: /* fioff */ return 4;
678 case IDX_FP_REGS
+ 13: /* foseg */ return 4;
679 case IDX_FP_REGS
+ 14: /* fooff */ return 4;
680 case IDX_FP_REGS
+ 15: /* fop */ return 4;
683 env
->mxcsr
= ldl_p(mem_buf
);
687 /* Unrecognised register. */
691 #elif defined (TARGET_PPC)
693 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
694 expects whatever the target description contains. Due to a
695 historical mishap the FP registers appear in between core integer
696 regs and PC, MSR, CR, and so forth. We hack round this by giving the
697 FP regs zero size when talking to a newer gdb. */
698 #define NUM_CORE_REGS 71
699 #if defined (TARGET_PPC64)
700 #define GDB_CORE_XML "power64-core.xml"
702 #define GDB_CORE_XML "power-core.xml"
705 static int cpu_gdb_read_register(CPUPPCState
*env
, uint8_t *mem_buf
, int n
)
709 GET_REGL(env
->gpr
[n
]);
714 stfq_p(mem_buf
, env
->fpr
[n
-32]);
718 case 64: GET_REGL(env
->nip
);
719 case 65: GET_REGL(env
->msr
);
724 for (i
= 0; i
< 8; i
++)
725 cr
|= env
->crf
[i
] << (32 - ((i
+ 1) * 4));
728 case 67: GET_REGL(env
->lr
);
729 case 68: GET_REGL(env
->ctr
);
730 case 69: GET_REGL(env
->xer
);
735 GET_REG32(env
->fpscr
);
742 static int cpu_gdb_write_register(CPUPPCState
*env
, uint8_t *mem_buf
, int n
)
746 env
->gpr
[n
] = ldtul_p(mem_buf
);
747 return sizeof(target_ulong
);
752 env
->fpr
[n
-32] = ldfq_p(mem_buf
);
757 env
->nip
= ldtul_p(mem_buf
);
758 return sizeof(target_ulong
);
760 ppc_store_msr(env
, ldtul_p(mem_buf
));
761 return sizeof(target_ulong
);
764 uint32_t cr
= ldl_p(mem_buf
);
766 for (i
= 0; i
< 8; i
++)
767 env
->crf
[i
] = (cr
>> (32 - ((i
+ 1) * 4))) & 0xF;
771 env
->lr
= ldtul_p(mem_buf
);
772 return sizeof(target_ulong
);
774 env
->ctr
= ldtul_p(mem_buf
);
775 return sizeof(target_ulong
);
777 env
->xer
= ldtul_p(mem_buf
);
778 return sizeof(target_ulong
);
789 #elif defined (TARGET_SPARC)
791 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
792 #define NUM_CORE_REGS 86
794 #define NUM_CORE_REGS 72
798 #define GET_REGA(val) GET_REG32(val)
800 #define GET_REGA(val) GET_REGL(val)
803 static int cpu_gdb_read_register(CPUSPARCState
*env
, uint8_t *mem_buf
, int n
)
807 GET_REGA(env
->gregs
[n
]);
810 /* register window */
811 GET_REGA(env
->regwptr
[n
- 8]);
813 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
817 GET_REG32(env
->fpr
[(n
- 32) / 2].l
.lower
);
819 GET_REG32(env
->fpr
[(n
- 32) / 2].l
.upper
);
822 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
824 case 64: GET_REGA(env
->y
);
825 case 65: GET_REGA(cpu_get_psr(env
));
826 case 66: GET_REGA(env
->wim
);
827 case 67: GET_REGA(env
->tbr
);
828 case 68: GET_REGA(env
->pc
);
829 case 69: GET_REGA(env
->npc
);
830 case 70: GET_REGA(env
->fsr
);
831 case 71: GET_REGA(0); /* csr */
832 default: GET_REGA(0);
838 GET_REG32(env
->fpr
[(n
- 32) / 2].l
.lower
);
840 GET_REG32(env
->fpr
[(n
- 32) / 2].l
.upper
);
844 /* f32-f62 (double width, even numbers only) */
845 GET_REG64(env
->fpr
[(n
- 32) / 2].ll
);
848 case 80: GET_REGL(env
->pc
);
849 case 81: GET_REGL(env
->npc
);
850 case 82: GET_REGL((cpu_get_ccr(env
) << 32) |
851 ((env
->asi
& 0xff) << 24) |
852 ((env
->pstate
& 0xfff) << 8) |
854 case 83: GET_REGL(env
->fsr
);
855 case 84: GET_REGL(env
->fprs
);
856 case 85: GET_REGL(env
->y
);
862 static int cpu_gdb_write_register(CPUSPARCState
*env
, uint8_t *mem_buf
, int n
)
864 #if defined(TARGET_ABI32)
867 tmp
= ldl_p(mem_buf
);
871 tmp
= ldtul_p(mem_buf
);
878 /* register window */
879 env
->regwptr
[n
- 8] = tmp
;
881 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
886 env
->fpr
[(n
- 32) / 2].l
.lower
= tmp
;
888 env
->fpr
[(n
- 32) / 2].l
.upper
= tmp
;
891 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
893 case 64: env
->y
= tmp
; break;
894 case 65: cpu_put_psr(env
, tmp
); break;
895 case 66: env
->wim
= tmp
; break;
896 case 67: env
->tbr
= tmp
; break;
897 case 68: env
->pc
= tmp
; break;
898 case 69: env
->npc
= tmp
; break;
899 case 70: env
->fsr
= tmp
; break;
907 tmp
= ldl_p(mem_buf
);
909 env
->fpr
[(n
- 32) / 2].l
.lower
= tmp
;
911 env
->fpr
[(n
- 32) / 2].l
.upper
= tmp
;
915 /* f32-f62 (double width, even numbers only) */
916 env
->fpr
[(n
- 32) / 2].ll
= tmp
;
919 case 80: env
->pc
= tmp
; break;
920 case 81: env
->npc
= tmp
; break;
922 cpu_put_ccr(env
, tmp
>> 32);
923 env
->asi
= (tmp
>> 24) & 0xff;
924 env
->pstate
= (tmp
>> 8) & 0xfff;
925 cpu_put_cwp64(env
, tmp
& 0xff);
927 case 83: env
->fsr
= tmp
; break;
928 case 84: env
->fprs
= tmp
; break;
929 case 85: env
->y
= tmp
; break;
936 #elif defined (TARGET_ARM)
938 /* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect
939 whatever the target description contains. Due to a historical mishap
940 the FPA registers appear in between core integer regs and the CPSR.
941 We hack round this by giving the FPA regs zero size when talking to a
943 #define NUM_CORE_REGS 26
944 #define GDB_CORE_XML "arm-core.xml"
946 static int cpu_gdb_read_register(CPUARMState
*env
, uint8_t *mem_buf
, int n
)
949 /* Core integer register. */
950 GET_REG32(env
->regs
[n
]);
956 memset(mem_buf
, 0, 12);
961 /* FPA status register. */
967 GET_REG32(cpsr_read(env
));
969 /* Unknown register. */
973 static int cpu_gdb_write_register(CPUARMState
*env
, uint8_t *mem_buf
, int n
)
977 tmp
= ldl_p(mem_buf
);
979 /* Mask out low bit of PC to workaround gdb bugs. This will probably
980 cause problems if we ever implement the Jazelle DBX extensions. */
985 /* Core integer register. */
989 if (n
< 24) { /* 16-23 */
990 /* FPA registers (ignored). */
997 /* FPA status register (ignored). */
1003 cpsr_write (env
, tmp
, 0xffffffff);
1006 /* Unknown register. */
1010 #elif defined (TARGET_M68K)
1012 #define NUM_CORE_REGS 18
1014 #define GDB_CORE_XML "cf-core.xml"
1016 static int cpu_gdb_read_register(CPUM68KState
*env
, uint8_t *mem_buf
, int n
)
1020 GET_REG32(env
->dregs
[n
]);
1021 } else if (n
< 16) {
1023 GET_REG32(env
->aregs
[n
- 8]);
1026 case 16: GET_REG32(env
->sr
);
1027 case 17: GET_REG32(env
->pc
);
1030 /* FP registers not included here because they vary between
1031 ColdFire and m68k. Use XML bits for these. */
1035 static int cpu_gdb_write_register(CPUM68KState
*env
, uint8_t *mem_buf
, int n
)
1039 tmp
= ldl_p(mem_buf
);
1043 env
->dregs
[n
] = tmp
;
1044 } else if (n
< 16) {
1046 env
->aregs
[n
- 8] = tmp
;
1049 case 16: env
->sr
= tmp
; break;
1050 case 17: env
->pc
= tmp
; break;
1056 #elif defined (TARGET_MIPS)
1058 #define NUM_CORE_REGS 73
1060 static int cpu_gdb_read_register(CPUMIPSState
*env
, uint8_t *mem_buf
, int n
)
1063 GET_REGL(env
->active_tc
.gpr
[n
]);
1065 if (env
->CP0_Config1
& (1 << CP0C1_FP
)) {
1066 if (n
>= 38 && n
< 70) {
1067 if (env
->CP0_Status
& (1 << CP0St_FR
))
1068 GET_REGL(env
->active_fpu
.fpr
[n
- 38].d
);
1070 GET_REGL(env
->active_fpu
.fpr
[n
- 38].w
[FP_ENDIAN_IDX
]);
1073 case 70: GET_REGL((int32_t)env
->active_fpu
.fcr31
);
1074 case 71: GET_REGL((int32_t)env
->active_fpu
.fcr0
);
1078 case 32: GET_REGL((int32_t)env
->CP0_Status
);
1079 case 33: GET_REGL(env
->active_tc
.LO
[0]);
1080 case 34: GET_REGL(env
->active_tc
.HI
[0]);
1081 case 35: GET_REGL(env
->CP0_BadVAddr
);
1082 case 36: GET_REGL((int32_t)env
->CP0_Cause
);
1083 case 37: GET_REGL(env
->active_tc
.PC
| !!(env
->hflags
& MIPS_HFLAG_M16
));
1084 case 72: GET_REGL(0); /* fp */
1085 case 89: GET_REGL((int32_t)env
->CP0_PRid
);
1087 if (n
>= 73 && n
<= 88) {
1088 /* 16 embedded regs. */
1095 /* convert MIPS rounding mode in FCR31 to IEEE library */
1096 static unsigned int ieee_rm
[] =
1098 float_round_nearest_even
,
1099 float_round_to_zero
,
1103 #define RESTORE_ROUNDING_MODE \
1104 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
1106 static int cpu_gdb_write_register(CPUMIPSState
*env
, uint8_t *mem_buf
, int n
)
1110 tmp
= ldtul_p(mem_buf
);
1113 env
->active_tc
.gpr
[n
] = tmp
;
1114 return sizeof(target_ulong
);
1116 if (env
->CP0_Config1
& (1 << CP0C1_FP
)
1117 && n
>= 38 && n
< 73) {
1119 if (env
->CP0_Status
& (1 << CP0St_FR
))
1120 env
->active_fpu
.fpr
[n
- 38].d
= tmp
;
1122 env
->active_fpu
.fpr
[n
- 38].w
[FP_ENDIAN_IDX
] = tmp
;
1126 env
->active_fpu
.fcr31
= tmp
& 0xFF83FFFF;
1127 /* set rounding mode */
1128 RESTORE_ROUNDING_MODE
;
1130 case 71: env
->active_fpu
.fcr0
= tmp
; break;
1132 return sizeof(target_ulong
);
1135 case 32: env
->CP0_Status
= tmp
; break;
1136 case 33: env
->active_tc
.LO
[0] = tmp
; break;
1137 case 34: env
->active_tc
.HI
[0] = tmp
; break;
1138 case 35: env
->CP0_BadVAddr
= tmp
; break;
1139 case 36: env
->CP0_Cause
= tmp
; break;
1141 env
->active_tc
.PC
= tmp
& ~(target_ulong
)1;
1143 env
->hflags
|= MIPS_HFLAG_M16
;
1145 env
->hflags
&= ~(MIPS_HFLAG_M16
);
1148 case 72: /* fp, ignored */ break;
1152 /* Other registers are readonly. Ignore writes. */
1156 return sizeof(target_ulong
);
1158 #elif defined(TARGET_OPENRISC)
1160 #define NUM_CORE_REGS (32 + 3)
1162 static int cpu_gdb_read_register(CPUOpenRISCState
*env
, uint8_t *mem_buf
, int n
)
1165 GET_REG32(env
->gpr
[n
]);
1169 GET_REG32(env
->ppc
);
1173 GET_REG32(env
->npc
);
1187 static int cpu_gdb_write_register(CPUOpenRISCState
*env
,
1188 uint8_t *mem_buf
, int n
)
1192 if (n
> NUM_CORE_REGS
) {
1196 tmp
= ldl_p(mem_buf
);
1220 #elif defined (TARGET_SH4)
1222 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1223 /* FIXME: We should use XML for this. */
1225 #define NUM_CORE_REGS 59
1227 static int cpu_gdb_read_register(CPUSH4State
*env
, uint8_t *mem_buf
, int n
)
1231 if ((env
->sr
& (SR_MD
| SR_RB
)) == (SR_MD
| SR_RB
)) {
1232 GET_REGL(env
->gregs
[n
+ 16]);
1234 GET_REGL(env
->gregs
[n
]);
1237 GET_REGL(env
->gregs
[n
]);
1247 GET_REGL(env
->mach
);
1249 GET_REGL(env
->macl
);
1253 GET_REGL(env
->fpul
);
1255 GET_REGL(env
->fpscr
);
1257 if (env
->fpscr
& FPSCR_FR
) {
1258 stfl_p(mem_buf
, env
->fregs
[n
- 9]);
1260 stfl_p(mem_buf
, env
->fregs
[n
- 25]);
1268 GET_REGL(env
->gregs
[n
- 43]);
1270 GET_REGL(env
->gregs
[n
- (51 - 16)]);
1276 static int cpu_gdb_write_register(CPUSH4State
*env
, uint8_t *mem_buf
, int n
)
1280 if ((env
->sr
& (SR_MD
| SR_RB
)) == (SR_MD
| SR_RB
)) {
1281 env
->gregs
[n
+ 16] = ldl_p(mem_buf
);
1283 env
->gregs
[n
] = ldl_p(mem_buf
);
1287 env
->gregs
[n
] = ldl_p(mem_buf
);
1290 env
->pc
= ldl_p(mem_buf
);
1293 env
->pr
= ldl_p(mem_buf
);
1296 env
->gbr
= ldl_p(mem_buf
);
1299 env
->vbr
= ldl_p(mem_buf
);
1302 env
->mach
= ldl_p(mem_buf
);
1305 env
->macl
= ldl_p(mem_buf
);
1308 env
->sr
= ldl_p(mem_buf
);
1311 env
->fpul
= ldl_p(mem_buf
);
1314 env
->fpscr
= ldl_p(mem_buf
);
1317 if (env
->fpscr
& FPSCR_FR
) {
1318 env
->fregs
[n
- 9] = ldfl_p(mem_buf
);
1320 env
->fregs
[n
- 25] = ldfl_p(mem_buf
);
1324 env
->ssr
= ldl_p(mem_buf
);
1327 env
->spc
= ldl_p(mem_buf
);
1330 env
->gregs
[n
- 43] = ldl_p(mem_buf
);
1333 env
->gregs
[n
- (51 - 16)] = ldl_p(mem_buf
);
1340 #elif defined (TARGET_MICROBLAZE)
1342 #define NUM_CORE_REGS (32 + 5)
1344 static int cpu_gdb_read_register(CPUMBState
*env
, uint8_t *mem_buf
, int n
)
1347 GET_REG32(env
->regs
[n
]);
1349 GET_REG32(env
->sregs
[n
- 32]);
1354 static int cpu_gdb_write_register(CPUMBState
*env
, uint8_t *mem_buf
, int n
)
1358 if (n
> NUM_CORE_REGS
)
1361 tmp
= ldl_p(mem_buf
);
1366 env
->sregs
[n
- 32] = tmp
;
1370 #elif defined (TARGET_CRIS)
1372 #define NUM_CORE_REGS 49
1375 read_register_crisv10(CPUCRISState
*env
, uint8_t *mem_buf
, int n
)
1378 GET_REG32(env
->regs
[n
]);
1388 GET_REG8(env
->pregs
[n
- 16]);
1391 GET_REG8(env
->pregs
[n
- 16]);
1395 GET_REG16(env
->pregs
[n
- 16]);
1399 GET_REG32(env
->pregs
[n
- 16]);
1407 static int cpu_gdb_read_register(CPUCRISState
*env
, uint8_t *mem_buf
, int n
)
1411 if (env
->pregs
[PR_VR
] < 32)
1412 return read_register_crisv10(env
, mem_buf
, n
);
1414 srs
= env
->pregs
[PR_SRS
];
1416 GET_REG32(env
->regs
[n
]);
1419 if (n
>= 21 && n
< 32) {
1420 GET_REG32(env
->pregs
[n
- 16]);
1422 if (n
>= 33 && n
< 49) {
1423 GET_REG32(env
->sregs
[srs
][n
- 33]);
1426 case 16: GET_REG8(env
->pregs
[0]);
1427 case 17: GET_REG8(env
->pregs
[1]);
1428 case 18: GET_REG32(env
->pregs
[2]);
1429 case 19: GET_REG8(srs
);
1430 case 20: GET_REG16(env
->pregs
[4]);
1431 case 32: GET_REG32(env
->pc
);
1437 static int cpu_gdb_write_register(CPUCRISState
*env
, uint8_t *mem_buf
, int n
)
1444 tmp
= ldl_p(mem_buf
);
1450 if (n
>= 21 && n
< 32) {
1451 env
->pregs
[n
- 16] = tmp
;
1454 /* FIXME: Should support function regs be writable? */
1458 case 18: env
->pregs
[PR_PID
] = tmp
; break;
1461 case 32: env
->pc
= tmp
; break;
1466 #elif defined (TARGET_ALPHA)
1468 #define NUM_CORE_REGS 67
1470 static int cpu_gdb_read_register(CPUAlphaState
*env
, uint8_t *mem_buf
, int n
)
1480 d
.d
= env
->fir
[n
- 32];
1484 val
= cpu_alpha_load_fpcr(env
);
1494 /* 31 really is the zero register; 65 is unassigned in the
1495 gdb protocol, but is still required to occupy 8 bytes. */
1504 static int cpu_gdb_write_register(CPUAlphaState
*env
, uint8_t *mem_buf
, int n
)
1506 target_ulong tmp
= ldtul_p(mem_buf
);
1515 env
->fir
[n
- 32] = d
.d
;
1518 cpu_alpha_store_fpcr(env
, tmp
);
1528 /* 31 really is the zero register; 65 is unassigned in the
1529 gdb protocol, but is still required to occupy 8 bytes. */
1536 #elif defined (TARGET_S390X)
1538 #define NUM_CORE_REGS S390_NUM_TOTAL_REGS
1540 static int cpu_gdb_read_register(CPUS390XState
*env
, uint8_t *mem_buf
, int n
)
1543 case S390_PSWM_REGNUM
: GET_REGL(env
->psw
.mask
); break;
1544 case S390_PSWA_REGNUM
: GET_REGL(env
->psw
.addr
); break;
1545 case S390_R0_REGNUM
... S390_R15_REGNUM
:
1546 GET_REGL(env
->regs
[n
-S390_R0_REGNUM
]); break;
1547 case S390_A0_REGNUM
... S390_A15_REGNUM
:
1548 GET_REG32(env
->aregs
[n
-S390_A0_REGNUM
]); break;
1549 case S390_FPC_REGNUM
: GET_REG32(env
->fpc
); break;
1550 case S390_F0_REGNUM
... S390_F15_REGNUM
:
1553 case S390_PC_REGNUM
: GET_REGL(env
->psw
.addr
); break;
1554 case S390_CC_REGNUM
:
1555 env
->cc_op
= calc_cc(env
, env
->cc_op
, env
->cc_src
, env
->cc_dst
,
1557 GET_REG32(env
->cc_op
);
1564 static int cpu_gdb_write_register(CPUS390XState
*env
, uint8_t *mem_buf
, int n
)
1569 tmpl
= ldtul_p(mem_buf
);
1570 tmp32
= ldl_p(mem_buf
);
1573 case S390_PSWM_REGNUM
: env
->psw
.mask
= tmpl
; break;
1574 case S390_PSWA_REGNUM
: env
->psw
.addr
= tmpl
; break;
1575 case S390_R0_REGNUM
... S390_R15_REGNUM
:
1576 env
->regs
[n
-S390_R0_REGNUM
] = tmpl
; break;
1577 case S390_A0_REGNUM
... S390_A15_REGNUM
:
1578 env
->aregs
[n
-S390_A0_REGNUM
] = tmp32
; r
=4; break;
1579 case S390_FPC_REGNUM
: env
->fpc
= tmp32
; r
=4; break;
1580 case S390_F0_REGNUM
... S390_F15_REGNUM
:
1583 case S390_PC_REGNUM
: env
->psw
.addr
= tmpl
; break;
1584 case S390_CC_REGNUM
: env
->cc_op
= tmp32
; r
=4; break;
1589 #elif defined (TARGET_LM32)
1591 #include "hw/lm32_pic.h"
1592 #define NUM_CORE_REGS (32 + 7)
1594 static int cpu_gdb_read_register(CPULM32State
*env
, uint8_t *mem_buf
, int n
)
1597 GET_REG32(env
->regs
[n
]);
1603 /* FIXME: put in right exception ID */
1608 GET_REG32(env
->eba
);
1611 GET_REG32(env
->deba
);
1617 GET_REG32(lm32_pic_get_im(env
->pic_state
));
1620 GET_REG32(lm32_pic_get_ip(env
->pic_state
));
1627 static int cpu_gdb_write_register(CPULM32State
*env
, uint8_t *mem_buf
, int n
)
1631 if (n
> NUM_CORE_REGS
) {
1635 tmp
= ldl_p(mem_buf
);
1654 lm32_pic_set_im(env
->pic_state
, tmp
);
1657 lm32_pic_set_ip(env
->pic_state
, tmp
);
1663 #elif defined(TARGET_XTENSA)
1665 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1666 * Use num_regs to see all registers. gdb modification is required for that:
1667 * reset bit 0 in the 'flags' field of the registers definitions in the
1668 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1670 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1671 #define num_g_regs NUM_CORE_REGS
1673 static int cpu_gdb_read_register(CPUXtensaState
*env
, uint8_t *mem_buf
, int n
)
1675 const XtensaGdbReg
*reg
= env
->config
->gdb_regmap
.reg
+ n
;
1677 if (n
< 0 || n
>= env
->config
->gdb_regmap
.num_regs
) {
1681 switch (reg
->type
) {
1687 xtensa_sync_phys_from_window(env
);
1688 GET_REG32(env
->phys_regs
[(reg
->targno
& 0xff) % env
->config
->nareg
]);
1692 GET_REG32(env
->sregs
[reg
->targno
& 0xff]);
1696 GET_REG32(env
->uregs
[reg
->targno
& 0xff]);
1700 GET_REG32(env
->regs
[reg
->targno
& 0x0f]);
1704 qemu_log("%s from reg %d of unsupported type %d\n",
1705 __func__
, n
, reg
->type
);
1710 static int cpu_gdb_write_register(CPUXtensaState
*env
, uint8_t *mem_buf
, int n
)
1713 const XtensaGdbReg
*reg
= env
->config
->gdb_regmap
.reg
+ n
;
1715 if (n
< 0 || n
>= env
->config
->gdb_regmap
.num_regs
) {
1719 tmp
= ldl_p(mem_buf
);
1721 switch (reg
->type
) {
1727 env
->phys_regs
[(reg
->targno
& 0xff) % env
->config
->nareg
] = tmp
;
1728 xtensa_sync_window_from_phys(env
);
1732 env
->sregs
[reg
->targno
& 0xff] = tmp
;
1736 env
->uregs
[reg
->targno
& 0xff] = tmp
;
1740 env
->regs
[reg
->targno
& 0x0f] = tmp
;
1744 qemu_log("%s to reg %d of unsupported type %d\n",
1745 __func__
, n
, reg
->type
);
1753 #define NUM_CORE_REGS 0
1755 static int cpu_gdb_read_register(CPUArchState
*env
, uint8_t *mem_buf
, int n
)
1760 static int cpu_gdb_write_register(CPUArchState
*env
, uint8_t *mem_buf
, int n
)
1767 #if !defined(TARGET_XTENSA)
1768 static int num_g_regs
= NUM_CORE_REGS
;
1772 /* Encode data using the encoding for 'x' packets. */
1773 static int memtox(char *buf
, const char *mem
, int len
)
1781 case '#': case '$': case '*': case '}':
1793 static const char *get_feature_xml(const char *p
, const char **newp
)
1798 static char target_xml
[1024];
1801 while (p
[len
] && p
[len
] != ':')
1806 if (strncmp(p
, "target.xml", len
) == 0) {
1807 /* Generate the XML description for this CPU. */
1808 if (!target_xml
[0]) {
1809 GDBRegisterState
*r
;
1811 snprintf(target_xml
, sizeof(target_xml
),
1812 "<?xml version=\"1.0\"?>"
1813 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1815 "<xi:include href=\"%s\"/>",
1818 for (r
= first_cpu
->gdb_regs
; r
; r
= r
->next
) {
1819 pstrcat(target_xml
, sizeof(target_xml
), "<xi:include href=\"");
1820 pstrcat(target_xml
, sizeof(target_xml
), r
->xml
);
1821 pstrcat(target_xml
, sizeof(target_xml
), "\"/>");
1823 pstrcat(target_xml
, sizeof(target_xml
), "</target>");
1827 for (i
= 0; ; i
++) {
1828 name
= xml_builtin
[i
][0];
1829 if (!name
|| (strncmp(name
, p
, len
) == 0 && strlen(name
) == len
))
1832 return name
? xml_builtin
[i
][1] : NULL
;
1836 static int gdb_read_register(CPUArchState
*env
, uint8_t *mem_buf
, int reg
)
1838 GDBRegisterState
*r
;
1840 if (reg
< NUM_CORE_REGS
)
1841 return cpu_gdb_read_register(env
, mem_buf
, reg
);
1843 for (r
= env
->gdb_regs
; r
; r
= r
->next
) {
1844 if (r
->base_reg
<= reg
&& reg
< r
->base_reg
+ r
->num_regs
) {
1845 return r
->get_reg(env
, mem_buf
, reg
- r
->base_reg
);
1851 static int gdb_write_register(CPUArchState
*env
, uint8_t *mem_buf
, int reg
)
1853 GDBRegisterState
*r
;
1855 if (reg
< NUM_CORE_REGS
)
1856 return cpu_gdb_write_register(env
, mem_buf
, reg
);
1858 for (r
= env
->gdb_regs
; r
; r
= r
->next
) {
1859 if (r
->base_reg
<= reg
&& reg
< r
->base_reg
+ r
->num_regs
) {
1860 return r
->set_reg(env
, mem_buf
, reg
- r
->base_reg
);
1866 #if !defined(TARGET_XTENSA)
1867 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1868 specifies the first register number and these registers are included in
1869 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1870 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1873 void gdb_register_coprocessor(CPUArchState
* env
,
1874 gdb_reg_cb get_reg
, gdb_reg_cb set_reg
,
1875 int num_regs
, const char *xml
, int g_pos
)
1877 GDBRegisterState
*s
;
1878 GDBRegisterState
**p
;
1879 static int last_reg
= NUM_CORE_REGS
;
1883 /* Check for duplicates. */
1884 if (strcmp((*p
)->xml
, xml
) == 0)
1889 s
= g_new0(GDBRegisterState
, 1);
1890 s
->base_reg
= last_reg
;
1891 s
->num_regs
= num_regs
;
1892 s
->get_reg
= get_reg
;
1893 s
->set_reg
= set_reg
;
1896 /* Add to end of list. */
1897 last_reg
+= num_regs
;
1900 if (g_pos
!= s
->base_reg
) {
1901 fprintf(stderr
, "Error: Bad gdb register numbering for '%s'\n"
1902 "Expected %d got %d\n", xml
, g_pos
, s
->base_reg
);
1904 num_g_regs
= last_reg
;
1910 #ifndef CONFIG_USER_ONLY
1911 static const int xlat_gdb_type
[] = {
1912 [GDB_WATCHPOINT_WRITE
] = BP_GDB
| BP_MEM_WRITE
,
1913 [GDB_WATCHPOINT_READ
] = BP_GDB
| BP_MEM_READ
,
1914 [GDB_WATCHPOINT_ACCESS
] = BP_GDB
| BP_MEM_ACCESS
,
1918 static int gdb_breakpoint_insert(target_ulong addr
, target_ulong len
, int type
)
1924 return kvm_insert_breakpoint(gdbserver_state
->c_cpu
, addr
, len
, type
);
1927 case GDB_BREAKPOINT_SW
:
1928 case GDB_BREAKPOINT_HW
:
1929 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1930 err
= cpu_breakpoint_insert(env
, addr
, BP_GDB
, NULL
);
1935 #ifndef CONFIG_USER_ONLY
1936 case GDB_WATCHPOINT_WRITE
:
1937 case GDB_WATCHPOINT_READ
:
1938 case GDB_WATCHPOINT_ACCESS
:
1939 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1940 err
= cpu_watchpoint_insert(env
, addr
, len
, xlat_gdb_type
[type
],
/*
 * gdb_breakpoint_remove(): mirror of gdb_breakpoint_insert() -- remove a
 * breakpoint/watchpoint of the given GDB type from every CPU (or via KVM
 * when enabled).
 * NOTE(review): truncated extraction -- switch header, error handling and
 * return paths are missing.
 */
1952 static int gdb_breakpoint_remove(target_ulong addr
, target_ulong len
, int type
)
1958 return kvm_remove_breakpoint(gdbserver_state
->c_cpu
, addr
, len
, type
);
1961 case GDB_BREAKPOINT_SW
:
1962 case GDB_BREAKPOINT_HW
:
1963 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1964 err
= cpu_breakpoint_remove(env
, addr
, BP_GDB
);
1969 #ifndef CONFIG_USER_ONLY
1970 case GDB_WATCHPOINT_WRITE
:
1971 case GDB_WATCHPOINT_READ
:
1972 case GDB_WATCHPOINT_ACCESS
:
1973 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1974 err
= cpu_watchpoint_remove(env
, addr
, len
, xlat_gdb_type
[type
]);
/*
 * gdb_breakpoint_remove_all(): clear every GDB-owned breakpoint and
 * (system mode) watchpoint, either via KVM or by walking all CPUs.
 * Called when gdb (re)connects or detaches so stale state is not left
 * behind.
 */
1985 static void gdb_breakpoint_remove_all(void)
1989 if (kvm_enabled()) {
1990 kvm_remove_all_breakpoints(gdbserver_state
->c_cpu
);
1994 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1995 cpu_breakpoint_remove_all(env
, BP_GDB
);
1996 #ifndef CONFIG_USER_ONLY
1997 cpu_watchpoint_remove_all(env
, BP_GDB
);
/*
 * gdb_set_cpu_pc(): write a new program counter into the current debug CPU,
 * with one #ifdef branch per target architecture (each stores 'pc' into the
 * target's PC-equivalent field).  State is synchronized from KVM first so
 * the write is not lost.
 * NOTE(review): truncated extraction -- several per-target assignments
 * (i386, PPC, SH4, CRIS, ...) were dropped; the surviving branches are
 * verbatim.
 */
2002 static void gdb_set_cpu_pc(GDBState
*s
, target_ulong pc
)
2004 cpu_synchronize_state(s
->c_cpu
);
2005 #if defined(TARGET_I386)
2007 #elif defined (TARGET_PPC)
2009 #elif defined (TARGET_SPARC)
/* SPARC: delay-slot architecture -- npc tracks pc + 4. */
2011 s
->c_cpu
->npc
= pc
+ 4;
2012 #elif defined (TARGET_ARM)
2013 s
->c_cpu
->regs
[15] = pc
;
2014 #elif defined (TARGET_SH4)
2016 #elif defined (TARGET_MIPS)
/* MIPS: bit 0 of the address selects MIPS16 mode; strip it from the PC
 * and fold it into hflags instead. */
2017 s
->c_cpu
->active_tc
.PC
= pc
& ~(target_ulong
)1;
2019 s
->c_cpu
->hflags
|= MIPS_HFLAG_M16
;
2021 s
->c_cpu
->hflags
&= ~(MIPS_HFLAG_M16
);
2023 #elif defined (TARGET_MICROBLAZE)
2024 s
->c_cpu
->sregs
[SR_PC
] = pc
;
2025 #elif defined(TARGET_OPENRISC)
2027 #elif defined (TARGET_CRIS)
2029 #elif defined (TARGET_ALPHA)
2031 #elif defined (TARGET_S390X)
2032 s
->c_cpu
->psw
.addr
= pc
;
2033 #elif defined (TARGET_LM32)
2035 #elif defined(TARGET_XTENSA)
/*
 * find_cpu(): map a GDB thread id onto a CPU by scanning the first_cpu
 * list for a matching cpu_index().  Presumably returns NULL when no CPU
 * matches (return statements lost in extraction).
 */
2040 static CPUArchState
*find_cpu(uint32_t thread_id
)
2044 for (env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2045 if (cpu_index(env
) == thread_id
) {
/*
 * gdb_handle_packet(): parse and execute one decoded GDB Remote Serial
 * Protocol command ('line_buf') and send the reply.  The dispatch covers
 * (per the surviving fragments): '?' stop-reason query, 'c'/'C' continue,
 * 'v' packets (vCont), 'k' kill, 'D' detach, 's' step, 'F' file-I/O reply,
 * 'g'/'G' read/write all registers, 'm'/'M' read/write memory, 'p'/'P'
 * read/write single register, 'Z'/'z' breakpoint insert/remove, 'H'/'T'
 * thread selection/liveness, and 'q'/'Q' general queries.  Returns the next
 * RSP parser state.
 * NOTE(review): truncated extraction -- the switch header, case labels and
 * many interleaved lines are missing; surviving tokens kept verbatim.
 */
2053 static int gdb_handle_packet(GDBState
*s
, const char *line_buf
)
2058 int ch
, reg_size
, type
, res
;
/* Reply/scratch buffers are sized to the maximum RSP packet. */
2059 char buf
[MAX_PACKET_LENGTH
];
2060 uint8_t mem_buf
[MAX_PACKET_LENGTH
];
2062 target_ulong addr
, len
;
2065 printf("command='%s'\n", line_buf
);
/* '?': report last stop reason as a 'T' (SIGTRAP) stop reply. */
2071 /* TODO: Make this return the correct value for user-mode. */
2072 snprintf(buf
, sizeof(buf
), "T%02xthread:%02x;", GDB_SIGNAL_TRAP
,
2073 cpu_index(s
->c_cpu
));
2075 /* Remove all the breakpoints when this query is issued,
2076 * because gdb is doing an initial connect and the state
2077 * should be cleaned up.
2079 gdb_breakpoint_remove_all();
/* 'c' continue: optional resume address. */
2083 addr
= strtoull(p
, (char **)&p
, 16);
2084 gdb_set_cpu_pc(s
, addr
);
/* 'C' continue-with-signal: translate the gdb signal number. */
2090 s
->signal
= gdb_signal_to_target (strtoul(p
, (char **)&p
, 16));
2091 if (s
->signal
== -1)
/* 'v' packets: only vCont is understood. */
2096 if (strncmp(p
, "Cont", 4) == 0) {
2097 int res_signal
, res_thread
;
/* vCont? -- advertise supported actions. */
2101 put_packet(s
, "vCont;c;C;s;S");
2116 if (action
== 'C' || action
== 'S') {
2117 signal
= strtoul(p
, (char **)&p
, 16);
2118 } else if (action
!= 'c' && action
!= 's') {
2124 thread
= strtoull(p
+1, (char **)&p
, 16);
/* Fold C/S into c/s; step ('s') takes precedence over continue. */
2126 action
= tolower(action
);
2127 if (res
== 0 || (res
== 'c' && action
== 's')) {
2129 res_signal
= signal
;
2130 res_thread
= thread
;
2134 if (res_thread
!= -1 && res_thread
!= 0) {
2135 env
= find_cpu(res_thread
);
2137 put_packet(s
, "E22");
2143 cpu_single_step(s
->c_cpu
, sstep_flags
);
2145 s
->signal
= res_signal
;
2151 goto unknown_command
;
/* 'k': kill request. */
2154 #ifdef CONFIG_USER_ONLY
2155 /* Kill the target */
2156 fprintf(stderr
, "\nQEMU: Terminated via GDBstub\n");
/* 'D': detach -- drop all breakpoints and gdb file I/O. */
2161 gdb_breakpoint_remove_all();
2162 gdb_syscall_mode
= GDB_SYS_DISABLED
;
2164 put_packet(s
, "OK");
/* 's': single step, optional resume address. */
2168 addr
= strtoull(p
, (char **)&p
, 16);
2169 gdb_set_cpu_pc(s
, addr
);
2171 cpu_single_step(s
->c_cpu
, sstep_flags
);
/* 'F': reply to a gdb file-I/O (syscall) request: retval,errno[,C]. */
2179 ret
= strtoull(p
, (char **)&p
, 16);
2182 err
= strtoull(p
, (char **)&p
, 16);
2189 if (s
->current_syscall_cb
) {
2190 s
->current_syscall_cb(s
->c_cpu
, ret
, err
);
2191 s
->current_syscall_cb
= NULL
;
/* Ctrl-C during the syscall -- report SIGINT stop. */
2194 put_packet(s
, "T02");
/* 'g': read all registers as one hex blob. */
2201 cpu_synchronize_state(s
->g_cpu
);
2204 for (addr
= 0; addr
< num_g_regs
; addr
++) {
2205 reg_size
= gdb_read_register(s
->g_cpu
, mem_buf
+ len
, addr
);
2208 memtohex(buf
, mem_buf
, len
);
/* 'G': write all registers from one hex blob. */
2212 cpu_synchronize_state(s
->g_cpu
);
2214 registers
= mem_buf
;
2215 len
= strlen(p
) / 2;
2216 hextomem((uint8_t *)registers
, p
, len
);
2217 for (addr
= 0; addr
< num_g_regs
&& len
> 0; addr
++) {
2218 reg_size
= gdb_write_register(s
->g_cpu
, registers
, addr
);
2220 registers
+= reg_size
;
2222 put_packet(s
, "OK");
/* 'm': read target memory addr,len -- E14 on fault. */
2225 addr
= strtoull(p
, (char **)&p
, 16);
2228 len
= strtoull(p
, NULL
, 16);
2229 if (target_memory_rw_debug(s
->g_cpu
, addr
, mem_buf
, len
, 0) != 0) {
2230 put_packet (s
, "E14");
2232 memtohex(buf
, mem_buf
, len
);
/* 'M': write target memory addr,len:hexdata. */
2237 addr
= strtoull(p
, (char **)&p
, 16);
2240 len
= strtoull(p
, (char **)&p
, 16);
2243 hextomem(mem_buf
, p
, len
);
2244 if (target_memory_rw_debug(s
->g_cpu
, addr
, mem_buf
, len
, 1) != 0) {
2245 put_packet(s
, "E14");
2247 put_packet(s
, "OK");
/* 'p': read a single register by number. */
2251 /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
2252 This works, but can be very slow. Anything new enough to
2253 understand XML also knows how to use this properly. */
2255 goto unknown_command
;
2256 addr
= strtoull(p
, (char **)&p
, 16);
2257 reg_size
= gdb_read_register(s
->g_cpu
, mem_buf
, addr
);
2259 memtohex(buf
, mem_buf
, reg_size
);
2262 put_packet(s
, "E14");
/* 'P': write a single register: num=hexvalue. */
2267 goto unknown_command
;
2268 addr
= strtoull(p
, (char **)&p
, 16);
2271 reg_size
= strlen(p
) / 2;
2272 hextomem(mem_buf
, p
, reg_size
);
2273 gdb_write_register(s
->g_cpu
, mem_buf
, addr
);
2274 put_packet(s
, "OK");
/* 'Z'/'z': insert/remove breakpoint-or-watchpoint type,addr,len. */
2278 type
= strtoul(p
, (char **)&p
, 16);
2281 addr
= strtoull(p
, (char **)&p
, 16);
2284 len
= strtoull(p
, (char **)&p
, 16);
2286 res
= gdb_breakpoint_insert(addr
, len
, type
);
2288 res
= gdb_breakpoint_remove(addr
, len
, type
);
2290 put_packet(s
, "OK");
/* -ENOSYS means the type is unsupported -- reply with empty packet
 * (fragment lost); other errors report E22. */
2291 else if (res
== -ENOSYS
)
2294 put_packet(s
, "E22");
/* 'H': set the thread used for subsequent operations ('c' or 'g'). */
2298 thread
= strtoull(p
, (char **)&p
, 16);
2299 if (thread
== -1 || thread
== 0) {
2300 put_packet(s
, "OK");
2303 env
= find_cpu(thread
);
2305 put_packet(s
, "E22");
2311 put_packet(s
, "OK");
2315 put_packet(s
, "OK");
2318 put_packet(s
, "E22");
/* 'T': is this thread alive? */
2323 thread
= strtoull(p
, (char **)&p
, 16);
2324 env
= find_cpu(thread
);
2327 put_packet(s
, "OK");
2329 put_packet(s
, "E22");
/* 'q'/'Q': general query packets. */
2334 /* parse any 'q' packets here */
2335 if (!strcmp(p
,"qemu.sstepbits")) {
2336 /* Query Breakpoint bit definitions */
2337 snprintf(buf
, sizeof(buf
), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2343 } else if (strncmp(p
,"qemu.sstep",10) == 0) {
2344 /* Display or change the sstep_flags */
2347 /* Display current setting */
2348 snprintf(buf
, sizeof(buf
), "0x%x", sstep_flags
);
2353 type
= strtoul(p
, (char **)&p
, 16);
2355 put_packet(s
, "OK");
2357 } else if (strcmp(p
,"C") == 0) {
2358 /* "Current thread" remains vague in the spec, so always return
2359 * the first CPU (gdb returns the first thread). */
2360 put_packet(s
, "QC1");
2362 } else if (strcmp(p
,"fThreadInfo") == 0) {
2363 s
->query_cpu
= first_cpu
;
2364 goto report_cpuinfo
;
2365 } else if (strcmp(p
,"sThreadInfo") == 0) {
2368 snprintf(buf
, sizeof(buf
), "m%x", cpu_index(s
->query_cpu
));
2370 s
->query_cpu
= s
->query_cpu
->next_cpu
;
2374 } else if (strncmp(p
,"ThreadExtraInfo,", 16) == 0) {
2375 thread
= strtoull(p
+16, (char **)&p
, 16);
2376 env
= find_cpu(thread
);
2378 cpu_synchronize_state(env
);
2379 len
= snprintf((char *)mem_buf
, sizeof(mem_buf
),
2380 "CPU#%d [%s]", env
->cpu_index
,
2381 env
->halted
? "halted " : "running");
2382 memtohex(buf
, mem_buf
, len
);
/* qOffsets (user mode): report load offsets of the guest binary.
 * Bss deliberately reuses data_offset here. */
2387 #ifdef CONFIG_USER_ONLY
2388 else if (strncmp(p
, "Offsets", 7) == 0) {
2389 TaskState
*ts
= s
->c_cpu
->opaque
;
2391 snprintf(buf
, sizeof(buf
),
2392 "Text=" TARGET_ABI_FMT_lx
";Data=" TARGET_ABI_FMT_lx
2393 ";Bss=" TARGET_ABI_FMT_lx
,
2394 ts
->info
->code_offset
,
2395 ts
->info
->data_offset
,
2396 ts
->info
->data_offset
);
/* qRcmd (system mode): hex-encoded monitor command, fed to the
 * gdb monitor channel. */
2400 #else /* !CONFIG_USER_ONLY */
2401 else if (strncmp(p
, "Rcmd,", 5) == 0) {
2402 int len
= strlen(p
+ 5);
2404 if ((len
% 2) != 0) {
2405 put_packet(s
, "E01");
2408 hextomem(mem_buf
, p
+ 5, len
);
2411 qemu_chr_be_write(s
->mon_chr
, mem_buf
, len
);
2412 put_packet(s
, "OK");
2415 #endif /* !CONFIG_USER_ONLY */
/* qSupported: advertise packet size and XML target descriptions. */
2416 if (strncmp(p
, "Supported", 9) == 0) {
2417 snprintf(buf
, sizeof(buf
), "PacketSize=%x", MAX_PACKET_LENGTH
);
2419 pstrcat(buf
, sizeof(buf
), ";qXfer:features:read+");
/* qXfer:features:read: serve a slice [addr, addr+len) of the target
 * description XML, framed as an 'm'/'l' binary reply. */
2425 if (strncmp(p
, "Xfer:features:read:", 19) == 0) {
2427 target_ulong total_len
;
2431 xml
= get_feature_xml(p
, &p
);
2433 snprintf(buf
, sizeof(buf
), "E00");
2440 addr
= strtoul(p
, (char **)&p
, 16);
2443 len
= strtoul(p
, (char **)&p
, 16);
2445 total_len
= strlen(xml
);
2446 if (addr
> total_len
) {
2447 snprintf(buf
, sizeof(buf
), "E00");
/* Clamp to what fits in a packet after framing/escaping. */
2451 if (len
> (MAX_PACKET_LENGTH
- 5) / 2)
2452 len
= (MAX_PACKET_LENGTH
- 5) / 2;
2453 if (len
< total_len
- addr
) {
2455 len
= memtox(buf
+ 1, xml
+ addr
, len
);
2458 len
= memtox(buf
+ 1, xml
+ addr
, total_len
- addr
);
2460 put_packet_binary(s
, buf
, len
+ 1);
2464 /* Unrecognised 'q' command. */
2465 goto unknown_command
;
/* Unknown commands get the mandated empty-packet reply. */
2469 /* put empty packet */
/*
 * gdb_set_stop_cpu(): make 'env' both the current continue-CPU (c_cpu)
 * and register/memory-CPU (g_cpu) of the global gdb stub state, so stop
 * replies and register accesses refer to the CPU that actually stopped.
 */
2477 void gdb_set_stop_cpu(CPUArchState
*env
)
2479 gdbserver_state
->c_cpu
= env
;
2480 gdbserver_state
->g_cpu
= env
;
/*
 * gdb_vm_state_change(): VM run-state change hook (system mode only).
 * When the VM stops, translate the RunState into a GDB stop signal,
 * flush any pending gdb file-I/O syscall request, format a 'T' stop-reply
 * packet (including a watchpoint annotation when one was hit) and disable
 * single-stepping.
 * NOTE(review): truncated extraction -- switch/case framing, 'break's and
 * the final put_packet are missing; surviving tokens kept verbatim.
 */
2483 #ifndef CONFIG_USER_ONLY
2484 static void gdb_vm_state_change(void *opaque
, int running
, RunState state
)
2486 GDBState
*s
= gdbserver_state
;
2487 CPUArchState
*env
= s
->c_cpu
;
/* Only act when the VM has just stopped and the stub is active. */
2492 if (running
|| s
->state
== RS_INACTIVE
) {
2495 /* Is there a GDB syscall waiting to be sent? */
2496 if (s
->current_syscall_cb
) {
2497 put_packet(s
, s
->syscall_buf
);
2501 case RUN_STATE_DEBUG
:
/* Stopped on a watchpoint: report which kind and the address. */
2502 if (env
->watchpoint_hit
) {
2503 switch (env
->watchpoint_hit
->flags
& BP_MEM_ACCESS
) {
2514 snprintf(buf
, sizeof(buf
),
2515 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx
";",
2516 GDB_SIGNAL_TRAP
, cpu_index(env
), type
,
2517 env
->watchpoint_hit
->vaddr
);
2518 env
->watchpoint_hit
= NULL
;
2522 ret
= GDB_SIGNAL_TRAP
;
/* Map the remaining run states onto conventional gdb signals. */
2524 case RUN_STATE_PAUSED
:
2525 ret
= GDB_SIGNAL_INT
;
2527 case RUN_STATE_SHUTDOWN
:
2528 ret
= GDB_SIGNAL_QUIT
;
2530 case RUN_STATE_IO_ERROR
:
2531 ret
= GDB_SIGNAL_IO
;
2533 case RUN_STATE_WATCHDOG
:
2534 ret
= GDB_SIGNAL_ALRM
;
2536 case RUN_STATE_INTERNAL_ERROR
:
2537 ret
= GDB_SIGNAL_ABRT
;
2539 case RUN_STATE_SAVE_VM
:
2540 case RUN_STATE_RESTORE_VM
:
2542 case RUN_STATE_FINISH_MIGRATE
:
2543 ret
= GDB_SIGNAL_XCPU
;
2546 ret
= GDB_SIGNAL_UNKNOWN
;
2549 snprintf(buf
, sizeof(buf
), "T%02xthread:%02x;", ret
, cpu_index(env
));
2554 /* disable single step if it was enabled */
2555 cpu_single_step(env
, 0);
/*
 * gdb_do_syscall(): build and dispatch a gdb file-I/O ('F') request packet
 * from the mini printf-style format described below, then arrange for 'cb'
 * to be invoked with gdb's reply.  In system mode the VM is stopped first
 * and the packet is deferred until the stop is confirmed (see the trailing
 * comment); in user mode it is sent immediately and gdb_handlesig() pumps
 * the reply.
 * NOTE(review): truncated extraction -- the format-scanning loop framing,
 * va_start/va_end and packet prologue are missing.
 */
2559 /* Send a gdb syscall request.
2560 This accepts limited printf-style format specifiers, specifically:
2561 %x - target_ulong argument printed in hex.
2562 %lx - 64-bit argument printed in hex.
2563 %s - string pointer (target_ulong) and length (int) pair. */
2564 void gdb_do_syscall(gdb_syscall_complete_cb cb
, const char *fmt
, ...)
2573 s
= gdbserver_state
;
2576 s
->current_syscall_cb
= cb
;
2577 #ifndef CONFIG_USER_ONLY
2578 vm_stop(RUN_STATE_DEBUG
);
/* p_end bounds the snprintf writes into syscall_buf. */
2582 p_end
= &s
->syscall_buf
[sizeof(s
->syscall_buf
)];
/* %x: target_ulong in target hex format. */
2589 addr
= va_arg(va
, target_ulong
);
2590 p
+= snprintf(p
, p_end
- p
, TARGET_FMT_lx
, addr
);
/* %lx: 64-bit value; anything after 'l' other than 'x' is an error. */
2593 if (*(fmt
++) != 'x')
2595 i64
= va_arg(va
, uint64_t);
2596 p
+= snprintf(p
, p_end
- p
, "%" PRIx64
, i64
);
/* %s: guest pointer / length pair, encoded as addr/len. */
2599 addr
= va_arg(va
, target_ulong
);
2600 p
+= snprintf(p
, p_end
- p
, TARGET_FMT_lx
"/%x",
2601 addr
, va_arg(va
, int));
2605 fprintf(stderr
, "gdbstub: Bad syscall format string '%s'\n",
2615 #ifdef CONFIG_USER_ONLY
2616 put_packet(s
, s
->syscall_buf
);
2617 gdb_handlesig(s
->c_cpu
, 0);
2619 /* In this case wait to send the syscall packet until notification that
2620 the CPU has stopped. This must be done because if the packet is sent
2621 now the reply from the syscall request could be received while the CPU
2622 is still in the running state, which can cause packets to be dropped
2623 and state transition 'T' packets to be sent while the syscall is still
/*
 * gdb_read_byte(): feed one byte from the gdb connection into the RSP
 * parser state machine (RS_IDLE -> RS_GETLINE -> RS_CHKSUM1 -> RS_CHKSUM2).
 * Handles '+'/'-' acknowledgements for the last sent packet (system mode),
 * stops a running VM on any incoming byte, accumulates the payload into
 * line_buf, verifies the two-digit checksum, acks with '+'/'-' and hands a
 * complete packet to gdb_handle_packet().
 * NOTE(review): truncated extraction -- the switch framing and '$'-detect
 * branches are missing; surviving tokens kept verbatim.
 */
2629 static void gdb_read_byte(GDBState
*s
, int ch
)
2634 #ifndef CONFIG_USER_ONLY
2635 if (s
->last_packet_len
) {
2636 /* Waiting for a response to the last packet. If we see the start
2637 of a new command then abandon the previous response. */
/* '-' (NACK): retransmit the saved packet. */
2640 printf("Got NACK, retransmitting\n");
2642 put_buffer(s
, (uint8_t *)s
->last_packet
, s
->last_packet_len
);
2646 printf("Got ACK\n");
2648 printf("Got '%c' when expecting ACK/NACK\n", ch
);
/* ACK or a fresh '$' both release the retransmit buffer. */
2650 if (ch
== '+' || ch
== '$')
2651 s
->last_packet_len
= 0;
2655 if (runstate_is_running()) {
2656 /* when the CPU is running, we cannot do anything except stop
2657 it when receiving a char */
2658 vm_stop(RUN_STATE_PAUSED
);
/* '$' seen: start collecting a new packet body. */
2665 s
->line_buf_index
= 0;
2666 s
->state
= RS_GETLINE
;
/* '#' ends the body; next two bytes are the hex checksum. */
2671 s
->state
= RS_CHKSUM1
;
2672 } else if (s
->line_buf_index
>= sizeof(s
->line_buf
) - 1) {
2675 s
->line_buf
[s
->line_buf_index
++] = ch
;
2679 s
->line_buf
[s
->line_buf_index
] = '\0';
2680 s
->line_csum
= fromhex(ch
) << 4;
2681 s
->state
= RS_CHKSUM2
;
2684 s
->line_csum
|= fromhex(ch
);
/* Verify the modulo-256 sum of the payload bytes. */
2686 for(i
= 0; i
< s
->line_buf_index
; i
++) {
2687 csum
+= s
->line_buf
[i
];
2689 if (s
->line_csum
!= (csum
& 0xff)) {
2691 put_buffer(s
, &reply
, 1);
2695 put_buffer(s
, &reply
, 1);
2696 s
->state
= gdb_handle_packet(s
, s
->line_buf
);
/*
 * gdb_exit(): notify the attached gdb that the guest process/VM exited
 * with status 'code' via a 'W' packet, and (system mode) tear down the
 * chardev backing the connection.  No-op when the stub was never active.
 */
2705 /* Tell the remote gdb that the process has exited. */
2706 void gdb_exit(CPUArchState
*env
, int code
)
2711 s
= gdbserver_state
;
2715 #ifdef CONFIG_USER_ONLY
2716 if (gdbserver_fd
< 0 || s
->fd
< 0) {
2721 snprintf(buf
, sizeof(buf
), "W%02x", (uint8_t)code
);
2724 #ifndef CONFIG_USER_ONLY
2726 qemu_chr_delete(s
->chr
);
/*
 * User-mode-only helper fragment: checks that the gdb stub is connected
 * (both the listening fd and the accepted connection fd are valid) before
 * proceeding.  NOTE(review): the function signature line was lost in
 * extraction -- by position this is presumably gdb_queuesig(); confirm
 * against the full file.
 */
2731 #ifdef CONFIG_USER_ONLY
2737 s
= gdbserver_state
;
2739 if (gdbserver_fd
< 0 || s
->fd
< 0)
/*
 * gdb_handlesig() (user mode): called when the guest receives signal 'sig'.
 * Cancels single-stepping, reports the signal to gdb as an 'S' stop packet,
 * then blocks, pumping bytes from the connection through gdb_read_byte()
 * until a continue/step command sets running_state.  Presumably returns the
 * (possibly replaced) signal number -- return paths lost in extraction.
 */
2746 gdb_handlesig (CPUArchState
*env
, int sig
)
2752 s
= gdbserver_state
;
2753 if (gdbserver_fd
< 0 || s
->fd
< 0)
2756 /* disable single step if it was enabled */
2757 cpu_single_step(env
, 0);
2762 snprintf(buf
, sizeof(buf
), "S%02x", target_signal_to_gdb (sig
));
2765 /* put_packet() might have detected that the peer terminated the
/* Command-pump loop: read() the socket and feed each byte to the RSP
 * parser until gdb tells us to resume. */
2772 s
->running_state
= 0;
2773 while (s
->running_state
== 0) {
2774 n
= read (s
->fd
, buf
, 256);
2779 for (i
= 0; i
< n
; i
++)
2780 gdb_read_byte (s
, buf
[i
]);
/* n == 0 is EOF; any error other than EAGAIN is fatal for the link. */
2782 else if (n
== 0 || errno
!= EAGAIN
)
2784 /* XXX: Connection closed. Should probably wait for another
2785 connection before continuing. */
/*
 * gdb_signalled() (user mode): report that the guest process terminated
 * because of signal 'sig', using an 'X' packet with the gdb-numbered
 * signal.  No-op when the stub is not connected.
 */
2794 /* Tell the remote gdb that the process has exited due to SIG. */
2795 void gdb_signalled(CPUArchState
*env
, int sig
)
2800 s
= gdbserver_state
;
2801 if (gdbserver_fd
< 0 || s
->fd
< 0)
2804 snprintf(buf
, sizeof(buf
), "X%02x", target_signal_to_gdb (sig
));
/*
 * gdb_accept(): block until a gdb client connects to the listening socket,
 * mark the fd close-on-exec and TCP_NODELAY (low latency for the chatty
 * RSP), allocate a fresh GDBState bound to first_cpu, publish it in
 * gdbserver_state, and switch the fd to non-blocking for the pump loop.
 * NOTE(review): truncated extraction -- the accept retry loop framing and
 * error path are missing.
 */
2808 static void gdb_accept(void)
2811 struct sockaddr_in sockaddr
;
2816 len
= sizeof(sockaddr
);
2817 fd
= accept(gdbserver_fd
, (struct sockaddr
*)&sockaddr
, &len
);
/* EINTR: retry; any other failure aborts the accept. */
2818 if (fd
< 0 && errno
!= EINTR
) {
2821 } else if (fd
>= 0) {
2823 fcntl(fd
, F_SETFD
, FD_CLOEXEC
);
2829 /* set short latency */
2831 setsockopt(fd
, IPPROTO_TCP
, TCP_NODELAY
, (char *)&val
, sizeof(val
));
2833 s
= g_malloc0(sizeof(GDBState
));
2834 s
->c_cpu
= first_cpu
;
2835 s
->g_cpu
= first_cpu
;
2839 gdbserver_state
= s
;
2841 fcntl(fd
, F_SETFL
, O_NONBLOCK
);
/*
 * gdbserver_open(): create the listening TCP socket for the gdb stub --
 * IPv4 stream socket, close-on-exec, SO_REUSEADDR for quick restarts,
 * bound to INADDR_ANY:port, then listen().  Presumably returns the fd on
 * success and a negative value on failure (error paths lost in
 * extraction).
 */
2844 static int gdbserver_open(int port
)
2846 struct sockaddr_in sockaddr
;
2849 fd
= socket(PF_INET
, SOCK_STREAM
, 0);
2855 fcntl(fd
, F_SETFD
, FD_CLOEXEC
);
2858 /* allow fast reuse */
2860 setsockopt(fd
, SOL_SOCKET
, SO_REUSEADDR
, (char *)&val
, sizeof(val
));
/* s_addr = 0 is INADDR_ANY: accept connections on all interfaces. */
2862 sockaddr
.sin_family
= AF_INET
;
2863 sockaddr
.sin_port
= htons(port
);
2864 sockaddr
.sin_addr
.s_addr
= 0;
2865 ret
= bind(fd
, (struct sockaddr
*)&sockaddr
, sizeof(sockaddr
));
2871 ret
= listen(fd
, 0);
/*
 * gdbserver_start() (user-mode variant, takes a TCP port): open the
 * listening socket and then wait for the first gdb connection.
 */
2880 int gdbserver_start(int port
)
2882 gdbserver_fd
= gdbserver_open(port
);
2883 if (gdbserver_fd
< 0)
2885 /* accept connections */
/*
 * gdbserver_fork(): called in the child after fork() (user mode) to detach
 * it from the debugger -- the child must not share the parent's gdb
 * connection, and inherited GDB breakpoints/watchpoints are stripped from
 * its CPU state.
 */
2890 /* Disable gdb stub for child processes. */
2891 void gdbserver_fork(CPUArchState
*env
)
2893 GDBState
*s
= gdbserver_state
;
2894 if (gdbserver_fd
< 0 || s
->fd
< 0)
2898 cpu_breakpoint_remove_all(env
, BP_GDB
);
2899 cpu_watchpoint_remove_all(env
, BP_GDB
);
/*
 * gdb_chr_can_receive(): chardev callback reporting how many bytes the
 * stub is willing to accept at once.
 */
2902 static int gdb_chr_can_receive(void *opaque
)
2904 /* We can handle an arbitrarily large amount of data.
2905 Pick the maximum packet size, which is as good as anything. */
2906 return MAX_PACKET_LENGTH
;
/*
 * gdb_chr_receive(): chardev callback -- feed each received byte into the
 * RSP parser state machine of the global gdb stub state.
 */
2909 static void gdb_chr_receive(void *opaque
, const uint8_t *buf
, int size
)
2913 for (i
= 0; i
< size
; i
++) {
2914 gdb_read_byte(gdbserver_state
, buf
[i
]);
/*
 * gdb_chr_event(): chardev event hook.  When a gdb client connects
 * (CHR_EVENT_OPENED) the VM is paused so the debugger starts from a
 * stopped target.  Other handling lost in extraction.
 */
2918 static void gdb_chr_event(void *opaque
, int event
)
2921 case CHR_EVENT_OPENED
:
2922 vm_stop(RUN_STATE_PAUSED
);
/*
 * gdb_monitor_output(): forward monitor output to gdb as an 'O' console
 * packet -- hex-encodes 'msg' into buf (offset 1 leaves room for the
 * leading packet-type byte, presumably 'O'; that line was lost in
 * extraction).  Length is clamped so the 2x hex expansion still fits in
 * one packet.
 */
2930 static void gdb_monitor_output(GDBState
*s
, const char *msg
, int len
)
2932 char buf
[MAX_PACKET_LENGTH
];
2935 if (len
> (MAX_PACKET_LENGTH
/2) - 1)
2936 len
= (MAX_PACKET_LENGTH
/2) - 1;
2937 memtohex(buf
+ 1, (uint8_t *)msg
, len
);
/*
 * gdb_monitor_write(): chr_write backend of the gdb monitor channel --
 * splits arbitrarily long monitor output into chunks no larger than
 * max_sz (half the last_packet buffer minus framing, because each byte
 * becomes two hex digits) and emits each via gdb_monitor_output().
 * NOTE(review): the chunking loop framing and return are lost in
 * extraction.
 */
2941 static int gdb_monitor_write(CharDriverState
*chr
, const uint8_t *buf
, int len
)
2943 const char *p
= (const char *)buf
;
2946 max_sz
= (sizeof(gdbserver_state
->last_packet
) - 2) / 2;
2948 if (len
<= max_sz
) {
2949 gdb_monitor_output(gdbserver_state
, p
, len
);
2952 gdb_monitor_output(gdbserver_state
, p
, max_sz
);
/*
 * gdb_sigterm_handler(): signal handler installed for the "stdio" gdb
 * device -- pauses the VM when the user interrupts QEMU so gdb regains
 * control instead of the process dying.
 */
2960 static void gdb_sigterm_handler(int signal
)
2962 if (runstate_is_running()) {
2963 vm_stop(RUN_STATE_PAUSED
);
2968 int gdbserver_start(const char *device
)
2971 char gdbstub_device_name
[128];
2972 CharDriverState
*chr
= NULL
;
2973 CharDriverState
*mon_chr
;
2977 if (strcmp(device
, "none") != 0) {
2978 if (strstart(device
, "tcp:", NULL
)) {
2979 /* enforce required TCP attributes */
2980 snprintf(gdbstub_device_name
, sizeof(gdbstub_device_name
),
2981 "%s,nowait,nodelay,server", device
);
2982 device
= gdbstub_device_name
;
2985 else if (strcmp(device
, "stdio") == 0) {
2986 struct sigaction act
;
2988 memset(&act
, 0, sizeof(act
));
2989 act
.sa_handler
= gdb_sigterm_handler
;
2990 sigaction(SIGINT
, &act
, NULL
);
2993 chr
= qemu_chr_new("gdb", device
, NULL
);
2997 qemu_chr_add_handlers(chr
, gdb_chr_can_receive
, gdb_chr_receive
,
2998 gdb_chr_event
, NULL
);
3001 s
= gdbserver_state
;
3003 s
= g_malloc0(sizeof(GDBState
));
3004 gdbserver_state
= s
;
3006 qemu_add_vm_change_state_handler(gdb_vm_state_change
, NULL
);
3008 /* Initialize a monitor terminal for gdb */
3009 mon_chr
= g_malloc0(sizeof(*mon_chr
));
3010 mon_chr
->chr_write
= gdb_monitor_write
;
3011 monitor_init(mon_chr
, 0);
3014 qemu_chr_delete(s
->chr
);
3015 mon_chr
= s
->mon_chr
;
3016 memset(s
, 0, sizeof(GDBState
));
3018 s
->c_cpu
= first_cpu
;
3019 s
->g_cpu
= first_cpu
;
3021 s
->state
= chr
? RS_IDLE
: RS_INACTIVE
;
3022 s
->mon_chr
= mon_chr
;
3023 s
->current_syscall_cb
= NULL
;