4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
38 #define MAX_PACKET_LENGTH 4096
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
45 static inline int target_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
46 uint8_t *buf
, int len
, bool is_write
)
48 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
50 if (cc
->memory_rw_debug
) {
51 return cc
->memory_rw_debug(cpu
, addr
, buf
, len
, is_write
);
53 return cpu_memory_rw_debug(cpu
, addr
, buf
, len
, is_write
);
65 GDB_SIGNAL_UNKNOWN
= 143
68 #ifdef CONFIG_USER_ONLY
70 /* Map target signal numbers to GDB protocol signal numbers and vice
71 * versa. For user emulation's currently supported systems, we can
72 * assume most signals are defined.
75 static int gdb_signal_table
[] = {
235 /* In system mode we only need SIGINT and SIGTRAP; other signals
236 are not yet supported. */
243 static int gdb_signal_table
[] = {
253 #ifdef CONFIG_USER_ONLY
254 static int target_signal_to_gdb (int sig
)
257 for (i
= 0; i
< ARRAY_SIZE (gdb_signal_table
); i
++)
258 if (gdb_signal_table
[i
] == sig
)
260 return GDB_SIGNAL_UNKNOWN
;
264 static int gdb_signal_to_target (int sig
)
266 if (sig
< ARRAY_SIZE (gdb_signal_table
))
267 return gdb_signal_table
[sig
];
274 typedef struct GDBRegisterState
{
280 struct GDBRegisterState
*next
;
290 typedef struct GDBState
{
291 CPUState
*c_cpu
; /* current CPU for step/continue ops */
292 CPUState
*g_cpu
; /* current CPU for other ops */
293 CPUState
*query_cpu
; /* for q{f|s}ThreadInfo */
294 enum RSState state
; /* parsing state */
295 char line_buf
[MAX_PACKET_LENGTH
];
298 uint8_t last_packet
[MAX_PACKET_LENGTH
+ 4];
301 #ifdef CONFIG_USER_ONLY
305 CharDriverState
*chr
;
306 CharDriverState
*mon_chr
;
308 char syscall_buf
[256];
309 gdb_syscall_complete_cb current_syscall_cb
;
312 /* By default use no IRQs and no timers while single stepping so as to
313 * make single stepping like an ICE HW step.
315 static int sstep_flags
= SSTEP_ENABLE
|SSTEP_NOIRQ
|SSTEP_NOTIMER
;
317 static GDBState
*gdbserver_state
;
319 /* This is an ugly hack to cope with both new and old gdb.
320 If gdb sends qXfer:features:read then assume we're talking to a newish
321 gdb that understands target descriptions. */
322 static int gdb_has_xml
;
324 #ifdef CONFIG_USER_ONLY
325 /* XXX: This is not thread safe. Do we care? */
326 static int gdbserver_fd
= -1;
328 static int get_char(GDBState
*s
)
334 ret
= qemu_recv(s
->fd
, &ch
, 1, 0);
336 if (errno
== ECONNRESET
)
338 if (errno
!= EINTR
&& errno
!= EAGAIN
)
340 } else if (ret
== 0) {
358 /* If gdb is connected when the first semihosting syscall occurs then use
359 remote gdb syscalls. Otherwise use native file IO. */
360 int use_gdb_syscalls(void)
362 if (gdb_syscall_mode
== GDB_SYS_UNKNOWN
) {
363 gdb_syscall_mode
= (gdbserver_state
? GDB_SYS_ENABLED
366 return gdb_syscall_mode
== GDB_SYS_ENABLED
;
369 /* Resume execution. */
370 static inline void gdb_continue(GDBState
*s
)
372 #ifdef CONFIG_USER_ONLY
373 s
->running_state
= 1;
375 if (runstate_check(RUN_STATE_GUEST_PANICKED
)) {
376 runstate_set(RUN_STATE_DEBUG
);
378 if (!runstate_needs_reset()) {
384 static void put_buffer(GDBState
*s
, const uint8_t *buf
, int len
)
386 #ifdef CONFIG_USER_ONLY
390 ret
= send(s
->fd
, buf
, len
, 0);
392 if (errno
!= EINTR
&& errno
!= EAGAIN
)
400 qemu_chr_fe_write(s
->chr
, buf
, len
);
404 static inline int fromhex(int v
)
406 if (v
>= '0' && v
<= '9')
408 else if (v
>= 'A' && v
<= 'F')
410 else if (v
>= 'a' && v
<= 'f')
416 static inline int tohex(int v
)
424 static void memtohex(char *buf
, const uint8_t *mem
, int len
)
429 for(i
= 0; i
< len
; i
++) {
431 *q
++ = tohex(c
>> 4);
432 *q
++ = tohex(c
& 0xf);
437 static void hextomem(uint8_t *mem
, const char *buf
, int len
)
441 for(i
= 0; i
< len
; i
++) {
442 mem
[i
] = (fromhex(buf
[0]) << 4) | fromhex(buf
[1]);
447 /* return -1 if error, 0 if OK */
448 static int put_packet_binary(GDBState
*s
, const char *buf
, int len
)
459 for(i
= 0; i
< len
; i
++) {
463 *(p
++) = tohex((csum
>> 4) & 0xf);
464 *(p
++) = tohex((csum
) & 0xf);
466 s
->last_packet_len
= p
- s
->last_packet
;
467 put_buffer(s
, (uint8_t *)s
->last_packet
, s
->last_packet_len
);
469 #ifdef CONFIG_USER_ONLY
482 /* return -1 if error, 0 if OK */
483 static int put_packet(GDBState
*s
, const char *buf
)
486 printf("reply='%s'\n", buf
);
489 return put_packet_binary(s
, buf
, strlen(buf
));
492 /* The GDB remote protocol transfers values in target byte order. This means
493 we can use the raw memory access routines to access the value buffer.
494 Conveniently, these also handle the case where the buffer is mis-aligned.
496 #define GET_REG8(val) do { \
497 stb_p(mem_buf, val); \
500 #define GET_REG16(val) do { \
501 stw_p(mem_buf, val); \
504 #define GET_REG32(val) do { \
505 stl_p(mem_buf, val); \
508 #define GET_REG64(val) do { \
509 stq_p(mem_buf, val); \
513 #if TARGET_LONG_BITS == 64
514 #define GET_REGL(val) GET_REG64(val)
515 #define ldtul_p(addr) ldq_p(addr)
517 #define GET_REGL(val) GET_REG32(val)
518 #define ldtul_p(addr) ldl_p(addr)
521 #if defined(TARGET_I386)
524 static const int gpr_map
[16] = {
525 R_EAX
, R_EBX
, R_ECX
, R_EDX
, R_ESI
, R_EDI
, R_EBP
, R_ESP
,
526 8, 9, 10, 11, 12, 13, 14, 15
529 #define gpr_map gpr_map32
531 static const int gpr_map32
[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
533 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
535 #define IDX_IP_REG CPU_NB_REGS
536 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
537 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
538 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
539 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
540 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
542 static int cpu_gdb_read_register(CPUX86State
*env
, uint8_t *mem_buf
, int n
)
544 if (n
< CPU_NB_REGS
) {
545 if (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
) {
546 GET_REG64(env
->regs
[gpr_map
[n
]]);
547 } else if (n
< CPU_NB_REGS32
) {
548 GET_REG32(env
->regs
[gpr_map32
[n
]]);
550 } else if (n
>= IDX_FP_REGS
&& n
< IDX_FP_REGS
+ 8) {
551 #ifdef USE_X86LDOUBLE
552 /* FIXME: byteswap float values - after fixing fpregs layout. */
553 memcpy(mem_buf
, &env
->fpregs
[n
- IDX_FP_REGS
], 10);
555 memset(mem_buf
, 0, 10);
558 } else if (n
>= IDX_XMM_REGS
&& n
< IDX_XMM_REGS
+ CPU_NB_REGS
) {
560 if (n
< CPU_NB_REGS32
||
561 (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
)) {
562 stq_p(mem_buf
, env
->xmm_regs
[n
].XMM_Q(0));
563 stq_p(mem_buf
+ 8, env
->xmm_regs
[n
].XMM_Q(1));
569 if (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
) {
575 GET_REG32(env
->eflags
);
578 GET_REG32(env
->segs
[R_CS
].selector
);
579 case IDX_SEG_REGS
+ 1:
580 GET_REG32(env
->segs
[R_SS
].selector
);
581 case IDX_SEG_REGS
+ 2:
582 GET_REG32(env
->segs
[R_DS
].selector
);
583 case IDX_SEG_REGS
+ 3:
584 GET_REG32(env
->segs
[R_ES
].selector
);
585 case IDX_SEG_REGS
+ 4:
586 GET_REG32(env
->segs
[R_FS
].selector
);
587 case IDX_SEG_REGS
+ 5:
588 GET_REG32(env
->segs
[R_GS
].selector
);
590 case IDX_FP_REGS
+ 8:
591 GET_REG32(env
->fpuc
);
592 case IDX_FP_REGS
+ 9:
593 GET_REG32((env
->fpus
& ~0x3800) |
594 (env
->fpstt
& 0x7) << 11);
595 case IDX_FP_REGS
+ 10:
596 GET_REG32(0); /* ftag */
597 case IDX_FP_REGS
+ 11:
598 GET_REG32(0); /* fiseg */
599 case IDX_FP_REGS
+ 12:
600 GET_REG32(0); /* fioff */
601 case IDX_FP_REGS
+ 13:
602 GET_REG32(0); /* foseg */
603 case IDX_FP_REGS
+ 14:
604 GET_REG32(0); /* fooff */
605 case IDX_FP_REGS
+ 15:
606 GET_REG32(0); /* fop */
609 GET_REG32(env
->mxcsr
);
615 static int cpu_x86_gdb_load_seg(CPUX86State
*env
, int sreg
, uint8_t *mem_buf
)
617 uint16_t selector
= ldl_p(mem_buf
);
619 if (selector
!= env
->segs
[sreg
].selector
) {
620 #if defined(CONFIG_USER_ONLY)
621 cpu_x86_load_seg(env
, sreg
, selector
);
623 unsigned int limit
, flags
;
626 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
627 base
= selector
<< 4;
631 if (!cpu_x86_get_descr_debug(env
, selector
, &base
, &limit
,
636 cpu_x86_load_seg_cache(env
, sreg
, selector
, base
, limit
, flags
);
642 static int cpu_gdb_write_register(CPUX86State
*env
, uint8_t *mem_buf
, int n
)
646 if (n
< CPU_NB_REGS
) {
647 if (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
) {
648 env
->regs
[gpr_map
[n
]] = ldtul_p(mem_buf
);
649 return sizeof(target_ulong
);
650 } else if (n
< CPU_NB_REGS32
) {
652 env
->regs
[n
] &= ~0xffffffffUL
;
653 env
->regs
[n
] |= (uint32_t)ldl_p(mem_buf
);
656 } else if (n
>= IDX_FP_REGS
&& n
< IDX_FP_REGS
+ 8) {
657 #ifdef USE_X86LDOUBLE
658 /* FIXME: byteswap float values - after fixing fpregs layout. */
659 memcpy(&env
->fpregs
[n
- IDX_FP_REGS
], mem_buf
, 10);
662 } else if (n
>= IDX_XMM_REGS
&& n
< IDX_XMM_REGS
+ CPU_NB_REGS
) {
664 if (n
< CPU_NB_REGS32
||
665 (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
)) {
666 env
->xmm_regs
[n
].XMM_Q(0) = ldq_p(mem_buf
);
667 env
->xmm_regs
[n
].XMM_Q(1) = ldq_p(mem_buf
+ 8);
673 if (TARGET_LONG_BITS
== 64 && env
->hflags
& HF_CS64_MASK
) {
674 env
->eip
= ldq_p(mem_buf
);
677 env
->eip
&= ~0xffffffffUL
;
678 env
->eip
|= (uint32_t)ldl_p(mem_buf
);
682 env
->eflags
= ldl_p(mem_buf
);
686 return cpu_x86_gdb_load_seg(env
, R_CS
, mem_buf
);
687 case IDX_SEG_REGS
+ 1:
688 return cpu_x86_gdb_load_seg(env
, R_SS
, mem_buf
);
689 case IDX_SEG_REGS
+ 2:
690 return cpu_x86_gdb_load_seg(env
, R_DS
, mem_buf
);
691 case IDX_SEG_REGS
+ 3:
692 return cpu_x86_gdb_load_seg(env
, R_ES
, mem_buf
);
693 case IDX_SEG_REGS
+ 4:
694 return cpu_x86_gdb_load_seg(env
, R_FS
, mem_buf
);
695 case IDX_SEG_REGS
+ 5:
696 return cpu_x86_gdb_load_seg(env
, R_GS
, mem_buf
);
698 case IDX_FP_REGS
+ 8:
699 env
->fpuc
= ldl_p(mem_buf
);
701 case IDX_FP_REGS
+ 9:
702 tmp
= ldl_p(mem_buf
);
703 env
->fpstt
= (tmp
>> 11) & 7;
704 env
->fpus
= tmp
& ~0x3800;
706 case IDX_FP_REGS
+ 10: /* ftag */
708 case IDX_FP_REGS
+ 11: /* fiseg */
710 case IDX_FP_REGS
+ 12: /* fioff */
712 case IDX_FP_REGS
+ 13: /* foseg */
714 case IDX_FP_REGS
+ 14: /* fooff */
716 case IDX_FP_REGS
+ 15: /* fop */
720 env
->mxcsr
= ldl_p(mem_buf
);
724 /* Unrecognised register. */
728 #elif defined (TARGET_PPC)
730 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
731 expects whatever the target description contains. Due to a
732 historical mishap the FP registers appear in between core integer
733 regs and PC, MSR, CR, and so forth. We hack round this by giving the
734 FP regs zero size when talking to a newer gdb. */
735 #define NUM_CORE_REGS 71
736 #if defined (TARGET_PPC64)
737 #define GDB_CORE_XML "power64-core.xml"
739 #define GDB_CORE_XML "power-core.xml"
742 static int cpu_gdb_read_register(CPUPPCState
*env
, uint8_t *mem_buf
, int n
)
746 GET_REGL(env
->gpr
[n
]);
752 stfq_p(mem_buf
, env
->fpr
[n
-32]);
764 for (i
= 0; i
< 8; i
++) {
765 cr
|= env
->crf
[i
] << (32 - ((i
+ 1) * 4));
780 GET_REG32(env
->fpscr
);
787 static int cpu_gdb_write_register(CPUPPCState
*env
, uint8_t *mem_buf
, int n
)
791 env
->gpr
[n
] = ldtul_p(mem_buf
);
792 return sizeof(target_ulong
);
798 env
->fpr
[n
-32] = ldfq_p(mem_buf
);
803 env
->nip
= ldtul_p(mem_buf
);
804 return sizeof(target_ulong
);
806 ppc_store_msr(env
, ldtul_p(mem_buf
));
807 return sizeof(target_ulong
);
810 uint32_t cr
= ldl_p(mem_buf
);
812 for (i
= 0; i
< 8; i
++) {
813 env
->crf
[i
] = (cr
>> (32 - ((i
+ 1) * 4))) & 0xF;
818 env
->lr
= ldtul_p(mem_buf
);
819 return sizeof(target_ulong
);
821 env
->ctr
= ldtul_p(mem_buf
);
822 return sizeof(target_ulong
);
824 env
->xer
= ldtul_p(mem_buf
);
825 return sizeof(target_ulong
);
831 store_fpscr(env
, ldtul_p(mem_buf
), 0xffffffff);
832 return sizeof(target_ulong
);
838 #elif defined (TARGET_SPARC)
840 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
841 #define NUM_CORE_REGS 86
843 #define NUM_CORE_REGS 72
847 #define GET_REGA(val) GET_REG32(val)
849 #define GET_REGA(val) GET_REGL(val)
852 static int cpu_gdb_read_register(CPUSPARCState
*env
, uint8_t *mem_buf
, int n
)
856 GET_REGA(env
->gregs
[n
]);
859 /* register window */
860 GET_REGA(env
->regwptr
[n
- 8]);
862 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
866 GET_REG32(env
->fpr
[(n
- 32) / 2].l
.lower
);
868 GET_REG32(env
->fpr
[(n
- 32) / 2].l
.upper
);
871 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
876 GET_REGA(cpu_get_psr(env
));
888 GET_REGA(0); /* csr */
896 GET_REG32(env
->fpr
[(n
- 32) / 2].l
.lower
);
898 GET_REG32(env
->fpr
[(n
- 32) / 2].l
.upper
);
902 /* f32-f62 (double width, even numbers only) */
903 GET_REG64(env
->fpr
[(n
- 32) / 2].ll
);
911 GET_REGL((cpu_get_ccr(env
) << 32) |
912 ((env
->asi
& 0xff) << 24) |
913 ((env
->pstate
& 0xfff) << 8) |
926 static int cpu_gdb_write_register(CPUSPARCState
*env
, uint8_t *mem_buf
, int n
)
928 #if defined(TARGET_ABI32)
931 tmp
= ldl_p(mem_buf
);
935 tmp
= ldtul_p(mem_buf
);
942 /* register window */
943 env
->regwptr
[n
- 8] = tmp
;
945 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
950 env
->fpr
[(n
- 32) / 2].l
.lower
= tmp
;
952 env
->fpr
[(n
- 32) / 2].l
.upper
= tmp
;
955 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
961 cpu_put_psr(env
, tmp
);
986 tmp
= ldl_p(mem_buf
);
988 env
->fpr
[(n
- 32) / 2].l
.lower
= tmp
;
990 env
->fpr
[(n
- 32) / 2].l
.upper
= tmp
;
994 /* f32-f62 (double width, even numbers only) */
995 env
->fpr
[(n
- 32) / 2].ll
= tmp
;
1005 cpu_put_ccr(env
, tmp
>> 32);
1006 env
->asi
= (tmp
>> 24) & 0xff;
1007 env
->pstate
= (tmp
>> 8) & 0xfff;
1008 cpu_put_cwp64(env
, tmp
& 0xff);
1026 #elif defined (TARGET_ARM)
1028 /* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect
1029 whatever the target description contains. Due to a historical mishap
1030 the FPA registers appear in between core integer regs and the CPSR.
1031 We hack round this by giving the FPA regs zero size when talking to a
1033 #define NUM_CORE_REGS 26
1034 #define GDB_CORE_XML "arm-core.xml"
1036 static int cpu_gdb_read_register(CPUARMState
*env
, uint8_t *mem_buf
, int n
)
1039 /* Core integer register. */
1040 GET_REG32(env
->regs
[n
]);
1043 /* FPA registers. */
1047 memset(mem_buf
, 0, 12);
1052 /* FPA status register. */
1059 GET_REG32(cpsr_read(env
));
1061 /* Unknown register. */
1065 static int cpu_gdb_write_register(CPUARMState
*env
, uint8_t *mem_buf
, int n
)
1069 tmp
= ldl_p(mem_buf
);
1071 /* Mask out low bit of PC to workaround gdb bugs. This will probably
1072 cause problems if we ever implement the Jazelle DBX extensions. */
1078 /* Core integer register. */
1082 if (n
< 24) { /* 16-23 */
1083 /* FPA registers (ignored). */
1091 /* FPA status register (ignored). */
1098 cpsr_write(env
, tmp
, 0xffffffff);
1101 /* Unknown register. */
1105 #elif defined (TARGET_M68K)
1107 #define NUM_CORE_REGS 18
1109 #define GDB_CORE_XML "cf-core.xml"
1111 static int cpu_gdb_read_register(CPUM68KState
*env
, uint8_t *mem_buf
, int n
)
1115 GET_REG32(env
->dregs
[n
]);
1116 } else if (n
< 16) {
1118 GET_REG32(env
->aregs
[n
- 8]);
1127 /* FP registers not included here because they vary between
1128 ColdFire and m68k. Use XML bits for these. */
1132 static int cpu_gdb_write_register(CPUM68KState
*env
, uint8_t *mem_buf
, int n
)
1136 tmp
= ldl_p(mem_buf
);
1140 env
->dregs
[n
] = tmp
;
1141 } else if (n
< 16) {
1143 env
->aregs
[n
- 8] = tmp
;
1158 #elif defined (TARGET_MIPS)
1160 #define NUM_CORE_REGS 73
1162 static int cpu_gdb_read_register(CPUMIPSState
*env
, uint8_t *mem_buf
, int n
)
1165 GET_REGL(env
->active_tc
.gpr
[n
]);
1167 if (env
->CP0_Config1
& (1 << CP0C1_FP
)) {
1168 if (n
>= 38 && n
< 70) {
1169 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
1170 GET_REGL(env
->active_fpu
.fpr
[n
- 38].d
);
1172 GET_REGL(env
->active_fpu
.fpr
[n
- 38].w
[FP_ENDIAN_IDX
]);
1177 GET_REGL((int32_t)env
->active_fpu
.fcr31
);
1179 GET_REGL((int32_t)env
->active_fpu
.fcr0
);
1184 GET_REGL((int32_t)env
->CP0_Status
);
1186 GET_REGL(env
->active_tc
.LO
[0]);
1188 GET_REGL(env
->active_tc
.HI
[0]);
1190 GET_REGL(env
->CP0_BadVAddr
);
1192 GET_REGL((int32_t)env
->CP0_Cause
);
1194 GET_REGL(env
->active_tc
.PC
| !!(env
->hflags
& MIPS_HFLAG_M16
));
1196 GET_REGL(0); /* fp */
1198 GET_REGL((int32_t)env
->CP0_PRid
);
1200 if (n
>= 73 && n
<= 88) {
1201 /* 16 embedded regs. */
1208 /* convert MIPS rounding mode in FCR31 to IEEE library */
1209 static unsigned int ieee_rm
[] = {
1210 float_round_nearest_even
,
1211 float_round_to_zero
,
1215 #define RESTORE_ROUNDING_MODE \
1216 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], \
1217 &env->active_fpu.fp_status)
1219 static int cpu_gdb_write_register(CPUMIPSState
*env
, uint8_t *mem_buf
, int n
)
1223 tmp
= ldtul_p(mem_buf
);
1226 env
->active_tc
.gpr
[n
] = tmp
;
1227 return sizeof(target_ulong
);
1229 if (env
->CP0_Config1
& (1 << CP0C1_FP
)
1230 && n
>= 38 && n
< 73) {
1232 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
1233 env
->active_fpu
.fpr
[n
- 38].d
= tmp
;
1235 env
->active_fpu
.fpr
[n
- 38].w
[FP_ENDIAN_IDX
] = tmp
;
1240 env
->active_fpu
.fcr31
= tmp
& 0xFF83FFFF;
1241 /* set rounding mode */
1242 RESTORE_ROUNDING_MODE
;
1245 env
->active_fpu
.fcr0
= tmp
;
1248 return sizeof(target_ulong
);
1252 env
->CP0_Status
= tmp
;
1255 env
->active_tc
.LO
[0] = tmp
;
1258 env
->active_tc
.HI
[0] = tmp
;
1261 env
->CP0_BadVAddr
= tmp
;
1264 env
->CP0_Cause
= tmp
;
1267 env
->active_tc
.PC
= tmp
& ~(target_ulong
)1;
1269 env
->hflags
|= MIPS_HFLAG_M16
;
1271 env
->hflags
&= ~(MIPS_HFLAG_M16
);
1274 case 72: /* fp, ignored */
1280 /* Other registers are readonly. Ignore writes. */
1284 return sizeof(target_ulong
);
1286 #elif defined(TARGET_OPENRISC)
1288 #define NUM_CORE_REGS (32 + 3)
1290 static int cpu_gdb_read_register(CPUOpenRISCState
*env
, uint8_t *mem_buf
, int n
)
1293 GET_REG32(env
->gpr
[n
]);
1297 GET_REG32(env
->ppc
);
1300 GET_REG32(env
->npc
);
1312 static int cpu_gdb_write_register(CPUOpenRISCState
*env
,
1313 uint8_t *mem_buf
, int n
)
1317 if (n
> NUM_CORE_REGS
) {
1321 tmp
= ldl_p(mem_buf
);
1345 #elif defined (TARGET_SH4)
1347 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1348 /* FIXME: We should use XML for this. */
1350 #define NUM_CORE_REGS 59
1352 static int cpu_gdb_read_register(CPUSH4State
*env
, uint8_t *mem_buf
, int n
)
1356 if ((env
->sr
& (SR_MD
| SR_RB
)) == (SR_MD
| SR_RB
)) {
1357 GET_REGL(env
->gregs
[n
+ 16]);
1359 GET_REGL(env
->gregs
[n
]);
1362 GET_REGL(env
->gregs
[n
]);
1372 GET_REGL(env
->mach
);
1374 GET_REGL(env
->macl
);
1378 GET_REGL(env
->fpul
);
1380 GET_REGL(env
->fpscr
);
1382 if (env
->fpscr
& FPSCR_FR
) {
1383 stfl_p(mem_buf
, env
->fregs
[n
- 9]);
1385 stfl_p(mem_buf
, env
->fregs
[n
- 25]);
1393 GET_REGL(env
->gregs
[n
- 43]);
1395 GET_REGL(env
->gregs
[n
- (51 - 16)]);
1401 static int cpu_gdb_write_register(CPUSH4State
*env
, uint8_t *mem_buf
, int n
)
1405 if ((env
->sr
& (SR_MD
| SR_RB
)) == (SR_MD
| SR_RB
)) {
1406 env
->gregs
[n
+ 16] = ldl_p(mem_buf
);
1408 env
->gregs
[n
] = ldl_p(mem_buf
);
1412 env
->gregs
[n
] = ldl_p(mem_buf
);
1415 env
->pc
= ldl_p(mem_buf
);
1418 env
->pr
= ldl_p(mem_buf
);
1421 env
->gbr
= ldl_p(mem_buf
);
1424 env
->vbr
= ldl_p(mem_buf
);
1427 env
->mach
= ldl_p(mem_buf
);
1430 env
->macl
= ldl_p(mem_buf
);
1433 env
->sr
= ldl_p(mem_buf
);
1436 env
->fpul
= ldl_p(mem_buf
);
1439 env
->fpscr
= ldl_p(mem_buf
);
1442 if (env
->fpscr
& FPSCR_FR
) {
1443 env
->fregs
[n
- 9] = ldfl_p(mem_buf
);
1445 env
->fregs
[n
- 25] = ldfl_p(mem_buf
);
1449 env
->ssr
= ldl_p(mem_buf
);
1452 env
->spc
= ldl_p(mem_buf
);
1455 env
->gregs
[n
- 43] = ldl_p(mem_buf
);
1458 env
->gregs
[n
- (51 - 16)] = ldl_p(mem_buf
);
1466 #elif defined (TARGET_MICROBLAZE)
1468 #define NUM_CORE_REGS (32 + 5)
1470 static int cpu_gdb_read_register(CPUMBState
*env
, uint8_t *mem_buf
, int n
)
1473 GET_REG32(env
->regs
[n
]);
1475 GET_REG32(env
->sregs
[n
- 32]);
1480 static int cpu_gdb_write_register(CPUMBState
*env
, uint8_t *mem_buf
, int n
)
1484 if (n
> NUM_CORE_REGS
) {
1488 tmp
= ldl_p(mem_buf
);
1493 env
->sregs
[n
- 32] = tmp
;
1497 #elif defined (TARGET_CRIS)
1499 #define NUM_CORE_REGS 49
1502 read_register_crisv10(CPUCRISState
*env
, uint8_t *mem_buf
, int n
)
1505 GET_REG32(env
->regs
[n
]);
1515 GET_REG8(env
->pregs
[n
- 16]);
1517 GET_REG8(env
->pregs
[n
- 16]);
1520 GET_REG16(env
->pregs
[n
- 16]);
1523 GET_REG32(env
->pregs
[n
- 16]);
1531 static int cpu_gdb_read_register(CPUCRISState
*env
, uint8_t *mem_buf
, int n
)
1535 if (env
->pregs
[PR_VR
] < 32) {
1536 return read_register_crisv10(env
, mem_buf
, n
);
1539 srs
= env
->pregs
[PR_SRS
];
1541 GET_REG32(env
->regs
[n
]);
1544 if (n
>= 21 && n
< 32) {
1545 GET_REG32(env
->pregs
[n
- 16]);
1547 if (n
>= 33 && n
< 49) {
1548 GET_REG32(env
->sregs
[srs
][n
- 33]);
1552 GET_REG8(env
->pregs
[0]);
1554 GET_REG8(env
->pregs
[1]);
1556 GET_REG32(env
->pregs
[2]);
1560 GET_REG16(env
->pregs
[4]);
1568 static int cpu_gdb_write_register(CPUCRISState
*env
, uint8_t *mem_buf
, int n
)
1576 tmp
= ldl_p(mem_buf
);
1582 if (n
>= 21 && n
< 32) {
1583 env
->pregs
[n
- 16] = tmp
;
1586 /* FIXME: Should support function regs be writable? */
1593 env
->pregs
[PR_PID
] = tmp
;
1606 #elif defined (TARGET_ALPHA)
1608 #define NUM_CORE_REGS 67
1610 static int cpu_gdb_read_register(CPUAlphaState
*env
, uint8_t *mem_buf
, int n
)
1620 d
.d
= env
->fir
[n
- 32];
1624 val
= cpu_alpha_load_fpcr(env
);
1634 /* 31 really is the zero register; 65 is unassigned in the
1635 gdb protocol, but is still required to occupy 8 bytes. */
1644 static int cpu_gdb_write_register(CPUAlphaState
*env
, uint8_t *mem_buf
, int n
)
1646 target_ulong tmp
= ldtul_p(mem_buf
);
1655 env
->fir
[n
- 32] = d
.d
;
1658 cpu_alpha_store_fpcr(env
, tmp
);
1668 /* 31 really is the zero register; 65 is unassigned in the
1669 gdb protocol, but is still required to occupy 8 bytes. */
1676 #elif defined (TARGET_S390X)
1678 #define NUM_CORE_REGS S390_NUM_REGS
1680 static int cpu_gdb_read_register(CPUS390XState
*env
, uint8_t *mem_buf
, int n
)
1686 case S390_PSWM_REGNUM
:
1687 cc_op
= calc_cc(env
, env
->cc_op
, env
->cc_src
, env
->cc_dst
, env
->cc_vr
);
1688 val
= deposit64(env
->psw
.mask
, 44, 2, cc_op
);
1690 case S390_PSWA_REGNUM
:
1691 GET_REGL(env
->psw
.addr
);
1692 case S390_R0_REGNUM
... S390_R15_REGNUM
:
1693 GET_REGL(env
->regs
[n
-S390_R0_REGNUM
]);
1694 case S390_A0_REGNUM
... S390_A15_REGNUM
:
1695 GET_REG32(env
->aregs
[n
-S390_A0_REGNUM
]);
1696 case S390_FPC_REGNUM
:
1697 GET_REG32(env
->fpc
);
1698 case S390_F0_REGNUM
... S390_F15_REGNUM
:
1699 GET_REG64(env
->fregs
[n
-S390_F0_REGNUM
].ll
);
1705 static int cpu_gdb_write_register(CPUS390XState
*env
, uint8_t *mem_buf
, int n
)
1710 tmpl
= ldtul_p(mem_buf
);
1711 tmp32
= ldl_p(mem_buf
);
1714 case S390_PSWM_REGNUM
:
1715 env
->psw
.mask
= tmpl
;
1716 env
->cc_op
= extract64(tmpl
, 44, 2);
1718 case S390_PSWA_REGNUM
:
1719 env
->psw
.addr
= tmpl
;
1721 case S390_R0_REGNUM
... S390_R15_REGNUM
:
1722 env
->regs
[n
-S390_R0_REGNUM
] = tmpl
;
1724 case S390_A0_REGNUM
... S390_A15_REGNUM
:
1725 env
->aregs
[n
-S390_A0_REGNUM
] = tmp32
;
1728 case S390_FPC_REGNUM
:
1732 case S390_F0_REGNUM
... S390_F15_REGNUM
:
1733 env
->fregs
[n
-S390_F0_REGNUM
].ll
= tmpl
;
1740 #elif defined (TARGET_LM32)
1742 #include "hw/lm32/lm32_pic.h"
1743 #define NUM_CORE_REGS (32 + 7)
1745 static int cpu_gdb_read_register(CPULM32State
*env
, uint8_t *mem_buf
, int n
)
1748 GET_REG32(env
->regs
[n
]);
1753 /* FIXME: put in right exception ID */
1757 GET_REG32(env
->eba
);
1759 GET_REG32(env
->deba
);
1763 GET_REG32(lm32_pic_get_im(env
->pic_state
));
1765 GET_REG32(lm32_pic_get_ip(env
->pic_state
));
1771 static int cpu_gdb_write_register(CPULM32State
*env
, uint8_t *mem_buf
, int n
)
1775 if (n
> NUM_CORE_REGS
) {
1779 tmp
= ldl_p(mem_buf
);
1798 lm32_pic_set_im(env
->pic_state
, tmp
);
1801 lm32_pic_set_ip(env
->pic_state
, tmp
);
1807 #elif defined(TARGET_XTENSA)
1809 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1810 * Use num_regs to see all registers. gdb modification is required for that:
1811 * reset bit 0 in the 'flags' field of the registers definitions in the
1812 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1814 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1815 #define num_g_regs NUM_CORE_REGS
1817 static int cpu_gdb_read_register(CPUXtensaState
*env
, uint8_t *mem_buf
, int n
)
1819 const XtensaGdbReg
*reg
= env
->config
->gdb_regmap
.reg
+ n
;
1821 if (n
< 0 || n
>= env
->config
->gdb_regmap
.num_regs
) {
1825 switch (reg
->type
) {
1830 xtensa_sync_phys_from_window(env
);
1831 GET_REG32(env
->phys_regs
[(reg
->targno
& 0xff) % env
->config
->nareg
]);
1834 GET_REG32(env
->sregs
[reg
->targno
& 0xff]);
1837 GET_REG32(env
->uregs
[reg
->targno
& 0xff]);
1840 GET_REG32(float32_val(env
->fregs
[reg
->targno
& 0x0f]));
1843 GET_REG32(env
->regs
[reg
->targno
& 0x0f]);
1846 qemu_log("%s from reg %d of unsupported type %d\n",
1847 __func__
, n
, reg
->type
);
1852 static int cpu_gdb_write_register(CPUXtensaState
*env
, uint8_t *mem_buf
, int n
)
1855 const XtensaGdbReg
*reg
= env
->config
->gdb_regmap
.reg
+ n
;
1857 if (n
< 0 || n
>= env
->config
->gdb_regmap
.num_regs
) {
1861 tmp
= ldl_p(mem_buf
);
1863 switch (reg
->type
) {
1869 env
->phys_regs
[(reg
->targno
& 0xff) % env
->config
->nareg
] = tmp
;
1870 xtensa_sync_window_from_phys(env
);
1874 env
->sregs
[reg
->targno
& 0xff] = tmp
;
1878 env
->uregs
[reg
->targno
& 0xff] = tmp
;
1882 env
->fregs
[reg
->targno
& 0x0f] = make_float32(tmp
);
1886 env
->regs
[reg
->targno
& 0x0f] = tmp
;
1890 qemu_log("%s to reg %d of unsupported type %d\n",
1891 __func__
, n
, reg
->type
);
1899 #define NUM_CORE_REGS 0
1901 static int cpu_gdb_read_register(CPUArchState
*env
, uint8_t *mem_buf
, int n
)
1906 static int cpu_gdb_write_register(CPUArchState
*env
, uint8_t *mem_buf
, int n
)
1913 #if !defined(TARGET_XTENSA)
1914 static int num_g_regs
= NUM_CORE_REGS
;
1918 /* Encode data using the encoding for 'x' packets. */
1919 static int memtox(char *buf
, const char *mem
, int len
)
1927 case '#': case '$': case '*': case '}':
1939 static const char *get_feature_xml(const char *p
, const char **newp
)
1944 static char target_xml
[1024];
1947 while (p
[len
] && p
[len
] != ':')
1952 if (strncmp(p
, "target.xml", len
) == 0) {
1953 /* Generate the XML description for this CPU. */
1954 if (!target_xml
[0]) {
1955 GDBRegisterState
*r
;
1956 CPUState
*cpu
= first_cpu
;
1958 snprintf(target_xml
, sizeof(target_xml
),
1959 "<?xml version=\"1.0\"?>"
1960 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1962 "<xi:include href=\"%s\"/>",
1965 for (r
= cpu
->gdb_regs
; r
; r
= r
->next
) {
1966 pstrcat(target_xml
, sizeof(target_xml
), "<xi:include href=\"");
1967 pstrcat(target_xml
, sizeof(target_xml
), r
->xml
);
1968 pstrcat(target_xml
, sizeof(target_xml
), "\"/>");
1970 pstrcat(target_xml
, sizeof(target_xml
), "</target>");
1974 for (i
= 0; ; i
++) {
1975 name
= xml_builtin
[i
][0];
1976 if (!name
|| (strncmp(name
, p
, len
) == 0 && strlen(name
) == len
))
1979 return name
? xml_builtin
[i
][1] : NULL
;
1983 static int gdb_read_register(CPUState
*cpu
, uint8_t *mem_buf
, int reg
)
1985 CPUArchState
*env
= cpu
->env_ptr
;
1986 GDBRegisterState
*r
;
1988 if (reg
< NUM_CORE_REGS
)
1989 return cpu_gdb_read_register(env
, mem_buf
, reg
);
1991 for (r
= cpu
->gdb_regs
; r
; r
= r
->next
) {
1992 if (r
->base_reg
<= reg
&& reg
< r
->base_reg
+ r
->num_regs
) {
1993 return r
->get_reg(env
, mem_buf
, reg
- r
->base_reg
);
1999 static int gdb_write_register(CPUState
*cpu
, uint8_t *mem_buf
, int reg
)
2001 CPUArchState
*env
= cpu
->env_ptr
;
2002 GDBRegisterState
*r
;
2004 if (reg
< NUM_CORE_REGS
)
2005 return cpu_gdb_write_register(env
, mem_buf
, reg
);
2007 for (r
= cpu
->gdb_regs
; r
; r
= r
->next
) {
2008 if (r
->base_reg
<= reg
&& reg
< r
->base_reg
+ r
->num_regs
) {
2009 return r
->set_reg(env
, mem_buf
, reg
- r
->base_reg
);
2015 #if !defined(TARGET_XTENSA)
2016 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
2017 specifies the first register number and these registers are included in
2018 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
2019 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
2022 void gdb_register_coprocessor(CPUState
*cpu
,
2023 gdb_reg_cb get_reg
, gdb_reg_cb set_reg
,
2024 int num_regs
, const char *xml
, int g_pos
)
2026 GDBRegisterState
*s
;
2027 GDBRegisterState
**p
;
2028 static int last_reg
= NUM_CORE_REGS
;
2032 /* Check for duplicates. */
2033 if (strcmp((*p
)->xml
, xml
) == 0)
2038 s
= g_new0(GDBRegisterState
, 1);
2039 s
->base_reg
= last_reg
;
2040 s
->num_regs
= num_regs
;
2041 s
->get_reg
= get_reg
;
2042 s
->set_reg
= set_reg
;
2045 /* Add to end of list. */
2046 last_reg
+= num_regs
;
2049 if (g_pos
!= s
->base_reg
) {
2050 fprintf(stderr
, "Error: Bad gdb register numbering for '%s'\n"
2051 "Expected %d got %d\n", xml
, g_pos
, s
->base_reg
);
2053 num_g_regs
= last_reg
;
2059 #ifndef CONFIG_USER_ONLY
2060 static const int xlat_gdb_type
[] = {
2061 [GDB_WATCHPOINT_WRITE
] = BP_GDB
| BP_MEM_WRITE
,
2062 [GDB_WATCHPOINT_READ
] = BP_GDB
| BP_MEM_READ
,
2063 [GDB_WATCHPOINT_ACCESS
] = BP_GDB
| BP_MEM_ACCESS
,
2067 static int gdb_breakpoint_insert(target_ulong addr
, target_ulong len
, int type
)
2073 if (kvm_enabled()) {
2074 return kvm_insert_breakpoint(gdbserver_state
->c_cpu
, addr
, len
, type
);
2078 case GDB_BREAKPOINT_SW
:
2079 case GDB_BREAKPOINT_HW
:
2080 for (cpu
= first_cpu
; cpu
!= NULL
; cpu
= cpu
->next_cpu
) {
2082 err
= cpu_breakpoint_insert(env
, addr
, BP_GDB
, NULL
);
2087 #ifndef CONFIG_USER_ONLY
2088 case GDB_WATCHPOINT_WRITE
:
2089 case GDB_WATCHPOINT_READ
:
2090 case GDB_WATCHPOINT_ACCESS
:
2091 for (cpu
= first_cpu
; cpu
!= NULL
; cpu
= cpu
->next_cpu
) {
2093 err
= cpu_watchpoint_insert(env
, addr
, len
, xlat_gdb_type
[type
],
2105 static int gdb_breakpoint_remove(target_ulong addr
, target_ulong len
, int type
)
2111 if (kvm_enabled()) {
2112 return kvm_remove_breakpoint(gdbserver_state
->c_cpu
, addr
, len
, type
);
2116 case GDB_BREAKPOINT_SW
:
2117 case GDB_BREAKPOINT_HW
:
2118 for (cpu
= first_cpu
; cpu
!= NULL
; cpu
= cpu
->next_cpu
) {
2120 err
= cpu_breakpoint_remove(env
, addr
, BP_GDB
);
2125 #ifndef CONFIG_USER_ONLY
2126 case GDB_WATCHPOINT_WRITE
:
2127 case GDB_WATCHPOINT_READ
:
2128 case GDB_WATCHPOINT_ACCESS
:
2129 for (cpu
= first_cpu
; cpu
!= NULL
; cpu
= cpu
->next_cpu
) {
2131 err
= cpu_watchpoint_remove(env
, addr
, len
, xlat_gdb_type
[type
]);
2142 static void gdb_breakpoint_remove_all(void)
2147 if (kvm_enabled()) {
2148 kvm_remove_all_breakpoints(gdbserver_state
->c_cpu
);
2152 for (cpu
= first_cpu
; cpu
!= NULL
; cpu
= cpu
->next_cpu
) {
2154 cpu_breakpoint_remove_all(env
, BP_GDB
);
2155 #ifndef CONFIG_USER_ONLY
2156 cpu_watchpoint_remove_all(env
, BP_GDB
);
2161 static void gdb_set_cpu_pc(GDBState
*s
, target_ulong pc
)
2163 CPUState
*cpu
= s
->c_cpu
;
2164 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2166 cpu_synchronize_state(cpu
);
2168 cc
->set_pc(cpu
, pc
);
2172 static CPUState
*find_cpu(uint32_t thread_id
)
2176 for (cpu
= first_cpu
; cpu
!= NULL
; cpu
= cpu
->next_cpu
) {
2177 if (cpu_index(cpu
) == thread_id
) {
2185 static int gdb_handle_packet(GDBState
*s
, const char *line_buf
)
2187 #ifdef TARGET_XTENSA
2193 int ch
, reg_size
, type
, res
;
2194 char buf
[MAX_PACKET_LENGTH
];
2195 uint8_t mem_buf
[MAX_PACKET_LENGTH
];
2197 target_ulong addr
, len
;
2200 printf("command='%s'\n", line_buf
);
2206 /* TODO: Make this return the correct value for user-mode. */
2207 snprintf(buf
, sizeof(buf
), "T%02xthread:%02x;", GDB_SIGNAL_TRAP
,
2208 cpu_index(s
->c_cpu
));
2210 /* Remove all the breakpoints when this query is issued,
2211 * because gdb is doing an initial connect and the state
2212 * should be cleaned up.
2214 gdb_breakpoint_remove_all();
2218 addr
= strtoull(p
, (char **)&p
, 16);
2219 gdb_set_cpu_pc(s
, addr
);
2225 s
->signal
= gdb_signal_to_target (strtoul(p
, (char **)&p
, 16));
2226 if (s
->signal
== -1)
2231 if (strncmp(p
, "Cont", 4) == 0) {
2232 int res_signal
, res_thread
;
2236 put_packet(s
, "vCont;c;C;s;S");
2251 if (action
== 'C' || action
== 'S') {
2252 signal
= strtoul(p
, (char **)&p
, 16);
2253 } else if (action
!= 'c' && action
!= 's') {
2259 thread
= strtoull(p
+1, (char **)&p
, 16);
2261 action
= tolower(action
);
2262 if (res
== 0 || (res
== 'c' && action
== 's')) {
2264 res_signal
= signal
;
2265 res_thread
= thread
;
2269 if (res_thread
!= -1 && res_thread
!= 0) {
2270 cpu
= find_cpu(res_thread
);
2272 put_packet(s
, "E22");
2278 cpu_single_step(s
->c_cpu
, sstep_flags
);
2280 s
->signal
= res_signal
;
2286 goto unknown_command
;
2289 #ifdef CONFIG_USER_ONLY
2290 /* Kill the target */
2291 fprintf(stderr
, "\nQEMU: Terminated via GDBstub\n");
2296 gdb_breakpoint_remove_all();
2297 gdb_syscall_mode
= GDB_SYS_DISABLED
;
2299 put_packet(s
, "OK");
2303 addr
= strtoull(p
, (char **)&p
, 16);
2304 gdb_set_cpu_pc(s
, addr
);
2306 cpu_single_step(s
->c_cpu
, sstep_flags
);
2314 ret
= strtoull(p
, (char **)&p
, 16);
2317 err
= strtoull(p
, (char **)&p
, 16);
2324 if (s
->current_syscall_cb
) {
2325 s
->current_syscall_cb(s
->c_cpu
, ret
, err
);
2326 s
->current_syscall_cb
= NULL
;
2329 put_packet(s
, "T02");
2336 cpu_synchronize_state(s
->g_cpu
);
2337 #ifdef TARGET_XTENSA
2338 env
= s
->g_cpu
->env_ptr
;
2341 for (addr
= 0; addr
< num_g_regs
; addr
++) {
2342 reg_size
= gdb_read_register(s
->g_cpu
, mem_buf
+ len
, addr
);
2345 memtohex(buf
, mem_buf
, len
);
2349 cpu_synchronize_state(s
->g_cpu
);
2350 #ifdef TARGET_XTENSA
2351 env
= s
->g_cpu
->env_ptr
;
2353 registers
= mem_buf
;
2354 len
= strlen(p
) / 2;
2355 hextomem((uint8_t *)registers
, p
, len
);
2356 for (addr
= 0; addr
< num_g_regs
&& len
> 0; addr
++) {
2357 reg_size
= gdb_write_register(s
->g_cpu
, registers
, addr
);
2359 registers
+= reg_size
;
2361 put_packet(s
, "OK");
2364 addr
= strtoull(p
, (char **)&p
, 16);
2367 len
= strtoull(p
, NULL
, 16);
2368 if (target_memory_rw_debug(s
->g_cpu
, addr
, mem_buf
, len
, false) != 0) {
2369 put_packet (s
, "E14");
2371 memtohex(buf
, mem_buf
, len
);
2376 addr
= strtoull(p
, (char **)&p
, 16);
2379 len
= strtoull(p
, (char **)&p
, 16);
2382 hextomem(mem_buf
, p
, len
);
2383 if (target_memory_rw_debug(s
->g_cpu
, addr
, mem_buf
, len
,
2385 put_packet(s
, "E14");
2387 put_packet(s
, "OK");
2391 /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
2392 This works, but can be very slow. Anything new enough to
2393 understand XML also knows how to use this properly. */
2395 goto unknown_command
;
2396 addr
= strtoull(p
, (char **)&p
, 16);
2397 reg_size
= gdb_read_register(s
->g_cpu
, mem_buf
, addr
);
2399 memtohex(buf
, mem_buf
, reg_size
);
2402 put_packet(s
, "E14");
2407 goto unknown_command
;
2408 addr
= strtoull(p
, (char **)&p
, 16);
2411 reg_size
= strlen(p
) / 2;
2412 hextomem(mem_buf
, p
, reg_size
);
2413 gdb_write_register(s
->g_cpu
, mem_buf
, addr
);
2414 put_packet(s
, "OK");
2418 type
= strtoul(p
, (char **)&p
, 16);
2421 addr
= strtoull(p
, (char **)&p
, 16);
2424 len
= strtoull(p
, (char **)&p
, 16);
2426 res
= gdb_breakpoint_insert(addr
, len
, type
);
2428 res
= gdb_breakpoint_remove(addr
, len
, type
);
2430 put_packet(s
, "OK");
2431 else if (res
== -ENOSYS
)
2434 put_packet(s
, "E22");
2438 thread
= strtoull(p
, (char **)&p
, 16);
2439 if (thread
== -1 || thread
== 0) {
2440 put_packet(s
, "OK");
2443 cpu
= find_cpu(thread
);
2445 put_packet(s
, "E22");
2451 put_packet(s
, "OK");
2455 put_packet(s
, "OK");
2458 put_packet(s
, "E22");
2463 thread
= strtoull(p
, (char **)&p
, 16);
2464 cpu
= find_cpu(thread
);
2467 put_packet(s
, "OK");
2469 put_packet(s
, "E22");
2474 /* parse any 'q' packets here */
2475 if (!strcmp(p
,"qemu.sstepbits")) {
2476 /* Query Breakpoint bit definitions */
2477 snprintf(buf
, sizeof(buf
), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2483 } else if (strncmp(p
,"qemu.sstep",10) == 0) {
2484 /* Display or change the sstep_flags */
2487 /* Display current setting */
2488 snprintf(buf
, sizeof(buf
), "0x%x", sstep_flags
);
2493 type
= strtoul(p
, (char **)&p
, 16);
2495 put_packet(s
, "OK");
2497 } else if (strcmp(p
,"C") == 0) {
2498 /* "Current thread" remains vague in the spec, so always return
2499 * the first CPU (gdb returns the first thread). */
2500 put_packet(s
, "QC1");
2502 } else if (strcmp(p
,"fThreadInfo") == 0) {
2503 s
->query_cpu
= first_cpu
;
2504 goto report_cpuinfo
;
2505 } else if (strcmp(p
,"sThreadInfo") == 0) {
2508 snprintf(buf
, sizeof(buf
), "m%x", cpu_index(s
->query_cpu
));
2510 s
->query_cpu
= s
->query_cpu
->next_cpu
;
2514 } else if (strncmp(p
,"ThreadExtraInfo,", 16) == 0) {
2515 thread
= strtoull(p
+16, (char **)&p
, 16);
2516 cpu
= find_cpu(thread
);
2518 cpu_synchronize_state(cpu
);
2519 len
= snprintf((char *)mem_buf
, sizeof(mem_buf
),
2520 "CPU#%d [%s]", cpu
->cpu_index
,
2521 cpu
->halted
? "halted " : "running");
2522 memtohex(buf
, mem_buf
, len
);
2527 #ifdef CONFIG_USER_ONLY
2528 else if (strncmp(p
, "Offsets", 7) == 0) {
2529 CPUArchState
*env
= s
->c_cpu
->env_ptr
;
2530 TaskState
*ts
= env
->opaque
;
2532 snprintf(buf
, sizeof(buf
),
2533 "Text=" TARGET_ABI_FMT_lx
";Data=" TARGET_ABI_FMT_lx
2534 ";Bss=" TARGET_ABI_FMT_lx
,
2535 ts
->info
->code_offset
,
2536 ts
->info
->data_offset
,
2537 ts
->info
->data_offset
);
2541 #else /* !CONFIG_USER_ONLY */
2542 else if (strncmp(p
, "Rcmd,", 5) == 0) {
2543 int len
= strlen(p
+ 5);
2545 if ((len
% 2) != 0) {
2546 put_packet(s
, "E01");
2549 hextomem(mem_buf
, p
+ 5, len
);
2552 qemu_chr_be_write(s
->mon_chr
, mem_buf
, len
);
2553 put_packet(s
, "OK");
2556 #endif /* !CONFIG_USER_ONLY */
2557 if (strncmp(p
, "Supported", 9) == 0) {
2558 snprintf(buf
, sizeof(buf
), "PacketSize=%x", MAX_PACKET_LENGTH
);
2560 pstrcat(buf
, sizeof(buf
), ";qXfer:features:read+");
2566 if (strncmp(p
, "Xfer:features:read:", 19) == 0) {
2568 target_ulong total_len
;
2572 xml
= get_feature_xml(p
, &p
);
2574 snprintf(buf
, sizeof(buf
), "E00");
2581 addr
= strtoul(p
, (char **)&p
, 16);
2584 len
= strtoul(p
, (char **)&p
, 16);
2586 total_len
= strlen(xml
);
2587 if (addr
> total_len
) {
2588 snprintf(buf
, sizeof(buf
), "E00");
2592 if (len
> (MAX_PACKET_LENGTH
- 5) / 2)
2593 len
= (MAX_PACKET_LENGTH
- 5) / 2;
2594 if (len
< total_len
- addr
) {
2596 len
= memtox(buf
+ 1, xml
+ addr
, len
);
2599 len
= memtox(buf
+ 1, xml
+ addr
, total_len
- addr
);
2601 put_packet_binary(s
, buf
, len
+ 1);
2605 /* Unrecognised 'q' command. */
2606 goto unknown_command
;
2610 /* put empty packet */
2618 void gdb_set_stop_cpu(CPUState
*cpu
)
2620 gdbserver_state
->c_cpu
= cpu
;
2621 gdbserver_state
->g_cpu
= cpu
;
2624 #ifndef CONFIG_USER_ONLY
2625 static void gdb_vm_state_change(void *opaque
, int running
, RunState state
)
2627 GDBState
*s
= gdbserver_state
;
2628 CPUArchState
*env
= s
->c_cpu
->env_ptr
;
2629 CPUState
*cpu
= s
->c_cpu
;
2634 if (running
|| s
->state
== RS_INACTIVE
) {
2637 /* Is there a GDB syscall waiting to be sent? */
2638 if (s
->current_syscall_cb
) {
2639 put_packet(s
, s
->syscall_buf
);
2643 case RUN_STATE_DEBUG
:
2644 if (env
->watchpoint_hit
) {
2645 switch (env
->watchpoint_hit
->flags
& BP_MEM_ACCESS
) {
2656 snprintf(buf
, sizeof(buf
),
2657 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx
";",
2658 GDB_SIGNAL_TRAP
, cpu_index(cpu
), type
,
2659 env
->watchpoint_hit
->vaddr
);
2660 env
->watchpoint_hit
= NULL
;
2664 ret
= GDB_SIGNAL_TRAP
;
2666 case RUN_STATE_PAUSED
:
2667 ret
= GDB_SIGNAL_INT
;
2669 case RUN_STATE_SHUTDOWN
:
2670 ret
= GDB_SIGNAL_QUIT
;
2672 case RUN_STATE_IO_ERROR
:
2673 ret
= GDB_SIGNAL_IO
;
2675 case RUN_STATE_WATCHDOG
:
2676 ret
= GDB_SIGNAL_ALRM
;
2678 case RUN_STATE_INTERNAL_ERROR
:
2679 ret
= GDB_SIGNAL_ABRT
;
2681 case RUN_STATE_SAVE_VM
:
2682 case RUN_STATE_RESTORE_VM
:
2684 case RUN_STATE_FINISH_MIGRATE
:
2685 ret
= GDB_SIGNAL_XCPU
;
2688 ret
= GDB_SIGNAL_UNKNOWN
;
2691 snprintf(buf
, sizeof(buf
), "T%02xthread:%02x;", ret
, cpu_index(cpu
));
2696 /* disable single step if it was enabled */
2697 cpu_single_step(cpu
, 0);
2701 /* Send a gdb syscall request.
2702 This accepts limited printf-style format specifiers, specifically:
2703 %x - target_ulong argument printed in hex.
2704 %lx - 64-bit argument printed in hex.
2705 %s - string pointer (target_ulong) and length (int) pair. */
2706 void gdb_do_syscall(gdb_syscall_complete_cb cb
, const char *fmt
, ...)
2715 s
= gdbserver_state
;
2718 s
->current_syscall_cb
= cb
;
2719 #ifndef CONFIG_USER_ONLY
2720 vm_stop(RUN_STATE_DEBUG
);
2724 p_end
= &s
->syscall_buf
[sizeof(s
->syscall_buf
)];
2731 addr
= va_arg(va
, target_ulong
);
2732 p
+= snprintf(p
, p_end
- p
, TARGET_FMT_lx
, addr
);
2735 if (*(fmt
++) != 'x')
2737 i64
= va_arg(va
, uint64_t);
2738 p
+= snprintf(p
, p_end
- p
, "%" PRIx64
, i64
);
2741 addr
= va_arg(va
, target_ulong
);
2742 p
+= snprintf(p
, p_end
- p
, TARGET_FMT_lx
"/%x",
2743 addr
, va_arg(va
, int));
2747 fprintf(stderr
, "gdbstub: Bad syscall format string '%s'\n",
2757 #ifdef CONFIG_USER_ONLY
2758 put_packet(s
, s
->syscall_buf
);
2759 gdb_handlesig(s
->c_cpu
, 0);
2761 /* In this case wait to send the syscall packet until notification that
2762 the CPU has stopped. This must be done because if the packet is sent
2763 now the reply from the syscall request could be received while the CPU
2764 is still in the running state, which can cause packets to be dropped
2765 and state transition 'T' packets to be sent while the syscall is still
2771 static void gdb_read_byte(GDBState
*s
, int ch
)
2776 #ifndef CONFIG_USER_ONLY
2777 if (s
->last_packet_len
) {
2778 /* Waiting for a response to the last packet. If we see the start
2779 of a new command then abandon the previous response. */
2782 printf("Got NACK, retransmitting\n");
2784 put_buffer(s
, (uint8_t *)s
->last_packet
, s
->last_packet_len
);
2788 printf("Got ACK\n");
2790 printf("Got '%c' when expecting ACK/NACK\n", ch
);
2792 if (ch
== '+' || ch
== '$')
2793 s
->last_packet_len
= 0;
2797 if (runstate_is_running()) {
2798 /* when the CPU is running, we cannot do anything except stop
2799 it when receiving a char */
2800 vm_stop(RUN_STATE_PAUSED
);
2807 s
->line_buf_index
= 0;
2808 s
->state
= RS_GETLINE
;
2813 s
->state
= RS_CHKSUM1
;
2814 } else if (s
->line_buf_index
>= sizeof(s
->line_buf
) - 1) {
2817 s
->line_buf
[s
->line_buf_index
++] = ch
;
2821 s
->line_buf
[s
->line_buf_index
] = '\0';
2822 s
->line_csum
= fromhex(ch
) << 4;
2823 s
->state
= RS_CHKSUM2
;
2826 s
->line_csum
|= fromhex(ch
);
2828 for(i
= 0; i
< s
->line_buf_index
; i
++) {
2829 csum
+= s
->line_buf
[i
];
2831 if (s
->line_csum
!= (csum
& 0xff)) {
2833 put_buffer(s
, &reply
, 1);
2837 put_buffer(s
, &reply
, 1);
2838 s
->state
= gdb_handle_packet(s
, s
->line_buf
);
2847 /* Tell the remote gdb that the process has exited. */
2848 void gdb_exit(CPUArchState
*env
, int code
)
2853 s
= gdbserver_state
;
2857 #ifdef CONFIG_USER_ONLY
2858 if (gdbserver_fd
< 0 || s
->fd
< 0) {
2863 snprintf(buf
, sizeof(buf
), "W%02x", (uint8_t)code
);
2866 #ifndef CONFIG_USER_ONLY
2868 qemu_chr_delete(s
->chr
);
2873 #ifdef CONFIG_USER_ONLY
2879 s
= gdbserver_state
;
2881 if (gdbserver_fd
< 0 || s
->fd
< 0)
2888 gdb_handlesig(CPUState
*cpu
, int sig
)
2890 CPUArchState
*env
= cpu
->env_ptr
;
2895 s
= gdbserver_state
;
2896 if (gdbserver_fd
< 0 || s
->fd
< 0) {
2900 /* disable single step if it was enabled */
2901 cpu_single_step(cpu
, 0);
2905 snprintf(buf
, sizeof(buf
), "S%02x", target_signal_to_gdb(sig
));
2908 /* put_packet() might have detected that the peer terminated the
2916 s
->running_state
= 0;
2917 while (s
->running_state
== 0) {
2918 n
= read(s
->fd
, buf
, 256);
2922 for (i
= 0; i
< n
; i
++) {
2923 gdb_read_byte(s
, buf
[i
]);
2925 } else if (n
== 0 || errno
!= EAGAIN
) {
2926 /* XXX: Connection closed. Should probably wait for another
2927 connection before continuing. */
2936 /* Tell the remote gdb that the process has exited due to SIG. */
2937 void gdb_signalled(CPUArchState
*env
, int sig
)
2942 s
= gdbserver_state
;
2943 if (gdbserver_fd
< 0 || s
->fd
< 0) {
2947 snprintf(buf
, sizeof(buf
), "X%02x", target_signal_to_gdb(sig
));
2951 static void gdb_accept(void)
2954 struct sockaddr_in sockaddr
;
2959 len
= sizeof(sockaddr
);
2960 fd
= accept(gdbserver_fd
, (struct sockaddr
*)&sockaddr
, &len
);
2961 if (fd
< 0 && errno
!= EINTR
) {
2964 } else if (fd
>= 0) {
2966 fcntl(fd
, F_SETFD
, FD_CLOEXEC
);
2972 /* set short latency */
2973 socket_set_nodelay(fd
);
2975 s
= g_malloc0(sizeof(GDBState
));
2976 s
->c_cpu
= first_cpu
;
2977 s
->g_cpu
= first_cpu
;
2981 gdbserver_state
= s
;
2983 fcntl(fd
, F_SETFL
, O_NONBLOCK
);
2986 static int gdbserver_open(int port
)
2988 struct sockaddr_in sockaddr
;
2991 fd
= socket(PF_INET
, SOCK_STREAM
, 0);
2997 fcntl(fd
, F_SETFD
, FD_CLOEXEC
);
3000 /* allow fast reuse */
3002 qemu_setsockopt(fd
, SOL_SOCKET
, SO_REUSEADDR
, &val
, sizeof(val
));
3004 sockaddr
.sin_family
= AF_INET
;
3005 sockaddr
.sin_port
= htons(port
);
3006 sockaddr
.sin_addr
.s_addr
= 0;
3007 ret
= bind(fd
, (struct sockaddr
*)&sockaddr
, sizeof(sockaddr
));
3013 ret
= listen(fd
, 0);
3022 int gdbserver_start(int port
)
3024 gdbserver_fd
= gdbserver_open(port
);
3025 if (gdbserver_fd
< 0)
3027 /* accept connections */
3032 /* Disable gdb stub for child processes. */
3033 void gdbserver_fork(CPUArchState
*env
)
3035 GDBState
*s
= gdbserver_state
;
3036 if (gdbserver_fd
< 0 || s
->fd
< 0)
3040 cpu_breakpoint_remove_all(env
, BP_GDB
);
3041 cpu_watchpoint_remove_all(env
, BP_GDB
);
3044 static int gdb_chr_can_receive(void *opaque
)
3046 /* We can handle an arbitrarily large amount of data.
3047 Pick the maximum packet size, which is as good as anything. */
3048 return MAX_PACKET_LENGTH
;
3051 static void gdb_chr_receive(void *opaque
, const uint8_t *buf
, int size
)
3055 for (i
= 0; i
< size
; i
++) {
3056 gdb_read_byte(gdbserver_state
, buf
[i
]);
3060 static void gdb_chr_event(void *opaque
, int event
)
3063 case CHR_EVENT_OPENED
:
3064 vm_stop(RUN_STATE_PAUSED
);
3072 static void gdb_monitor_output(GDBState
*s
, const char *msg
, int len
)
3074 char buf
[MAX_PACKET_LENGTH
];
3077 if (len
> (MAX_PACKET_LENGTH
/2) - 1)
3078 len
= (MAX_PACKET_LENGTH
/2) - 1;
3079 memtohex(buf
+ 1, (uint8_t *)msg
, len
);
3083 static int gdb_monitor_write(CharDriverState
*chr
, const uint8_t *buf
, int len
)
3085 const char *p
= (const char *)buf
;
3088 max_sz
= (sizeof(gdbserver_state
->last_packet
) - 2) / 2;
3090 if (len
<= max_sz
) {
3091 gdb_monitor_output(gdbserver_state
, p
, len
);
3094 gdb_monitor_output(gdbserver_state
, p
, max_sz
);
3102 static void gdb_sigterm_handler(int signal
)
3104 if (runstate_is_running()) {
3105 vm_stop(RUN_STATE_PAUSED
);
3110 int gdbserver_start(const char *device
)
3113 char gdbstub_device_name
[128];
3114 CharDriverState
*chr
= NULL
;
3115 CharDriverState
*mon_chr
;
3119 if (strcmp(device
, "none") != 0) {
3120 if (strstart(device
, "tcp:", NULL
)) {
3121 /* enforce required TCP attributes */
3122 snprintf(gdbstub_device_name
, sizeof(gdbstub_device_name
),
3123 "%s,nowait,nodelay,server", device
);
3124 device
= gdbstub_device_name
;
3127 else if (strcmp(device
, "stdio") == 0) {
3128 struct sigaction act
;
3130 memset(&act
, 0, sizeof(act
));
3131 act
.sa_handler
= gdb_sigterm_handler
;
3132 sigaction(SIGINT
, &act
, NULL
);
3135 chr
= qemu_chr_new("gdb", device
, NULL
);
3139 qemu_chr_fe_claim_no_fail(chr
);
3140 qemu_chr_add_handlers(chr
, gdb_chr_can_receive
, gdb_chr_receive
,
3141 gdb_chr_event
, NULL
);
3144 s
= gdbserver_state
;
3146 s
= g_malloc0(sizeof(GDBState
));
3147 gdbserver_state
= s
;
3149 qemu_add_vm_change_state_handler(gdb_vm_state_change
, NULL
);
3151 /* Initialize a monitor terminal for gdb */
3152 mon_chr
= g_malloc0(sizeof(*mon_chr
));
3153 mon_chr
->chr_write
= gdb_monitor_write
;
3154 monitor_init(mon_chr
, 0);
3157 qemu_chr_delete(s
->chr
);
3158 mon_chr
= s
->mon_chr
;
3159 memset(s
, 0, sizeof(GDBState
));
3161 s
->c_cpu
= first_cpu
;
3162 s
->g_cpu
= first_cpu
;
3164 s
->state
= chr
? RS_IDLE
: RS_INACTIVE
;
3165 s
->mon_chr
= mon_chr
;
3166 s
->current_syscall_cb
= NULL
;