1 /*
2 * gdb server stub
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "qemu.h"
31 #else
32 #include "monitor/monitor.h"
33 #include "char/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
36 #endif
37
38 #define MAX_PACKET_LENGTH 4096
39
40 #include "cpu.h"
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
44
45 #ifndef TARGET_CPU_MEMORY_RW_DEBUG
46 static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
47 uint8_t *buf, int len, int is_write)
48 {
49 return cpu_memory_rw_debug(env, addr, buf, len, is_write);
50 }
51 #else
52 /* target_memory_rw_debug() defined in cpu.h */
53 #endif
54
55 enum {
56 GDB_SIGNAL_0 = 0,
57 GDB_SIGNAL_INT = 2,
58 GDB_SIGNAL_QUIT = 3,
59 GDB_SIGNAL_TRAP = 5,
60 GDB_SIGNAL_ABRT = 6,
61 GDB_SIGNAL_ALRM = 14,
62 GDB_SIGNAL_IO = 23,
63 GDB_SIGNAL_XCPU = 24,
64 GDB_SIGNAL_UNKNOWN = 143
65 };
66
67 #ifdef CONFIG_USER_ONLY
68
69 /* Map target signal numbers to GDB protocol signal numbers and vice
70 * versa. For user emulation's currently supported systems, we can
71 * assume most signals are defined.
72 */
73
74 static int gdb_signal_table[] = {
75 0,
76 TARGET_SIGHUP,
77 TARGET_SIGINT,
78 TARGET_SIGQUIT,
79 TARGET_SIGILL,
80 TARGET_SIGTRAP,
81 TARGET_SIGABRT,
82 -1, /* SIGEMT */
83 TARGET_SIGFPE,
84 TARGET_SIGKILL,
85 TARGET_SIGBUS,
86 TARGET_SIGSEGV,
87 TARGET_SIGSYS,
88 TARGET_SIGPIPE,
89 TARGET_SIGALRM,
90 TARGET_SIGTERM,
91 TARGET_SIGURG,
92 TARGET_SIGSTOP,
93 TARGET_SIGTSTP,
94 TARGET_SIGCONT,
95 TARGET_SIGCHLD,
96 TARGET_SIGTTIN,
97 TARGET_SIGTTOU,
98 TARGET_SIGIO,
99 TARGET_SIGXCPU,
100 TARGET_SIGXFSZ,
101 TARGET_SIGVTALRM,
102 TARGET_SIGPROF,
103 TARGET_SIGWINCH,
104 -1, /* SIGLOST */
105 TARGET_SIGUSR1,
106 TARGET_SIGUSR2,
107 #ifdef TARGET_SIGPWR
108 TARGET_SIGPWR,
109 #else
110 -1,
111 #endif
112 -1, /* SIGPOLL */
113 -1,
114 -1,
115 -1,
116 -1,
117 -1,
118 -1,
119 -1,
120 -1,
121 -1,
122 -1,
123 -1,
124 #ifdef __SIGRTMIN
125 __SIGRTMIN + 1,
126 __SIGRTMIN + 2,
127 __SIGRTMIN + 3,
128 __SIGRTMIN + 4,
129 __SIGRTMIN + 5,
130 __SIGRTMIN + 6,
131 __SIGRTMIN + 7,
132 __SIGRTMIN + 8,
133 __SIGRTMIN + 9,
134 __SIGRTMIN + 10,
135 __SIGRTMIN + 11,
136 __SIGRTMIN + 12,
137 __SIGRTMIN + 13,
138 __SIGRTMIN + 14,
139 __SIGRTMIN + 15,
140 __SIGRTMIN + 16,
141 __SIGRTMIN + 17,
142 __SIGRTMIN + 18,
143 __SIGRTMIN + 19,
144 __SIGRTMIN + 20,
145 __SIGRTMIN + 21,
146 __SIGRTMIN + 22,
147 __SIGRTMIN + 23,
148 __SIGRTMIN + 24,
149 __SIGRTMIN + 25,
150 __SIGRTMIN + 26,
151 __SIGRTMIN + 27,
152 __SIGRTMIN + 28,
153 __SIGRTMIN + 29,
154 __SIGRTMIN + 30,
155 __SIGRTMIN + 31,
156 -1, /* SIGCANCEL */
157 __SIGRTMIN,
158 __SIGRTMIN + 32,
159 __SIGRTMIN + 33,
160 __SIGRTMIN + 34,
161 __SIGRTMIN + 35,
162 __SIGRTMIN + 36,
163 __SIGRTMIN + 37,
164 __SIGRTMIN + 38,
165 __SIGRTMIN + 39,
166 __SIGRTMIN + 40,
167 __SIGRTMIN + 41,
168 __SIGRTMIN + 42,
169 __SIGRTMIN + 43,
170 __SIGRTMIN + 44,
171 __SIGRTMIN + 45,
172 __SIGRTMIN + 46,
173 __SIGRTMIN + 47,
174 __SIGRTMIN + 48,
175 __SIGRTMIN + 49,
176 __SIGRTMIN + 50,
177 __SIGRTMIN + 51,
178 __SIGRTMIN + 52,
179 __SIGRTMIN + 53,
180 __SIGRTMIN + 54,
181 __SIGRTMIN + 55,
182 __SIGRTMIN + 56,
183 __SIGRTMIN + 57,
184 __SIGRTMIN + 58,
185 __SIGRTMIN + 59,
186 __SIGRTMIN + 60,
187 __SIGRTMIN + 61,
188 __SIGRTMIN + 62,
189 __SIGRTMIN + 63,
190 __SIGRTMIN + 64,
191 __SIGRTMIN + 65,
192 __SIGRTMIN + 66,
193 __SIGRTMIN + 67,
194 __SIGRTMIN + 68,
195 __SIGRTMIN + 69,
196 __SIGRTMIN + 70,
197 __SIGRTMIN + 71,
198 __SIGRTMIN + 72,
199 __SIGRTMIN + 73,
200 __SIGRTMIN + 74,
201 __SIGRTMIN + 75,
202 __SIGRTMIN + 76,
203 __SIGRTMIN + 77,
204 __SIGRTMIN + 78,
205 __SIGRTMIN + 79,
206 __SIGRTMIN + 80,
207 __SIGRTMIN + 81,
208 __SIGRTMIN + 82,
209 __SIGRTMIN + 83,
210 __SIGRTMIN + 84,
211 __SIGRTMIN + 85,
212 __SIGRTMIN + 86,
213 __SIGRTMIN + 87,
214 __SIGRTMIN + 88,
215 __SIGRTMIN + 89,
216 __SIGRTMIN + 90,
217 __SIGRTMIN + 91,
218 __SIGRTMIN + 92,
219 __SIGRTMIN + 93,
220 __SIGRTMIN + 94,
221 __SIGRTMIN + 95,
222 -1, /* SIGINFO */
223 -1, /* UNKNOWN */
224 -1, /* DEFAULT */
225 -1,
226 -1,
227 -1,
228 -1,
229 -1,
230 -1
231 #endif
232 };
233 #else
234 /* In system mode we only need SIGINT and SIGTRAP; other signals
235 are not yet supported. */
236
237 enum {
238 TARGET_SIGINT = 2,
239 TARGET_SIGTRAP = 5
240 };
241
242 static int gdb_signal_table[] = {
243 -1,
244 -1,
245 TARGET_SIGINT,
246 -1,
247 -1,
248 TARGET_SIGTRAP
249 };
250 #endif
251
252 #ifdef CONFIG_USER_ONLY
253 static int target_signal_to_gdb (int sig)
254 {
255 int i;
256 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
257 if (gdb_signal_table[i] == sig)
258 return i;
259 return GDB_SIGNAL_UNKNOWN;
260 }
261 #endif
262
263 static int gdb_signal_to_target (int sig)
264 {
265 if (sig < ARRAY_SIZE (gdb_signal_table))
266 return gdb_signal_table[sig];
267 else
268 return -1;
269 }
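/* For example, a guest SIGTRAP is reported to gdb as signal 5
   (GDB_SIGNAL_TRAP), and a "C05" continue-with-signal packet from gdb is
   mapped back to TARGET_SIGTRAP before it is delivered to the guest. */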
270
271 //#define DEBUG_GDB
272
273 typedef struct GDBRegisterState {
274 int base_reg;
275 int num_regs;
276 gdb_reg_cb get_reg;
277 gdb_reg_cb set_reg;
278 const char *xml;
279 struct GDBRegisterState *next;
280 } GDBRegisterState;
281
282 enum RSState {
283 RS_INACTIVE,
284 RS_IDLE,
285 RS_GETLINE,
286 RS_CHKSUM1,
287 RS_CHKSUM2,
288 };
289 typedef struct GDBState {
290 CPUArchState *c_cpu; /* current CPU for step/continue ops */
291 CPUArchState *g_cpu; /* current CPU for other ops */
292 CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
293 enum RSState state; /* parsing state */
294 char line_buf[MAX_PACKET_LENGTH];
295 int line_buf_index;
296 int line_csum;
297 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
298 int last_packet_len;
299 int signal;
300 #ifdef CONFIG_USER_ONLY
301 int fd;
302 int running_state;
303 #else
304 CharDriverState *chr;
305 CharDriverState *mon_chr;
306 #endif
307 char syscall_buf[256];
308 gdb_syscall_complete_cb current_syscall_cb;
309 } GDBState;
310
311 /* By default use no IRQs and no timers while single stepping so as to
312 * make single stepping like an ICE HW step.
313 */
314 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
315
316 static GDBState *gdbserver_state;
317
318 /* This is an ugly hack to cope with both new and old gdb.
319 If gdb sends qXfer:features:read then assume we're talking to a newish
320 gdb that understands target descriptions. */
321 static int gdb_has_xml;
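/* A target-description-aware gdb starts by querying, e.g.,
   "qXfer:features:read:target.xml:0,fff"; the packet handler below sets
   gdb_has_xml as soon as such a request is seen. */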
322
323 #ifdef CONFIG_USER_ONLY
324 /* XXX: This is not thread safe. Do we care? */
325 static int gdbserver_fd = -1;
326
327 static int get_char(GDBState *s)
328 {
329 uint8_t ch;
330 int ret;
331
332 for(;;) {
333 ret = qemu_recv(s->fd, &ch, 1, 0);
334 if (ret < 0) {
335 if (errno == ECONNRESET)
336 s->fd = -1;
337 if (errno != EINTR && errno != EAGAIN)
338 return -1;
339 } else if (ret == 0) {
340 close(s->fd);
341 s->fd = -1;
342 return -1;
343 } else {
344 break;
345 }
346 }
347 return ch;
348 }
349 #endif
350
351 static enum {
352 GDB_SYS_UNKNOWN,
353 GDB_SYS_ENABLED,
354 GDB_SYS_DISABLED,
355 } gdb_syscall_mode;
356
357 /* If gdb is connected when the first semihosting syscall occurs then use
358 remote gdb syscalls. Otherwise use native file IO. */
359 int use_gdb_syscalls(void)
360 {
361 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
362 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
363 : GDB_SYS_DISABLED);
364 }
365 return gdb_syscall_mode == GDB_SYS_ENABLED;
366 }
367
368 /* Resume execution. */
369 static inline void gdb_continue(GDBState *s)
370 {
371 #ifdef CONFIG_USER_ONLY
372 s->running_state = 1;
373 #else
374 vm_start();
375 #endif
376 }
377
378 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
379 {
380 #ifdef CONFIG_USER_ONLY
381 int ret;
382
383 while (len > 0) {
384 ret = send(s->fd, buf, len, 0);
385 if (ret < 0) {
386 if (errno != EINTR && errno != EAGAIN)
387 return;
388 } else {
389 buf += ret;
390 len -= ret;
391 }
392 }
393 #else
394 qemu_chr_fe_write(s->chr, buf, len);
395 #endif
396 }
397
398 static inline int fromhex(int v)
399 {
400 if (v >= '0' && v <= '9')
401 return v - '0';
402 else if (v >= 'A' && v <= 'F')
403 return v - 'A' + 10;
404 else if (v >= 'a' && v <= 'f')
405 return v - 'a' + 10;
406 else
407 return 0;
408 }
409
410 static inline int tohex(int v)
411 {
412 if (v < 10)
413 return v + '0';
414 else
415 return v - 10 + 'a';
416 }
417
418 static void memtohex(char *buf, const uint8_t *mem, int len)
419 {
420 int i, c;
421 char *q;
422 q = buf;
423 for(i = 0; i < len; i++) {
424 c = mem[i];
425 *q++ = tohex(c >> 4);
426 *q++ = tohex(c & 0xf);
427 }
428 *q = '\0';
429 }
430
431 static void hextomem(uint8_t *mem, const char *buf, int len)
432 {
433 int i;
434
435 for(i = 0; i < len; i++) {
436 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
437 buf += 2;
438 }
439 }
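/* Encoding used by memtohex()/hextomem(): each byte becomes two lowercase
   hex digits, most significant nibble first, so the two bytes
   { 0xde, 0xad } are transferred as the string "dead", and vice versa. */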
440
441 /* return -1 if error, 0 if OK */
442 static int put_packet_binary(GDBState *s, const char *buf, int len)
443 {
444 int csum, i;
445 uint8_t *p;
446
447 for(;;) {
448 p = s->last_packet;
449 *(p++) = '$';
450 memcpy(p, buf, len);
451 p += len;
452 csum = 0;
453 for(i = 0; i < len; i++) {
454 csum += buf[i];
455 }
456 *(p++) = '#';
457 *(p++) = tohex((csum >> 4) & 0xf);
458 *(p++) = tohex((csum) & 0xf);
459
460 s->last_packet_len = p - s->last_packet;
461 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
462
463 #ifdef CONFIG_USER_ONLY
464 i = get_char(s);
465 if (i < 0)
466 return -1;
467 if (i == '+')
468 break;
469 #else
470 break;
471 #endif
472 }
473 return 0;
474 }
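/* put_packet_binary() produces the standard gdb remote-protocol framing:
   '$' <payload> '#' <two hex digits of the payload's mod-256 sum>.
   E.g. the reply "OK" goes out on the wire as "$OK#9a".  In user mode the
   stub then waits for gdb's '+' acknowledgement before returning. */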
475
476 /* return -1 if error, 0 if OK */
477 static int put_packet(GDBState *s, const char *buf)
478 {
479 #ifdef DEBUG_GDB
480 printf("reply='%s'\n", buf);
481 #endif
482
483 return put_packet_binary(s, buf, strlen(buf));
484 }
485
486 /* The GDB remote protocol transfers values in target byte order. This means
487 we can use the raw memory access routines to access the value buffer.
488 Conveniently, these also handle the case where the buffer is mis-aligned.
489 */
490 #define GET_REG8(val) do { \
491 stb_p(mem_buf, val); \
492 return 1; \
493 } while(0)
494 #define GET_REG16(val) do { \
495 stw_p(mem_buf, val); \
496 return 2; \
497 } while(0)
498 #define GET_REG32(val) do { \
499 stl_p(mem_buf, val); \
500 return 4; \
501 } while(0)
502 #define GET_REG64(val) do { \
503 stq_p(mem_buf, val); \
504 return 8; \
505 } while(0)
506
507 #if TARGET_LONG_BITS == 64
508 #define GET_REGL(val) GET_REG64(val)
509 #define ldtul_p(addr) ldq_p(addr)
510 #else
511 #define GET_REGL(val) GET_REG32(val)
512 #define ldtul_p(addr) ldl_p(addr)
513 #endif
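/* Convention used by every cpu_gdb_read_register() below: the register
   value is stored into mem_buf in target byte order and the number of
   bytes written is returned (this is what the GET_REG* macros above do);
   a return value of 0 means the register number was not recognised. */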
514
515 #if defined(TARGET_I386)
516
517 #ifdef TARGET_X86_64
518 static const int gpr_map[16] = {
519 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
520 8, 9, 10, 11, 12, 13, 14, 15
521 };
522 #else
523 #define gpr_map gpr_map32
524 #endif
525 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
526
527 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
528
529 #define IDX_IP_REG CPU_NB_REGS
530 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
531 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
532 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
533 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
534 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
535
536 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
537 {
538 if (n < CPU_NB_REGS) {
539 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
540 GET_REG64(env->regs[gpr_map[n]]);
541 } else if (n < CPU_NB_REGS32) {
542 GET_REG32(env->regs[gpr_map32[n]]);
543 }
544 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
545 #ifdef USE_X86LDOUBLE
546 /* FIXME: byteswap float values - after fixing fpregs layout. */
547 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
548 #else
549 memset(mem_buf, 0, 10);
550 #endif
551 return 10;
552 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
553 n -= IDX_XMM_REGS;
554 if (n < CPU_NB_REGS32 ||
555 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
556 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
557 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
558 return 16;
559 }
560 } else {
561 switch (n) {
562 case IDX_IP_REG:
563 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
564 GET_REG64(env->eip);
565 } else {
566 GET_REG32(env->eip);
567 }
568 case IDX_FLAGS_REG: GET_REG32(env->eflags);
569
570 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
571 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
572 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
573 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
574 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
575 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
576
577 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
578 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
579 (env->fpstt & 0x7) << 11);
580 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
581 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
582 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
583 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
584 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
585 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
586
587 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
588 }
589 }
590 return 0;
591 }
592
593 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
594 {
595 uint16_t selector = ldl_p(mem_buf);
596
597 if (selector != env->segs[sreg].selector) {
598 #if defined(CONFIG_USER_ONLY)
599 cpu_x86_load_seg(env, sreg, selector);
600 #else
601 unsigned int limit, flags;
602 target_ulong base;
603
604 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
605 base = selector << 4;
606 limit = 0xffff;
607 flags = 0;
608 } else {
609 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
610 return 4;
611 }
612 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
613 #endif
614 }
615 return 4;
616 }
617
618 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
619 {
620 uint32_t tmp;
621
622 if (n < CPU_NB_REGS) {
623 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
624 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
625 return sizeof(target_ulong);
626 } else if (n < CPU_NB_REGS32) {
627 n = gpr_map32[n];
628 env->regs[n] &= ~0xffffffffUL;
629 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
630 return 4;
631 }
632 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
633 #ifdef USE_X86LDOUBLE
634 /* FIXME: byteswap float values - after fixing fpregs layout. */
635 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
636 #endif
637 return 10;
638 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
639 n -= IDX_XMM_REGS;
640 if (n < CPU_NB_REGS32 ||
641 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
642 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
643 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
644 return 16;
645 }
646 } else {
647 switch (n) {
648 case IDX_IP_REG:
649 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
650 env->eip = ldq_p(mem_buf);
651 return 8;
652 } else {
653 env->eip &= ~0xffffffffUL;
654 env->eip |= (uint32_t)ldl_p(mem_buf);
655 return 4;
656 }
657 case IDX_FLAGS_REG:
658 env->eflags = ldl_p(mem_buf);
659 return 4;
660
661 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
662 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
663 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
664 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
665 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
666 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
667
668 case IDX_FP_REGS + 8:
669 env->fpuc = ldl_p(mem_buf);
670 return 4;
671 case IDX_FP_REGS + 9:
672 tmp = ldl_p(mem_buf);
673 env->fpstt = (tmp >> 11) & 7;
674 env->fpus = tmp & ~0x3800;
675 return 4;
676 case IDX_FP_REGS + 10: /* ftag */ return 4;
677 case IDX_FP_REGS + 11: /* fiseg */ return 4;
678 case IDX_FP_REGS + 12: /* fioff */ return 4;
679 case IDX_FP_REGS + 13: /* foseg */ return 4;
680 case IDX_FP_REGS + 14: /* fooff */ return 4;
681 case IDX_FP_REGS + 15: /* fop */ return 4;
682
683 case IDX_MXCSR_REG:
684 env->mxcsr = ldl_p(mem_buf);
685 return 4;
686 }
687 }
688 /* Unrecognised register. */
689 return 0;
690 }
691
692 #elif defined (TARGET_PPC)
693
694 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
695 expects whatever the target description contains. Due to a
696 historical mishap the FP registers appear in between core integer
697 regs and PC, MSR, CR, and so forth. We hack around this by giving the
698 FP regs zero size when talking to a newer gdb. */
699 #define NUM_CORE_REGS 71
700 #if defined (TARGET_PPC64)
701 #define GDB_CORE_XML "power64-core.xml"
702 #else
703 #define GDB_CORE_XML "power-core.xml"
704 #endif
705
706 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
707 {
708 if (n < 32) {
709 /* gprs */
710 GET_REGL(env->gpr[n]);
711 } else if (n < 64) {
712 /* fprs */
713 if (gdb_has_xml)
714 return 0;
715 stfq_p(mem_buf, env->fpr[n-32]);
716 return 8;
717 } else {
718 switch (n) {
719 case 64: GET_REGL(env->nip);
720 case 65: GET_REGL(env->msr);
721 case 66:
722 {
723 uint32_t cr = 0;
724 int i;
725 for (i = 0; i < 8; i++)
726 cr |= env->crf[i] << (32 - ((i + 1) * 4));
727 GET_REG32(cr);
728 }
729 case 67: GET_REGL(env->lr);
730 case 68: GET_REGL(env->ctr);
731 case 69: GET_REGL(env->xer);
732 case 70:
733 {
734 if (gdb_has_xml)
735 return 0;
736 GET_REG32(env->fpscr);
737 }
738 }
739 }
740 return 0;
741 }
742
743 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
744 {
745 if (n < 32) {
746 /* gprs */
747 env->gpr[n] = ldtul_p(mem_buf);
748 return sizeof(target_ulong);
749 } else if (n < 64) {
750 /* fprs */
751 if (gdb_has_xml)
752 return 0;
753 env->fpr[n-32] = ldfq_p(mem_buf);
754 return 8;
755 } else {
756 switch (n) {
757 case 64:
758 env->nip = ldtul_p(mem_buf);
759 return sizeof(target_ulong);
760 case 65:
761 ppc_store_msr(env, ldtul_p(mem_buf));
762 return sizeof(target_ulong);
763 case 66:
764 {
765 uint32_t cr = ldl_p(mem_buf);
766 int i;
767 for (i = 0; i < 8; i++)
768 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
769 return 4;
770 }
771 case 67:
772 env->lr = ldtul_p(mem_buf);
773 return sizeof(target_ulong);
774 case 68:
775 env->ctr = ldtul_p(mem_buf);
776 return sizeof(target_ulong);
777 case 69:
778 env->xer = ldtul_p(mem_buf);
779 return sizeof(target_ulong);
780 case 70:
781 /* fpscr */
782 if (gdb_has_xml)
783 return 0;
784 return 4;
785 }
786 }
787 return 0;
788 }
789
790 #elif defined (TARGET_SPARC)
791
792 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
793 #define NUM_CORE_REGS 86
794 #else
795 #define NUM_CORE_REGS 72
796 #endif
797
798 #ifdef TARGET_ABI32
799 #define GET_REGA(val) GET_REG32(val)
800 #else
801 #define GET_REGA(val) GET_REGL(val)
802 #endif
803
804 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
805 {
806 if (n < 8) {
807 /* g0..g7 */
808 GET_REGA(env->gregs[n]);
809 }
810 if (n < 32) {
811 /* register window */
812 GET_REGA(env->regwptr[n - 8]);
813 }
814 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
815 if (n < 64) {
816 /* fprs */
817 if (n & 1) {
818 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
819 } else {
820 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
821 }
822 }
823 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
824 switch (n) {
825 case 64: GET_REGA(env->y);
826 case 65: GET_REGA(cpu_get_psr(env));
827 case 66: GET_REGA(env->wim);
828 case 67: GET_REGA(env->tbr);
829 case 68: GET_REGA(env->pc);
830 case 69: GET_REGA(env->npc);
831 case 70: GET_REGA(env->fsr);
832 case 71: GET_REGA(0); /* csr */
833 default: GET_REGA(0);
834 }
835 #else
836 if (n < 64) {
837 /* f0-f31 */
838 if (n & 1) {
839 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
840 } else {
841 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
842 }
843 }
844 if (n < 80) {
845 /* f32-f62 (double width, even numbers only) */
846 GET_REG64(env->fpr[(n - 32) / 2].ll);
847 }
848 switch (n) {
849 case 80: GET_REGL(env->pc);
850 case 81: GET_REGL(env->npc);
851 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
852 ((env->asi & 0xff) << 24) |
853 ((env->pstate & 0xfff) << 8) |
854 cpu_get_cwp64(env));
855 case 83: GET_REGL(env->fsr);
856 case 84: GET_REGL(env->fprs);
857 case 85: GET_REGL(env->y);
858 }
859 #endif
860 return 0;
861 }
862
863 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
864 {
865 #if defined(TARGET_ABI32)
866 abi_ulong tmp;
867
868 tmp = ldl_p(mem_buf);
869 #else
870 target_ulong tmp;
871
872 tmp = ldtul_p(mem_buf);
873 #endif
874
875 if (n < 8) {
876 /* g0..g7 */
877 env->gregs[n] = tmp;
878 } else if (n < 32) {
879 /* register window */
880 env->regwptr[n - 8] = tmp;
881 }
882 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
883 else if (n < 64) {
884 /* fprs */
885 /* f0-f31 */
886 if (n & 1) {
887 env->fpr[(n - 32) / 2].l.lower = tmp;
888 } else {
889 env->fpr[(n - 32) / 2].l.upper = tmp;
890 }
891 } else {
892 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
893 switch (n) {
894 case 64: env->y = tmp; break;
895 case 65: cpu_put_psr(env, tmp); break;
896 case 66: env->wim = tmp; break;
897 case 67: env->tbr = tmp; break;
898 case 68: env->pc = tmp; break;
899 case 69: env->npc = tmp; break;
900 case 70: env->fsr = tmp; break;
901 default: return 0;
902 }
903 }
904 return 4;
905 #else
906 else if (n < 64) {
907 /* f0-f31 */
908 tmp = ldl_p(mem_buf);
909 if (n & 1) {
910 env->fpr[(n - 32) / 2].l.lower = tmp;
911 } else {
912 env->fpr[(n - 32) / 2].l.upper = tmp;
913 }
914 return 4;
915 } else if (n < 80) {
916 /* f32-f62 (double width, even numbers only) */
917 env->fpr[(n - 32) / 2].ll = tmp;
918 } else {
919 switch (n) {
920 case 80: env->pc = tmp; break;
921 case 81: env->npc = tmp; break;
922 case 82:
923 cpu_put_ccr(env, tmp >> 32);
924 env->asi = (tmp >> 24) & 0xff;
925 env->pstate = (tmp >> 8) & 0xfff;
926 cpu_put_cwp64(env, tmp & 0xff);
927 break;
928 case 83: env->fsr = tmp; break;
929 case 84: env->fprs = tmp; break;
930 case 85: env->y = tmp; break;
931 default: return 0;
932 }
933 }
934 return 8;
935 #endif
936 }
937 #elif defined (TARGET_ARM)
938
939 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
940 whatever the target description contains. Due to a historical mishap
941 the FPA registers appear in between core integer regs and the CPSR.
942 We hack around this by giving the FPA regs zero size when talking to a
943 newer gdb. */
944 #define NUM_CORE_REGS 26
945 #define GDB_CORE_XML "arm-core.xml"
946
947 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
948 {
949 if (n < 16) {
950 /* Core integer register. */
951 GET_REG32(env->regs[n]);
952 }
953 if (n < 24) {
954 /* FPA registers. */
955 if (gdb_has_xml)
956 return 0;
957 memset(mem_buf, 0, 12);
958 return 12;
959 }
960 switch (n) {
961 case 24:
962 /* FPA status register. */
963 if (gdb_has_xml)
964 return 0;
965 GET_REG32(0);
966 case 25:
967 /* CPSR */
968 GET_REG32(cpsr_read(env));
969 }
970 /* Unknown register. */
971 return 0;
972 }
973
974 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
975 {
976 uint32_t tmp;
977
978 tmp = ldl_p(mem_buf);
979
980 /* Mask out low bit of PC to work around gdb bugs. This will probably
981 cause problems if we ever implement the Jazelle DBX extensions. */
982 if (n == 15)
983 tmp &= ~1;
984
985 if (n < 16) {
986 /* Core integer register. */
987 env->regs[n] = tmp;
988 return 4;
989 }
990 if (n < 24) { /* 16-23 */
991 /* FPA registers (ignored). */
992 if (gdb_has_xml)
993 return 0;
994 return 12;
995 }
996 switch (n) {
997 case 24:
998 /* FPA status register (ignored). */
999 if (gdb_has_xml)
1000 return 0;
1001 return 4;
1002 case 25:
1003 /* CPSR */
1004 cpsr_write (env, tmp, 0xffffffff);
1005 return 4;
1006 }
1007 /* Unknown register. */
1008 return 0;
1009 }
1010
1011 #elif defined (TARGET_M68K)
1012
1013 #define NUM_CORE_REGS 18
1014
1015 #define GDB_CORE_XML "cf-core.xml"
1016
1017 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1018 {
1019 if (n < 8) {
1020 /* D0-D7 */
1021 GET_REG32(env->dregs[n]);
1022 } else if (n < 16) {
1023 /* A0-A7 */
1024 GET_REG32(env->aregs[n - 8]);
1025 } else {
1026 switch (n) {
1027 case 16: GET_REG32(env->sr);
1028 case 17: GET_REG32(env->pc);
1029 }
1030 }
1031 /* FP registers are not included here because they vary between
1032 ColdFire and m68k. Use XML bits for these. */
1033 return 0;
1034 }
1035
1036 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1037 {
1038 uint32_t tmp;
1039
1040 tmp = ldl_p(mem_buf);
1041
1042 if (n < 8) {
1043 /* D0-D7 */
1044 env->dregs[n] = tmp;
1045 } else if (n < 16) {
1046 /* A0-A7 */
1047 env->aregs[n - 8] = tmp;
1048 } else {
1049 switch (n) {
1050 case 16: env->sr = tmp; break;
1051 case 17: env->pc = tmp; break;
1052 default: return 0;
1053 }
1054 }
1055 return 4;
1056 }
1057 #elif defined (TARGET_MIPS)
1058
1059 #define NUM_CORE_REGS 73
1060
1061 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1062 {
1063 if (n < 32) {
1064 GET_REGL(env->active_tc.gpr[n]);
1065 }
1066 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1067 if (n >= 38 && n < 70) {
1068 if (env->CP0_Status & (1 << CP0St_FR))
1069 GET_REGL(env->active_fpu.fpr[n - 38].d);
1070 else
1071 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1072 }
1073 switch (n) {
1074 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1075 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1076 }
1077 }
1078 switch (n) {
1079 case 32: GET_REGL((int32_t)env->CP0_Status);
1080 case 33: GET_REGL(env->active_tc.LO[0]);
1081 case 34: GET_REGL(env->active_tc.HI[0]);
1082 case 35: GET_REGL(env->CP0_BadVAddr);
1083 case 36: GET_REGL((int32_t)env->CP0_Cause);
1084 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1085 case 72: GET_REGL(0); /* fp */
1086 case 89: GET_REGL((int32_t)env->CP0_PRid);
1087 }
1088 if (n >= 73 && n <= 88) {
1089 /* 16 embedded regs. */
1090 GET_REGL(0);
1091 }
1092
1093 return 0;
1094 }
1095
1096 /* convert the MIPS rounding mode in FCR31 to the IEEE (softfloat) library's rounding mode */
1097 static unsigned int ieee_rm[] =
1098 {
1099 float_round_nearest_even,
1100 float_round_to_zero,
1101 float_round_up,
1102 float_round_down
1103 };
1104 #define RESTORE_ROUNDING_MODE \
1105 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
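/* The low two bits of FCR31 select the rounding mode: 0 = round to nearest
   even, 1 = toward zero, 2 = toward +infinity, 3 = toward -infinity,
   exactly the order of the ieee_rm[] table above. */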
1106
1107 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1108 {
1109 target_ulong tmp;
1110
1111 tmp = ldtul_p(mem_buf);
1112
1113 if (n < 32) {
1114 env->active_tc.gpr[n] = tmp;
1115 return sizeof(target_ulong);
1116 }
1117 if (env->CP0_Config1 & (1 << CP0C1_FP)
1118 && n >= 38 && n < 73) {
1119 if (n < 70) {
1120 if (env->CP0_Status & (1 << CP0St_FR))
1121 env->active_fpu.fpr[n - 38].d = tmp;
1122 else
1123 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1124 }
1125 switch (n) {
1126 case 70:
1127 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1128 /* set rounding mode */
1129 RESTORE_ROUNDING_MODE;
1130 break;
1131 case 71: env->active_fpu.fcr0 = tmp; break;
1132 }
1133 return sizeof(target_ulong);
1134 }
1135 switch (n) {
1136 case 32: env->CP0_Status = tmp; break;
1137 case 33: env->active_tc.LO[0] = tmp; break;
1138 case 34: env->active_tc.HI[0] = tmp; break;
1139 case 35: env->CP0_BadVAddr = tmp; break;
1140 case 36: env->CP0_Cause = tmp; break;
1141 case 37:
1142 env->active_tc.PC = tmp & ~(target_ulong)1;
1143 if (tmp & 1) {
1144 env->hflags |= MIPS_HFLAG_M16;
1145 } else {
1146 env->hflags &= ~(MIPS_HFLAG_M16);
1147 }
1148 break;
1149 case 72: /* fp, ignored */ break;
1150 default:
1151 if (n > 89)
1152 return 0;
1153 /* Other registers are readonly. Ignore writes. */
1154 break;
1155 }
1156
1157 return sizeof(target_ulong);
1158 }
1159 #elif defined(TARGET_OPENRISC)
1160
1161 #define NUM_CORE_REGS (32 + 3)
1162
1163 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1164 {
1165 if (n < 32) {
1166 GET_REG32(env->gpr[n]);
1167 } else {
1168 switch (n) {
1169 case 32: /* PPC */
1170 GET_REG32(env->ppc);
1171 break;
1172
1173 case 33: /* NPC */
1174 GET_REG32(env->npc);
1175 break;
1176
1177 case 34: /* SR */
1178 GET_REG32(env->sr);
1179 break;
1180
1181 default:
1182 break;
1183 }
1184 }
1185 return 0;
1186 }
1187
1188 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1189 uint8_t *mem_buf, int n)
1190 {
1191 uint32_t tmp;
1192
1193 if (n > NUM_CORE_REGS) {
1194 return 0;
1195 }
1196
1197 tmp = ldl_p(mem_buf);
1198
1199 if (n < 32) {
1200 env->gpr[n] = tmp;
1201 } else {
1202 switch (n) {
1203 case 32: /* PPC */
1204 env->ppc = tmp;
1205 break;
1206
1207 case 33: /* NPC */
1208 env->npc = tmp;
1209 break;
1210
1211 case 34: /* SR */
1212 env->sr = tmp;
1213 break;
1214
1215 default:
1216 break;
1217 }
1218 }
1219 return 4;
1220 }
1221 #elif defined (TARGET_SH4)
1222
1223 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1224 /* FIXME: We should use XML for this. */
1225
1226 #define NUM_CORE_REGS 59
1227
1228 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1229 {
1230 switch (n) {
1231 case 0 ... 7:
1232 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1233 GET_REGL(env->gregs[n + 16]);
1234 } else {
1235 GET_REGL(env->gregs[n]);
1236 }
1237 case 8 ... 15:
1238 GET_REGL(env->gregs[n]);
1239 case 16:
1240 GET_REGL(env->pc);
1241 case 17:
1242 GET_REGL(env->pr);
1243 case 18:
1244 GET_REGL(env->gbr);
1245 case 19:
1246 GET_REGL(env->vbr);
1247 case 20:
1248 GET_REGL(env->mach);
1249 case 21:
1250 GET_REGL(env->macl);
1251 case 22:
1252 GET_REGL(env->sr);
1253 case 23:
1254 GET_REGL(env->fpul);
1255 case 24:
1256 GET_REGL(env->fpscr);
1257 case 25 ... 40:
1258 if (env->fpscr & FPSCR_FR) {
1259 stfl_p(mem_buf, env->fregs[n - 9]);
1260 } else {
1261 stfl_p(mem_buf, env->fregs[n - 25]);
1262 }
1263 return 4;
1264 case 41:
1265 GET_REGL(env->ssr);
1266 case 42:
1267 GET_REGL(env->spc);
1268 case 43 ... 50:
1269 GET_REGL(env->gregs[n - 43]);
1270 case 51 ... 58:
1271 GET_REGL(env->gregs[n - (51 - 16)]);
1272 }
1273
1274 return 0;
1275 }
1276
1277 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1278 {
1279 switch (n) {
1280 case 0 ... 7:
1281 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1282 env->gregs[n + 16] = ldl_p(mem_buf);
1283 } else {
1284 env->gregs[n] = ldl_p(mem_buf);
1285 }
1286 break;
1287 case 8 ... 15:
1288 env->gregs[n] = ldl_p(mem_buf);
1289 break;
1290 case 16:
1291 env->pc = ldl_p(mem_buf);
1292 break;
1293 case 17:
1294 env->pr = ldl_p(mem_buf);
1295 break;
1296 case 18:
1297 env->gbr = ldl_p(mem_buf);
1298 break;
1299 case 19:
1300 env->vbr = ldl_p(mem_buf);
1301 break;
1302 case 20:
1303 env->mach = ldl_p(mem_buf);
1304 break;
1305 case 21:
1306 env->macl = ldl_p(mem_buf);
1307 break;
1308 case 22:
1309 env->sr = ldl_p(mem_buf);
1310 break;
1311 case 23:
1312 env->fpul = ldl_p(mem_buf);
1313 break;
1314 case 24:
1315 env->fpscr = ldl_p(mem_buf);
1316 break;
1317 case 25 ... 40:
1318 if (env->fpscr & FPSCR_FR) {
1319 env->fregs[n - 9] = ldfl_p(mem_buf);
1320 } else {
1321 env->fregs[n - 25] = ldfl_p(mem_buf);
1322 }
1323 break;
1324 case 41:
1325 env->ssr = ldl_p(mem_buf);
1326 break;
1327 case 42:
1328 env->spc = ldl_p(mem_buf);
1329 break;
1330 case 43 ... 50:
1331 env->gregs[n - 43] = ldl_p(mem_buf);
1332 break;
1333 case 51 ... 58:
1334 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1335 break;
1336 default: return 0;
1337 }
1338
1339 return 4;
1340 }
1341 #elif defined (TARGET_MICROBLAZE)
1342
1343 #define NUM_CORE_REGS (32 + 5)
1344
1345 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1346 {
1347 if (n < 32) {
1348 GET_REG32(env->regs[n]);
1349 } else {
1350 GET_REG32(env->sregs[n - 32]);
1351 }
1352 return 0;
1353 }
1354
1355 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1356 {
1357 uint32_t tmp;
1358
1359 if (n > NUM_CORE_REGS)
1360 return 0;
1361
1362 tmp = ldl_p(mem_buf);
1363
1364 if (n < 32) {
1365 env->regs[n] = tmp;
1366 } else {
1367 env->sregs[n - 32] = tmp;
1368 }
1369 return 4;
1370 }
1371 #elif defined (TARGET_CRIS)
1372
1373 #define NUM_CORE_REGS 49
1374
1375 static int
1376 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1377 {
1378 if (n < 15) {
1379 GET_REG32(env->regs[n]);
1380 }
1381
1382 if (n == 15) {
1383 GET_REG32(env->pc);
1384 }
1385
1386 if (n < 32) {
1387 switch (n) {
1388 case 16:
1389 GET_REG8(env->pregs[n - 16]);
1390 break;
1391 case 17:
1392 GET_REG8(env->pregs[n - 16]);
1393 break;
1394 case 20:
1395 case 21:
1396 GET_REG16(env->pregs[n - 16]);
1397 break;
1398 default:
1399 if (n >= 23) {
1400 GET_REG32(env->pregs[n - 16]);
1401 }
1402 break;
1403 }
1404 }
1405 return 0;
1406 }
1407
1408 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1409 {
1410 uint8_t srs;
1411
1412 if (env->pregs[PR_VR] < 32)
1413 return read_register_crisv10(env, mem_buf, n);
1414
1415 srs = env->pregs[PR_SRS];
1416 if (n < 16) {
1417 GET_REG32(env->regs[n]);
1418 }
1419
1420 if (n >= 21 && n < 32) {
1421 GET_REG32(env->pregs[n - 16]);
1422 }
1423 if (n >= 33 && n < 49) {
1424 GET_REG32(env->sregs[srs][n - 33]);
1425 }
1426 switch (n) {
1427 case 16: GET_REG8(env->pregs[0]);
1428 case 17: GET_REG8(env->pregs[1]);
1429 case 18: GET_REG32(env->pregs[2]);
1430 case 19: GET_REG8(srs);
1431 case 20: GET_REG16(env->pregs[4]);
1432 case 32: GET_REG32(env->pc);
1433 }
1434
1435 return 0;
1436 }
1437
1438 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1439 {
1440 uint32_t tmp;
1441
1442 if (n > 49)
1443 return 0;
1444
1445 tmp = ldl_p(mem_buf);
1446
1447 if (n < 16) {
1448 env->regs[n] = tmp;
1449 }
1450
1451 if (n >= 21 && n < 32) {
1452 env->pregs[n - 16] = tmp;
1453 }
1454
1455 /* FIXME: Should the support function regs be writable? */
1456 switch (n) {
1457 case 16: return 1;
1458 case 17: return 1;
1459 case 18: env->pregs[PR_PID] = tmp; break;
1460 case 19: return 1;
1461 case 20: return 2;
1462 case 32: env->pc = tmp; break;
1463 }
1464
1465 return 4;
1466 }
1467 #elif defined (TARGET_ALPHA)
1468
1469 #define NUM_CORE_REGS 67
1470
1471 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1472 {
1473 uint64_t val;
1474 CPU_DoubleU d;
1475
1476 switch (n) {
1477 case 0 ... 30:
1478 val = env->ir[n];
1479 break;
1480 case 32 ... 62:
1481 d.d = env->fir[n - 32];
1482 val = d.ll;
1483 break;
1484 case 63:
1485 val = cpu_alpha_load_fpcr(env);
1486 break;
1487 case 64:
1488 val = env->pc;
1489 break;
1490 case 66:
1491 val = env->unique;
1492 break;
1493 case 31:
1494 case 65:
1495 /* 31 really is the zero register; 65 is unassigned in the
1496 gdb protocol, but is still required to occupy 8 bytes. */
1497 val = 0;
1498 break;
1499 default:
1500 return 0;
1501 }
1502 GET_REGL(val);
1503 }
1504
1505 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1506 {
1507 target_ulong tmp = ldtul_p(mem_buf);
1508 CPU_DoubleU d;
1509
1510 switch (n) {
1511 case 0 ... 30:
1512 env->ir[n] = tmp;
1513 break;
1514 case 32 ... 62:
1515 d.ll = tmp;
1516 env->fir[n - 32] = d.d;
1517 break;
1518 case 63:
1519 cpu_alpha_store_fpcr(env, tmp);
1520 break;
1521 case 64:
1522 env->pc = tmp;
1523 break;
1524 case 66:
1525 env->unique = tmp;
1526 break;
1527 case 31:
1528 case 65:
1529 /* 31 really is the zero register; 65 is unassigned in the
1530 gdb protocol, but is still required to occupy 8 bytes. */
1531 break;
1532 default:
1533 return 0;
1534 }
1535 return 8;
1536 }
1537 #elif defined (TARGET_S390X)
1538
1539 #define NUM_CORE_REGS S390_NUM_REGS
1540
1541 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1542 {
1543 uint64_t val;
1544 int cc_op;
1545
1546 switch (n) {
1547 case S390_PSWM_REGNUM:
1548 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1549 val = deposit64(env->psw.mask, 44, 2, cc_op);
1550 GET_REGL(val);
1551 break;
1552 case S390_PSWA_REGNUM:
1553 GET_REGL(env->psw.addr);
1554 break;
1555 case S390_R0_REGNUM ... S390_R15_REGNUM:
1556 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1557 break;
1558 case S390_A0_REGNUM ... S390_A15_REGNUM:
1559 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1560 break;
1561 case S390_FPC_REGNUM:
1562 GET_REG32(env->fpc);
1563 break;
1564 case S390_F0_REGNUM ... S390_F15_REGNUM:
1565 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
1566 break;
1567 }
1568
1569 return 0;
1570 }
1571
1572 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1573 {
1574 target_ulong tmpl;
1575 uint32_t tmp32;
1576 int r = 8;
1577 tmpl = ldtul_p(mem_buf);
1578 tmp32 = ldl_p(mem_buf);
1579
1580 switch (n) {
1581 case S390_PSWM_REGNUM:
1582 env->psw.mask = tmpl;
1583 env->cc_op = extract64(tmpl, 44, 2);
1584 break;
1585 case S390_PSWA_REGNUM:
1586 env->psw.addr = tmpl;
1587 break;
1588 case S390_R0_REGNUM ... S390_R15_REGNUM:
1589 env->regs[n-S390_R0_REGNUM] = tmpl;
1590 break;
1591 case S390_A0_REGNUM ... S390_A15_REGNUM:
1592 env->aregs[n-S390_A0_REGNUM] = tmp32;
1593 r = 4;
1594 break;
1595 case S390_FPC_REGNUM:
1596 env->fpc = tmp32;
1597 r = 4;
1598 break;
1599 case S390_F0_REGNUM ... S390_F15_REGNUM:
1600 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1601 break;
1602 default:
1603 return 0;
1604 }
1605 return r;
1606 }
1607 #elif defined (TARGET_LM32)
1608
1609 #include "hw/lm32_pic.h"
1610 #define NUM_CORE_REGS (32 + 7)
1611
1612 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1613 {
1614 if (n < 32) {
1615 GET_REG32(env->regs[n]);
1616 } else {
1617 switch (n) {
1618 case 32:
1619 GET_REG32(env->pc);
1620 break;
1621 /* FIXME: put in right exception ID */
1622 case 33:
1623 GET_REG32(0);
1624 break;
1625 case 34:
1626 GET_REG32(env->eba);
1627 break;
1628 case 35:
1629 GET_REG32(env->deba);
1630 break;
1631 case 36:
1632 GET_REG32(env->ie);
1633 break;
1634 case 37:
1635 GET_REG32(lm32_pic_get_im(env->pic_state));
1636 break;
1637 case 38:
1638 GET_REG32(lm32_pic_get_ip(env->pic_state));
1639 break;
1640 }
1641 }
1642 return 0;
1643 }
1644
1645 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1646 {
1647 uint32_t tmp;
1648
1649 if (n > NUM_CORE_REGS) {
1650 return 0;
1651 }
1652
1653 tmp = ldl_p(mem_buf);
1654
1655 if (n < 32) {
1656 env->regs[n] = tmp;
1657 } else {
1658 switch (n) {
1659 case 32:
1660 env->pc = tmp;
1661 break;
1662 case 34:
1663 env->eba = tmp;
1664 break;
1665 case 35:
1666 env->deba = tmp;
1667 break;
1668 case 36:
1669 env->ie = tmp;
1670 break;
1671 case 37:
1672 lm32_pic_set_im(env->pic_state, tmp);
1673 break;
1674 case 38:
1675 lm32_pic_set_ip(env->pic_state, tmp);
1676 break;
1677 }
1678 }
1679 return 4;
1680 }
1681 #elif defined(TARGET_XTENSA)
1682
1683 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1684 * Use num_regs to see all registers. gdb modification is required for that:
1685 * reset bit 0 in the 'flags' field of the registers definitions in the
1686 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1687 */
1688 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1689 #define num_g_regs NUM_CORE_REGS
1690
1691 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1692 {
1693 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1694
1695 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1696 return 0;
1697 }
1698
1699 switch (reg->type) {
1700 case 9: /*pc*/
1701 GET_REG32(env->pc);
1702 break;
1703
1704 case 1: /*ar*/
1705 xtensa_sync_phys_from_window(env);
1706 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1707 break;
1708
1709 case 2: /*SR*/
1710 GET_REG32(env->sregs[reg->targno & 0xff]);
1711 break;
1712
1713 case 3: /*UR*/
1714 GET_REG32(env->uregs[reg->targno & 0xff]);
1715 break;
1716
1717 case 4: /*f*/
1718 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1719 break;
1720
1721 case 8: /*a*/
1722 GET_REG32(env->regs[reg->targno & 0x0f]);
1723 break;
1724
1725 default:
1726 qemu_log("%s from reg %d of unsupported type %d\n",
1727 __func__, n, reg->type);
1728 return 0;
1729 }
1730 }
1731
1732 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1733 {
1734 uint32_t tmp;
1735 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1736
1737 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1738 return 0;
1739 }
1740
1741 tmp = ldl_p(mem_buf);
1742
1743 switch (reg->type) {
1744 case 9: /*pc*/
1745 env->pc = tmp;
1746 break;
1747
1748 case 1: /*ar*/
1749 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1750 xtensa_sync_window_from_phys(env);
1751 break;
1752
1753 case 2: /*SR*/
1754 env->sregs[reg->targno & 0xff] = tmp;
1755 break;
1756
1757 case 3: /*UR*/
1758 env->uregs[reg->targno & 0xff] = tmp;
1759 break;
1760
1761 case 4: /*f*/
1762 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1763 break;
1764
1765 case 8: /*a*/
1766 env->regs[reg->targno & 0x0f] = tmp;
1767 break;
1768
1769 default:
1770 qemu_log("%s to reg %d of unsupported type %d\n",
1771 __func__, n, reg->type);
1772 return 0;
1773 }
1774
1775 return 4;
1776 }
1777 #else
1778
1779 #define NUM_CORE_REGS 0
1780
1781 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1782 {
1783 return 0;
1784 }
1785
1786 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1787 {
1788 return 0;
1789 }
1790
1791 #endif
1792
1793 #if !defined(TARGET_XTENSA)
1794 static int num_g_regs = NUM_CORE_REGS;
1795 #endif
1796
1797 #ifdef GDB_CORE_XML
1798 /* Encode data using the encoding for 'x' packets. */
1799 static int memtox(char *buf, const char *mem, int len)
1800 {
1801 char *p = buf;
1802 char c;
1803
1804 while (len--) {
1805 c = *(mem++);
1806 switch (c) {
1807 case '#': case '$': case '*': case '}':
1808 *(p++) = '}';
1809 *(p++) = c ^ 0x20;
1810 break;
1811 default:
1812 *(p++) = c;
1813 break;
1814 }
1815 }
1816 return p - buf;
1817 }
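/* Example of the escaping above: the reserved characters '#', '$', '*'
   and '}' are sent as '}' followed by the character XORed with 0x20, so a
   literal '#' (0x23) in the XML becomes the two bytes '}' 0x03. */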
1818
1819 static const char *get_feature_xml(const char *p, const char **newp)
1820 {
1821 size_t len;
1822 int i;
1823 const char *name;
1824 static char target_xml[1024];
1825
1826 len = 0;
1827 while (p[len] && p[len] != ':')
1828 len++;
1829 *newp = p + len;
1830
1831 name = NULL;
1832 if (strncmp(p, "target.xml", len) == 0) {
1833 /* Generate the XML description for this CPU. */
1834 if (!target_xml[0]) {
1835 GDBRegisterState *r;
1836
1837 snprintf(target_xml, sizeof(target_xml),
1838 "<?xml version=\"1.0\"?>"
1839 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1840 "<target>"
1841 "<xi:include href=\"%s\"/>",
1842 GDB_CORE_XML);
1843
1844 for (r = first_cpu->gdb_regs; r; r = r->next) {
1845 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1846 pstrcat(target_xml, sizeof(target_xml), r->xml);
1847 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1848 }
1849 pstrcat(target_xml, sizeof(target_xml), "</target>");
1850 }
1851 return target_xml;
1852 }
1853 for (i = 0; ; i++) {
1854 name = xml_builtin[i][0];
1855 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1856 break;
1857 }
1858 return name ? xml_builtin[i][1] : NULL;
1859 }
1860 #endif
1861
1862 static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1863 {
1864 GDBRegisterState *r;
1865
1866 if (reg < NUM_CORE_REGS)
1867 return cpu_gdb_read_register(env, mem_buf, reg);
1868
1869 for (r = env->gdb_regs; r; r = r->next) {
1870 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1871 return r->get_reg(env, mem_buf, reg - r->base_reg);
1872 }
1873 }
1874 return 0;
1875 }
1876
1877 static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1878 {
1879 GDBRegisterState *r;
1880
1881 if (reg < NUM_CORE_REGS)
1882 return cpu_gdb_write_register(env, mem_buf, reg);
1883
1884 for (r = env->gdb_regs; r; r = r->next) {
1885 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1886 return r->set_reg(env, mem_buf, reg - r->base_reg);
1887 }
1888 }
1889 return 0;
1890 }
1891
1892 #if !defined(TARGET_XTENSA)
1893 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1894 specifies the first register number and these registers are included in
1895 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1896 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1897 */
1898
1899 void gdb_register_coprocessor(CPUArchState * env,
1900 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1901 int num_regs, const char *xml, int g_pos)
1902 {
1903 GDBRegisterState *s;
1904 GDBRegisterState **p;
1905 static int last_reg = NUM_CORE_REGS;
1906
1907 p = &env->gdb_regs;
1908 while (*p) {
1909 /* Check for duplicates. */
1910 if (strcmp((*p)->xml, xml) == 0)
1911 return;
1912 p = &(*p)->next;
1913 }
1914
1915 s = g_new0(GDBRegisterState, 1);
1916 s->base_reg = last_reg;
1917 s->num_regs = num_regs;
1918 s->get_reg = get_reg;
1919 s->set_reg = set_reg;
1920 s->xml = xml;
1921
1922 /* Add to end of list. */
1923 last_reg += num_regs;
1924 *p = s;
1925 if (g_pos) {
1926 if (g_pos != s->base_reg) {
1927 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1928 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1929 } else {
1930 num_g_regs = last_reg;
1931 }
1932 }
1933 }
1934 #endif
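/* Typical use (illustrative only; the callback and XML file names here are
   hypothetical): a target with 16 extra coprocessor registers described by
   "foo-copro.xml" would call, from its cpu init code,

       gdb_register_coprocessor(env, foo_gdb_get_reg, foo_gdb_set_reg,
                                16, "foo-copro.xml", 0);

   Passing g_pos == 0 keeps these registers out of the 'g' packet; they are
   then only reachable through 'p'/'P', with register numbers starting at
   the current last_reg. */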
1935
1936 #ifndef CONFIG_USER_ONLY
1937 static const int xlat_gdb_type[] = {
1938 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1939 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1940 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1941 };
1942 #endif
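/* The 'type' argument below comes straight from gdb's 'Z'/'z' packets:
   0 = software breakpoint, 1 = hardware breakpoint, 2 = write watchpoint,
   3 = read watchpoint, 4 = access watchpoint, matching the
   GDB_BREAKPOINT_xx and GDB_WATCHPOINT_xx constants used in the switches. */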
1943
1944 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1945 {
1946 CPUArchState *env;
1947 int err = 0;
1948
1949 if (kvm_enabled())
1950 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1951
1952 switch (type) {
1953 case GDB_BREAKPOINT_SW:
1954 case GDB_BREAKPOINT_HW:
1955 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1956 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1957 if (err)
1958 break;
1959 }
1960 return err;
1961 #ifndef CONFIG_USER_ONLY
1962 case GDB_WATCHPOINT_WRITE:
1963 case GDB_WATCHPOINT_READ:
1964 case GDB_WATCHPOINT_ACCESS:
1965 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1966 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1967 NULL);
1968 if (err)
1969 break;
1970 }
1971 return err;
1972 #endif
1973 default:
1974 return -ENOSYS;
1975 }
1976 }
1977
1978 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1979 {
1980 CPUArchState *env;
1981 int err = 0;
1982
1983 if (kvm_enabled())
1984 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1985
1986 switch (type) {
1987 case GDB_BREAKPOINT_SW:
1988 case GDB_BREAKPOINT_HW:
1989 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1990 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1991 if (err)
1992 break;
1993 }
1994 return err;
1995 #ifndef CONFIG_USER_ONLY
1996 case GDB_WATCHPOINT_WRITE:
1997 case GDB_WATCHPOINT_READ:
1998 case GDB_WATCHPOINT_ACCESS:
1999 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2000 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
2001 if (err)
2002 break;
2003 }
2004 return err;
2005 #endif
2006 default:
2007 return -ENOSYS;
2008 }
2009 }
2010
2011 static void gdb_breakpoint_remove_all(void)
2012 {
2013 CPUArchState *env;
2014
2015 if (kvm_enabled()) {
2016 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
2017 return;
2018 }
2019
2020 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2021 cpu_breakpoint_remove_all(env, BP_GDB);
2022 #ifndef CONFIG_USER_ONLY
2023 cpu_watchpoint_remove_all(env, BP_GDB);
2024 #endif
2025 }
2026 }
2027
2028 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2029 {
2030 cpu_synchronize_state(s->c_cpu);
2031 #if defined(TARGET_I386)
2032 s->c_cpu->eip = pc;
2033 #elif defined (TARGET_PPC)
2034 s->c_cpu->nip = pc;
2035 #elif defined (TARGET_SPARC)
2036 s->c_cpu->pc = pc;
2037 s->c_cpu->npc = pc + 4;
2038 #elif defined (TARGET_ARM)
2039 s->c_cpu->regs[15] = pc;
2040 #elif defined (TARGET_SH4)
2041 s->c_cpu->pc = pc;
2042 #elif defined (TARGET_MIPS)
2043 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
2044 if (pc & 1) {
2045 s->c_cpu->hflags |= MIPS_HFLAG_M16;
2046 } else {
2047 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
2048 }
2049 #elif defined (TARGET_MICROBLAZE)
2050 s->c_cpu->sregs[SR_PC] = pc;
2051 #elif defined(TARGET_OPENRISC)
2052 s->c_cpu->pc = pc;
2053 #elif defined (TARGET_CRIS)
2054 s->c_cpu->pc = pc;
2055 #elif defined (TARGET_ALPHA)
2056 s->c_cpu->pc = pc;
2057 #elif defined (TARGET_S390X)
2058 s->c_cpu->psw.addr = pc;
2059 #elif defined (TARGET_LM32)
2060 s->c_cpu->pc = pc;
2061 #elif defined(TARGET_XTENSA)
2062 s->c_cpu->pc = pc;
2063 #endif
2064 }
2065
2066 static CPUArchState *find_cpu(uint32_t thread_id)
2067 {
2068 CPUArchState *env;
2069
2070 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2071 if (cpu_index(env) == thread_id) {
2072 return env;
2073 }
2074 }
2075
2076 return NULL;
2077 }
2078
2079 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2080 {
2081 CPUArchState *env;
2082 const char *p;
2083 uint32_t thread;
2084 int ch, reg_size, type, res;
2085 char buf[MAX_PACKET_LENGTH];
2086 uint8_t mem_buf[MAX_PACKET_LENGTH];
2087 uint8_t *registers;
2088 target_ulong addr, len;
2089
2090 #ifdef DEBUG_GDB
2091 printf("command='%s'\n", line_buf);
2092 #endif
2093 p = line_buf;
2094 ch = *p++;
2095 switch(ch) {
2096 case '?':
2097 /* TODO: Make this return the correct value for user-mode. */
2098 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2099 cpu_index(s->c_cpu));
2100 put_packet(s, buf);
2101 /* Remove all the breakpoints when this query is issued,
2102 * because gdb is doing an initial connect and the state
2103 * should be cleaned up.
2104 */
2105 gdb_breakpoint_remove_all();
2106 break;
2107 case 'c':
2108 if (*p != '\0') {
2109 addr = strtoull(p, (char **)&p, 16);
2110 gdb_set_cpu_pc(s, addr);
2111 }
2112 s->signal = 0;
2113 gdb_continue(s);
2114 return RS_IDLE;
2115 case 'C':
2116 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2117 if (s->signal == -1)
2118 s->signal = 0;
2119 gdb_continue(s);
2120 return RS_IDLE;
2121 case 'v':
2122 if (strncmp(p, "Cont", 4) == 0) {
2123 int res_signal, res_thread;
2124
2125 p += 4;
2126 if (*p == '?') {
2127 put_packet(s, "vCont;c;C;s;S");
2128 break;
2129 }
2130 res = 0;
2131 res_signal = 0;
2132 res_thread = 0;
2133 while (*p) {
2134 int action, signal;
2135
2136 if (*p++ != ';') {
2137 res = 0;
2138 break;
2139 }
2140 action = *p++;
2141 signal = 0;
2142 if (action == 'C' || action == 'S') {
2143 signal = strtoul(p, (char **)&p, 16);
2144 } else if (action != 'c' && action != 's') {
2145 res = 0;
2146 break;
2147 }
2148 thread = 0;
2149 if (*p == ':') {
2150 thread = strtoull(p+1, (char **)&p, 16);
2151 }
2152 action = tolower(action);
2153 if (res == 0 || (res == 'c' && action == 's')) {
2154 res = action;
2155 res_signal = signal;
2156 res_thread = thread;
2157 }
2158 }
2159 if (res) {
2160 if (res_thread != -1 && res_thread != 0) {
2161 env = find_cpu(res_thread);
2162 if (env == NULL) {
2163 put_packet(s, "E22");
2164 break;
2165 }
2166 s->c_cpu = env;
2167 }
2168 if (res == 's') {
2169 cpu_single_step(s->c_cpu, sstep_flags);
2170 }
2171 s->signal = res_signal;
2172 gdb_continue(s);
2173 return RS_IDLE;
2174 }
2175 break;
2176 } else {
2177 goto unknown_command;
2178 }
2179 case 'k':
2180 #ifdef CONFIG_USER_ONLY
2181 /* Kill the target */
2182 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2183 exit(0);
2184 #endif
2185 case 'D':
2186 /* Detach packet */
2187 gdb_breakpoint_remove_all();
2188 gdb_syscall_mode = GDB_SYS_DISABLED;
2189 gdb_continue(s);
2190 put_packet(s, "OK");
2191 break;
2192 case 's':
2193 if (*p != '\0') {
2194 addr = strtoull(p, (char **)&p, 16);
2195 gdb_set_cpu_pc(s, addr);
2196 }
2197 cpu_single_step(s->c_cpu, sstep_flags);
2198 gdb_continue(s);
2199 return RS_IDLE;
2200 case 'F':
2201 {
2202 target_ulong ret;
2203 target_ulong err;
2204
2205 ret = strtoull(p, (char **)&p, 16);
2206 if (*p == ',') {
2207 p++;
2208 err = strtoull(p, (char **)&p, 16);
2209 } else {
2210 err = 0;
2211 }
2212 if (*p == ',')
2213 p++;
2214 type = *p;
2215 if (s->current_syscall_cb) {
2216 s->current_syscall_cb(s->c_cpu, ret, err);
2217 s->current_syscall_cb = NULL;
2218 }
2219 if (type == 'C') {
2220 put_packet(s, "T02");
2221 } else {
2222 gdb_continue(s);
2223 }
2224 }
2225 break;
2226 case 'g':
2227 cpu_synchronize_state(s->g_cpu);
2228 env = s->g_cpu;
2229 len = 0;
2230 for (addr = 0; addr < num_g_regs; addr++) {
2231 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2232 len += reg_size;
2233 }
2234 memtohex(buf, mem_buf, len);
2235 put_packet(s, buf);
2236 break;
2237 case 'G':
2238 cpu_synchronize_state(s->g_cpu);
2239 env = s->g_cpu;
2240 registers = mem_buf;
2241 len = strlen(p) / 2;
2242 hextomem((uint8_t *)registers, p, len);
2243 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2244 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2245 len -= reg_size;
2246 registers += reg_size;
2247 }
2248 put_packet(s, "OK");
2249 break;
2250 case 'm':
2251 addr = strtoull(p, (char **)&p, 16);
2252 if (*p == ',')
2253 p++;
2254 len = strtoull(p, NULL, 16);
2255 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2256 put_packet (s, "E14");
2257 } else {
2258 memtohex(buf, mem_buf, len);
2259 put_packet(s, buf);
2260 }
2261 break;
2262 case 'M':
2263 addr = strtoull(p, (char **)&p, 16);
2264 if (*p == ',')
2265 p++;
2266 len = strtoull(p, (char **)&p, 16);
2267 if (*p == ':')
2268 p++;
2269 hextomem(mem_buf, p, len);
2270 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
2271 put_packet(s, "E14");
2272 } else {
2273 put_packet(s, "OK");
2274 }
2275 break;
2276 case 'p':
2277 /* Older gdbs are really dumb, and don't use 'g' if 'p' is available.
2278 This works, but can be very slow. Anything new enough to
2279 understand XML also knows how to use this properly. */
2280 if (!gdb_has_xml)
2281 goto unknown_command;
2282 addr = strtoull(p, (char **)&p, 16);
2283 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2284 if (reg_size) {
2285 memtohex(buf, mem_buf, reg_size);
2286 put_packet(s, buf);
2287 } else {
2288 put_packet(s, "E14");
2289 }
2290 break;
2291 case 'P':
2292 if (!gdb_has_xml)
2293 goto unknown_command;
2294 addr = strtoull(p, (char **)&p, 16);
2295 if (*p == '=')
2296 p++;
2297 reg_size = strlen(p) / 2;
2298 hextomem(mem_buf, p, reg_size);
2299 gdb_write_register(s->g_cpu, mem_buf, addr);
2300 put_packet(s, "OK");
2301 break;
2302 case 'Z':
2303 case 'z':
2304 type = strtoul(p, (char **)&p, 16);
2305 if (*p == ',')
2306 p++;
2307 addr = strtoull(p, (char **)&p, 16);
2308 if (*p == ',')
2309 p++;
2310 len = strtoull(p, (char **)&p, 16);
2311 if (ch == 'Z')
2312 res = gdb_breakpoint_insert(addr, len, type);
2313 else
2314 res = gdb_breakpoint_remove(addr, len, type);
2315 if (res >= 0)
2316 put_packet(s, "OK");
2317 else if (res == -ENOSYS)
2318 put_packet(s, "");
2319 else
2320 put_packet(s, "E22");
2321 break;
2322 case 'H':
2323 type = *p++;
2324 thread = strtoull(p, (char **)&p, 16);
2325 if (thread == -1 || thread == 0) {
2326 put_packet(s, "OK");
2327 break;
2328 }
2329 env = find_cpu(thread);
2330 if (env == NULL) {
2331 put_packet(s, "E22");
2332 break;
2333 }
2334 switch (type) {
2335 case 'c':
2336 s->c_cpu = env;
2337 put_packet(s, "OK");
2338 break;
2339 case 'g':
2340 s->g_cpu = env;
2341 put_packet(s, "OK");
2342 break;
2343 default:
2344 put_packet(s, "E22");
2345 break;
2346 }
2347 break;
2348 case 'T':
2349 thread = strtoull(p, (char **)&p, 16);
2350 env = find_cpu(thread);
2351
2352 if (env != NULL) {
2353 put_packet(s, "OK");
2354 } else {
2355 put_packet(s, "E22");
2356 }
2357 break;
2358 case 'q':
2359 case 'Q':
2360 /* parse any 'q'/'Q' packets here */
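/* The qemu.sstepbits/qemu.sstep queries below are QEMU extensions; from a gdb
session they can be exercised with raw packets, e.g. (illustrative only):
maintenance packet qqemu.sstepbits
maintenance packet qqemu.sstep=0x5
The first displays the flag definitions, the second installs a new
sstep_flags value. */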
2361 if (!strcmp(p,"qemu.sstepbits")) {
2362 /* Query single-step bit definitions */
2363 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2364 SSTEP_ENABLE,
2365 SSTEP_NOIRQ,
2366 SSTEP_NOTIMER);
2367 put_packet(s, buf);
2368 break;
2369 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2370 /* Display or change the sstep_flags */
2371 p += 10;
2372 if (*p != '=') {
2373 /* Display current setting */
2374 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2375 put_packet(s, buf);
2376 break;
2377 }
2378 p++;
2379 type = strtoul(p, (char **)&p, 16);
2380 sstep_flags = type;
2381 put_packet(s, "OK");
2382 break;
2383 } else if (strcmp(p,"C") == 0) {
2384 /* "Current thread" remains vague in the spec, so always return
2385 * the first CPU (gdb returns the first thread). */
2386 put_packet(s, "QC1");
2387 break;
2388 } else if (strcmp(p,"fThreadInfo") == 0) {
2389 s->query_cpu = first_cpu;
2390 goto report_cpuinfo;
2391 } else if (strcmp(p,"sThreadInfo") == 0) {
2392 report_cpuinfo:
2393 if (s->query_cpu) {
2394 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2395 put_packet(s, buf);
2396 s->query_cpu = s->query_cpu->next_cpu;
2397 } else
2398 put_packet(s, "l");
2399 break;
2400 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2401 thread = strtoull(p+16, (char **)&p, 16);
2402 env = find_cpu(thread);
2403 if (env != NULL) {
2404 cpu_synchronize_state(env);
2405 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2406 "CPU#%d [%s]", env->cpu_index,
2407 env->halted ? "halted " : "running");
2408 memtohex(buf, mem_buf, len);
2409 put_packet(s, buf);
2410 }
2411 break;
2412 }
2413 #ifdef CONFIG_USER_ONLY
2414 else if (strncmp(p, "Offsets", 7) == 0) {
2415 TaskState *ts = s->c_cpu->opaque;
2416
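/* Note: Bss is reported at the same offset as Data (both of the last
two arguments below are data_offset). */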
2417 snprintf(buf, sizeof(buf),
2418 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2419 ";Bss=" TARGET_ABI_FMT_lx,
2420 ts->info->code_offset,
2421 ts->info->data_offset,
2422 ts->info->data_offset);
2423 put_packet(s, buf);
2424 break;
2425 }
2426 #else /* !CONFIG_USER_ONLY */
2427 else if (strncmp(p, "Rcmd,", 5) == 0) {
2428 int len = strlen(p + 5);
2429
2430 if ((len % 2) != 0) {
2431 put_packet(s, "E01");
2432 break;
2433 }
2434 hextomem(mem_buf, p + 5, len);
2435 len = len / 2;
2436 mem_buf[len++] = 0;
2437 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2438 put_packet(s, "OK");
2439 break;
2440 }
2441 #endif /* !CONFIG_USER_ONLY */
2442 if (strncmp(p, "Supported", 9) == 0) {
2443 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2444 #ifdef GDB_CORE_XML
2445 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2446 #endif
2447 put_packet(s, buf);
2448 break;
2449 }
2450 #ifdef GDB_CORE_XML
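/* 'qXfer:features:read:annex:offset,length': serve the target description
XML in chunks; a reply starting with 'm' means more data follows, 'l'
marks the last chunk. */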
2451 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2452 const char *xml;
2453 target_ulong total_len;
2454
2455 gdb_has_xml = 1;
2456 p += 19;
2457 xml = get_feature_xml(p, &p);
2458 if (!xml) {
2459 snprintf(buf, sizeof(buf), "E00");
2460 put_packet(s, buf);
2461 break;
2462 }
2463
2464 if (*p == ':')
2465 p++;
2466 addr = strtoul(p, (char **)&p, 16);
2467 if (*p == ',')
2468 p++;
2469 len = strtoul(p, (char **)&p, 16);
2470
2471 total_len = strlen(xml);
2472 if (addr > total_len) {
2473 snprintf(buf, sizeof(buf), "E00");
2474 put_packet(s, buf);
2475 break;
2476 }
2477 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2478 len = (MAX_PACKET_LENGTH - 5) / 2;
2479 if (len < total_len - addr) {
2480 buf[0] = 'm';
2481 len = memtox(buf + 1, xml + addr, len);
2482 } else {
2483 buf[0] = 'l';
2484 len = memtox(buf + 1, xml + addr, total_len - addr);
2485 }
2486 put_packet_binary(s, buf, len + 1);
2487 break;
2488 }
2489 #endif
2490 /* Unrecognised 'q' command. */
2491 goto unknown_command;
2492
2493 default:
2494 unknown_command:
2495 /* put empty packet */
2496 buf[0] = '\0';
2497 put_packet(s, buf);
2498 break;
2499 }
2500 return RS_IDLE;
2501 }
2502
2503 void gdb_set_stop_cpu(CPUArchState *env)
2504 {
2505 gdbserver_state->c_cpu = env;
2506 gdbserver_state->g_cpu = env;
2507 }
2508
2509 #ifndef CONFIG_USER_ONLY
2510 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2511 {
2512 GDBState *s = gdbserver_state;
2513 CPUArchState *env = s->c_cpu;
2514 char buf[256];
2515 const char *type;
2516 int ret;
2517
2518 if (running || s->state == RS_INACTIVE) {
2519 return;
2520 }
2521 /* Is there a GDB syscall waiting to be sent? */
2522 if (s->current_syscall_cb) {
2523 put_packet(s, s->syscall_buf);
2524 return;
2525 }
2526 switch (state) {
2527 case RUN_STATE_DEBUG:
2528 if (env->watchpoint_hit) {
2529 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2530 case BP_MEM_READ:
2531 type = "r";
2532 break;
2533 case BP_MEM_ACCESS:
2534 type = "a";
2535 break;
2536 default:
2537 type = "";
2538 break;
2539 }
2540 snprintf(buf, sizeof(buf),
2541 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2542 GDB_SIGNAL_TRAP, cpu_index(env), type,
2543 env->watchpoint_hit->vaddr);
2544 env->watchpoint_hit = NULL;
2545 goto send_packet;
2546 }
2547 tb_flush(env);
2548 ret = GDB_SIGNAL_TRAP;
2549 break;
2550 case RUN_STATE_PAUSED:
2551 ret = GDB_SIGNAL_INT;
2552 break;
2553 case RUN_STATE_SHUTDOWN:
2554 ret = GDB_SIGNAL_QUIT;
2555 break;
2556 case RUN_STATE_IO_ERROR:
2557 ret = GDB_SIGNAL_IO;
2558 break;
2559 case RUN_STATE_WATCHDOG:
2560 ret = GDB_SIGNAL_ALRM;
2561 break;
2562 case RUN_STATE_INTERNAL_ERROR:
2563 ret = GDB_SIGNAL_ABRT;
2564 break;
2565 case RUN_STATE_SAVE_VM:
2566 case RUN_STATE_RESTORE_VM:
2567 return;
2568 case RUN_STATE_FINISH_MIGRATE:
2569 ret = GDB_SIGNAL_XCPU;
2570 break;
2571 default:
2572 ret = GDB_SIGNAL_UNKNOWN;
2573 break;
2574 }
2575 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(env));
2576
2577 send_packet:
2578 put_packet(s, buf);
2579
2580 /* disable single step if it was enabled */
2581 cpu_single_step(env, 0);
2582 }
2583 #endif
2584
2585 /* Send a gdb syscall request.
2586 This accepts limited printf-style format specifiers, specifically:
2587 %x - target_ulong argument printed in hex.
2588 %lx - 64-bit argument printed in hex.
2589 %s - string pointer (target_ulong) and length (int) pair. */
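/* Usage sketch (hypothetical values; the real callers are the per-target
semihosting helpers): a guest write request could be forwarded as
gdb_do_syscall(cb, "write,%x,%x,%x", fd, buf_addr, buf_len);
which, with the 'F' prefix added below, reaches gdb as
"Fwrite,<fd>,<addr>,<len>"; gdb's eventual 'F' reply is routed back
through the callback registered here. */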
2590 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2591 {
2592 va_list va;
2593 char *p;
2594 char *p_end;
2595 target_ulong addr;
2596 uint64_t i64;
2597 GDBState *s;
2598
2599 s = gdbserver_state;
2600 if (!s)
2601 return;
2602 s->current_syscall_cb = cb;
2603 #ifndef CONFIG_USER_ONLY
2604 vm_stop(RUN_STATE_DEBUG);
2605 #endif
2606 va_start(va, fmt);
2607 p = s->syscall_buf;
2608 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2609 *(p++) = 'F';
2610 while (*fmt) {
2611 if (*fmt == '%') {
2612 fmt++;
2613 switch (*fmt++) {
2614 case 'x':
2615 addr = va_arg(va, target_ulong);
2616 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2617 break;
2618 case 'l':
2619 if (*(fmt++) != 'x')
2620 goto bad_format;
2621 i64 = va_arg(va, uint64_t);
2622 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2623 break;
2624 case 's':
2625 addr = va_arg(va, target_ulong);
2626 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2627 addr, va_arg(va, int));
2628 break;
2629 default:
2630 bad_format:
2631 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2632 fmt - 1);
2633 break;
2634 }
2635 } else {
2636 *(p++) = *(fmt++);
2637 }
2638 }
2639 *p = 0;
2640 va_end(va);
2641 #ifdef CONFIG_USER_ONLY
2642 put_packet(s, s->syscall_buf);
2643 gdb_handlesig(s->c_cpu, 0);
2644 #else
2645 /* In this case wait to send the syscall packet until notification that
2646 the CPU has stopped. This must be done because if the packet is sent
2647 now the reply from the syscall request could be received while the CPU
2648 is still in the running state, which can cause packets to be dropped
2649 and state transition 'T' packets to be sent while the syscall is still
2650 being processed. */
2651 cpu_exit(s->c_cpu);
2652 #endif
2653 }
2654
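/* Feed one byte of the remote protocol stream through the packet state
machine: accumulate the '$'...'#' payload, verify the checksum, send the
ack/nak, and hand complete packets to gdb_handle_packet(). */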
2655 static void gdb_read_byte(GDBState *s, int ch)
2656 {
2657 int i, csum;
2658 uint8_t reply;
2659
2660 #ifndef CONFIG_USER_ONLY
2661 if (s->last_packet_len) {
2662 /* Waiting for a response to the last packet. If we see the start
2663 of a new command then abandon the previous response. */
2664 if (ch == '-') {
2665 #ifdef DEBUG_GDB
2666 printf("Got NACK, retransmitting\n");
2667 #endif
2668 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2669 }
2670 #ifdef DEBUG_GDB
2671 else if (ch == '+')
2672 printf("Got ACK\n");
2673 else
2674 printf("Got '%c' when expecting ACK/NACK\n", ch);
2675 #endif
2676 if (ch == '+' || ch == '$')
2677 s->last_packet_len = 0;
2678 if (ch != '$')
2679 return;
2680 }
2681 if (runstate_is_running()) {
2682 /* when the CPU is running, we cannot do anything except stop
2683 it when receiving a char */
2684 vm_stop(RUN_STATE_PAUSED);
2685 } else
2686 #endif
2687 {
2688 switch(s->state) {
2689 case RS_IDLE:
2690 if (ch == '$') {
2691 s->line_buf_index = 0;
2692 s->state = RS_GETLINE;
2693 }
2694 break;
2695 case RS_GETLINE:
2696 if (ch == '#') {
2697 s->state = RS_CHKSUM1;
2698 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2699 s->state = RS_IDLE;
2700 } else {
2701 s->line_buf[s->line_buf_index++] = ch;
2702 }
2703 break;
2704 case RS_CHKSUM1:
2705 s->line_buf[s->line_buf_index] = '\0';
2706 s->line_csum = fromhex(ch) << 4;
2707 s->state = RS_CHKSUM2;
2708 break;
2709 case RS_CHKSUM2:
2710 s->line_csum |= fromhex(ch);
2711 csum = 0;
2712 for(i = 0; i < s->line_buf_index; i++) {
2713 csum += s->line_buf[i];
2714 }
2715 if (s->line_csum != (csum & 0xff)) {
2716 reply = '-';
2717 put_buffer(s, &reply, 1);
2718 s->state = RS_IDLE;
2719 } else {
2720 reply = '+';
2721 put_buffer(s, &reply, 1);
2722 s->state = gdb_handle_packet(s, s->line_buf);
2723 }
2724 break;
2725 default:
2726 abort();
2727 }
2728 }
2729 }
2730
2731 /* Tell the remote gdb that the process has exited. */
2732 void gdb_exit(CPUArchState *env, int code)
2733 {
2734 GDBState *s;
2735 char buf[4];
2736
2737 s = gdbserver_state;
2738 if (!s) {
2739 return;
2740 }
2741 #ifdef CONFIG_USER_ONLY
2742 if (gdbserver_fd < 0 || s->fd < 0) {
2743 return;
2744 }
2745 #endif
2746
2747 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2748 put_packet(s, buf);
2749
2750 #ifndef CONFIG_USER_ONLY
2751 if (s->chr) {
2752 qemu_chr_delete(s->chr);
2753 }
2754 #endif
2755 }
2756
2757 #ifdef CONFIG_USER_ONLY
2758 int
2759 gdb_queuesig (void)
2760 {
2761 GDBState *s;
2762
2763 s = gdbserver_state;
2764
2765 if (gdbserver_fd < 0 || s->fd < 0)
2766 return 0;
2767 else
2768 return 1;
2769 }
2770
2771 int
2772 gdb_handlesig (CPUArchState *env, int sig)
2773 {
2774 GDBState *s;
2775 char buf[256];
2776 int n;
2777
2778 s = gdbserver_state;
2779 if (gdbserver_fd < 0 || s->fd < 0)
2780 return sig;
2781
2782 /* disable single step if it was enabled */
2783 cpu_single_step(env, 0);
2784 tb_flush(env);
2785
2786 if (sig != 0)
2787 {
2788 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2789 put_packet(s, buf);
2790 }
2791 /* put_packet() might have detected that the peer terminated the
2792 connection. */
2793 if (s->fd < 0)
2794 return sig;
2795
2796 sig = 0;
2797 s->state = RS_IDLE;
2798 s->running_state = 0;
2799 while (s->running_state == 0) {
2800 n = read (s->fd, buf, 256);
2801 if (n > 0)
2802 {
2803 int i;
2804
2805 for (i = 0; i < n; i++)
2806 gdb_read_byte (s, buf[i]);
2807 }
2808 else if (n == 0 || errno != EAGAIN)
2809 {
2810 /* XXX: Connection closed. Should probably wait for another
2811 connection before continuing. */
2812 return sig;
2813 }
2814 }
2815 sig = s->signal;
2816 s->signal = 0;
2817 return sig;
2818 }
2819
2820 /* Tell the remote gdb that the process has exited due to SIG. */
2821 void gdb_signalled(CPUArchState *env, int sig)
2822 {
2823 GDBState *s;
2824 char buf[4];
2825
2826 s = gdbserver_state;
2827 if (gdbserver_fd < 0 || s->fd < 0)
2828 return;
2829
2830 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2831 put_packet(s, buf);
2832 }
2833
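/* Block until a gdb client connects on gdbserver_fd, then set up the
per-connection GDBState (user-mode emulation only). */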
2834 static void gdb_accept(void)
2835 {
2836 GDBState *s;
2837 struct sockaddr_in sockaddr;
2838 socklen_t len;
2839 int val, fd;
2840
2841 for(;;) {
2842 len = sizeof(sockaddr);
2843 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2844 if (fd < 0 && errno != EINTR) {
2845 perror("accept");
2846 return;
2847 } else if (fd >= 0) {
2848 #ifndef _WIN32
2849 fcntl(fd, F_SETFD, FD_CLOEXEC);
2850 #endif
2851 break;
2852 }
2853 }
2854
2855 /* set short latency */
2856 val = 1;
2857 setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));
2858
2859 s = g_malloc0(sizeof(GDBState));
2860 s->c_cpu = first_cpu;
2861 s->g_cpu = first_cpu;
2862 s->fd = fd;
2863 gdb_has_xml = 0;
2864
2865 gdbserver_state = s;
2866
2867 fcntl(fd, F_SETFL, O_NONBLOCK);
2868 }
2869
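/* Create a TCP socket listening on the given port and return its fd,
or -1 on failure. */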
2870 static int gdbserver_open(int port)
2871 {
2872 struct sockaddr_in sockaddr;
2873 int fd, val, ret;
2874
2875 fd = socket(PF_INET, SOCK_STREAM, 0);
2876 if (fd < 0) {
2877 perror("socket");
2878 return -1;
2879 }
2880 #ifndef _WIN32
2881 fcntl(fd, F_SETFD, FD_CLOEXEC);
2882 #endif
2883
2884 /* allow fast reuse */
2885 val = 1;
2886 setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));
2887
2888 sockaddr.sin_family = AF_INET;
2889 sockaddr.sin_port = htons(port);
2890 sockaddr.sin_addr.s_addr = 0;
2891 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2892 if (ret < 0) {
2893 perror("bind");
2894 close(fd);
2895 return -1;
2896 }
2897 ret = listen(fd, 0);
2898 if (ret < 0) {
2899 perror("listen");
2900 close(fd);
2901 return -1;
2902 }
2903 return fd;
2904 }
2905
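/* Start the gdb server for user-mode emulation: open the listening socket
on the given port and wait for a debugger to connect before returning. */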
2906 int gdbserver_start(int port)
2907 {
2908 gdbserver_fd = gdbserver_open(port);
2909 if (gdbserver_fd < 0)
2910 return -1;
2911 /* accept connections */
2912 gdb_accept();
2913 return 0;
2914 }
2915
2916 /* Disable gdb stub for child processes. */
2917 void gdbserver_fork(CPUArchState *env)
2918 {
2919 GDBState *s = gdbserver_state;
2920 if (gdbserver_fd < 0 || s->fd < 0)
2921 return;
2922 close(s->fd);
2923 s->fd = -1;
2924 cpu_breakpoint_remove_all(env, BP_GDB);
2925 cpu_watchpoint_remove_all(env, BP_GDB);
2926 }
2927 #else
2928 static int gdb_chr_can_receive(void *opaque)
2929 {
2930 /* We can handle an arbitrarily large amount of data.
2931 Pick the maximum packet size, which is as good as anything. */
2932 return MAX_PACKET_LENGTH;
2933 }
2934
2935 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2936 {
2937 int i;
2938
2939 for (i = 0; i < size; i++) {
2940 gdb_read_byte(gdbserver_state, buf[i]);
2941 }
2942 }
2943
2944 static void gdb_chr_event(void *opaque, int event)
2945 {
2946 switch (event) {
2947 case CHR_EVENT_OPENED:
2948 vm_stop(RUN_STATE_PAUSED);
2949 gdb_has_xml = 0;
2950 break;
2951 default:
2952 break;
2953 }
2954 }
2955
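/* Forward monitor output to gdb as an 'O' (console output) packet,
hex-encoded and truncated to fit in a single packet. */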
2956 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2957 {
2958 char buf[MAX_PACKET_LENGTH];
2959
2960 buf[0] = 'O';
2961 if (len > (MAX_PACKET_LENGTH/2) - 1)
2962 len = (MAX_PACKET_LENGTH/2) - 1;
2963 memtohex(buf + 1, (uint8_t *)msg, len);
2964 put_packet(s, buf);
2965 }
2966
2967 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2968 {
2969 const char *p = (const char *)buf;
2970 int max_sz;
2971
2972 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2973 for (;;) {
2974 if (len <= max_sz) {
2975 gdb_monitor_output(gdbserver_state, p, len);
2976 break;
2977 }
2978 gdb_monitor_output(gdbserver_state, p, max_sz);
2979 p += max_sz;
2980 len -= max_sz;
2981 }
2982 return len;
2983 }
2984
2985 #ifndef _WIN32
2986 static void gdb_sigterm_handler(int signal)
2987 {
2988 if (runstate_is_running()) {
2989 vm_stop(RUN_STATE_PAUSED);
2990 }
2991 }
2992 #endif
2993
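/* Start the gdb server for system emulation on the given chardev spec,
e.g. "tcp::1234" (this is what the -gdb and -s command line options pass
down); "none" sets up the state without opening a connection. */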
2994 int gdbserver_start(const char *device)
2995 {
2996 GDBState *s;
2997 char gdbstub_device_name[128];
2998 CharDriverState *chr = NULL;
2999 CharDriverState *mon_chr;
3000
3001 if (!device)
3002 return -1;
3003 if (strcmp(device, "none") != 0) {
3004 if (strstart(device, "tcp:", NULL)) {
3005 /* enforce required TCP attributes */
3006 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3007 "%s,nowait,nodelay,server", device);
3008 device = gdbstub_device_name;
3009 }
3010 #ifndef _WIN32
3011 else if (strcmp(device, "stdio") == 0) {
3012 struct sigaction act;
3013
3014 memset(&act, 0, sizeof(act));
3015 act.sa_handler = gdb_sigterm_handler;
3016 sigaction(SIGINT, &act, NULL);
3017 }
3018 #endif
3019 chr = qemu_chr_new("gdb", device, NULL);
3020 if (!chr)
3021 return -1;
3022
3023 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3024 gdb_chr_event, NULL);
3025 }
3026
3027 s = gdbserver_state;
3028 if (!s) {
3029 s = g_malloc0(sizeof(GDBState));
3030 gdbserver_state = s;
3031
3032 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3033
3034 /* Initialize a monitor terminal for gdb */
3035 mon_chr = g_malloc0(sizeof(*mon_chr));
3036 mon_chr->chr_write = gdb_monitor_write;
3037 monitor_init(mon_chr, 0);
3038 } else {
3039 if (s->chr)
3040 qemu_chr_delete(s->chr);
3041 mon_chr = s->mon_chr;
3042 memset(s, 0, sizeof(GDBState));
3043 }
3044 s->c_cpu = first_cpu;
3045 s->g_cpu = first_cpu;
3046 s->chr = chr;
3047 s->state = chr ? RS_IDLE : RS_INACTIVE;
3048 s->mon_chr = mon_chr;
3049 s->current_syscall_cb = NULL;
3050
3051 return 0;
3052 }
3053 #endif