1 /*
2 * gdb server stub
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "qemu.h"
31 #else
32 #include "monitor.h"
33 #include "qemu-char.h"
34 #include "sysemu.h"
35 #include "gdbstub.h"
36 #endif
37
38 #define MAX_PACKET_LENGTH 4096
39
40 #include "cpu.h"
41 #include "qemu_socket.h"
42 #include "kvm.h"
43
44 #ifndef TARGET_CPU_MEMORY_RW_DEBUG
45 static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
46 uint8_t *buf, int len, int is_write)
47 {
48 return cpu_memory_rw_debug(env, addr, buf, len, is_write);
49 }
50 #else
51 /* target_memory_rw_debug() defined in cpu.h */
52 #endif
53
54 enum {
55 GDB_SIGNAL_0 = 0,
56 GDB_SIGNAL_INT = 2,
57 GDB_SIGNAL_QUIT = 3,
58 GDB_SIGNAL_TRAP = 5,
59 GDB_SIGNAL_ABRT = 6,
60 GDB_SIGNAL_ALRM = 14,
61 GDB_SIGNAL_IO = 23,
62 GDB_SIGNAL_XCPU = 24,
63 GDB_SIGNAL_UNKNOWN = 143
64 };
65
66 #ifdef CONFIG_USER_ONLY
67
68 /* Map target signal numbers to GDB protocol signal numbers and vice
69 * versa. For user emulation's currently supported systems, we can
70 * assume most signals are defined.
71 */
72
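/* The table below is indexed by GDB protocol signal number; for example
   gdb_signal_table[GDB_SIGNAL_TRAP] (5) maps to TARGET_SIGTRAP, and entries
   of -1 mark signals with no target equivalent. */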
73 static int gdb_signal_table[] = {
74 0,
75 TARGET_SIGHUP,
76 TARGET_SIGINT,
77 TARGET_SIGQUIT,
78 TARGET_SIGILL,
79 TARGET_SIGTRAP,
80 TARGET_SIGABRT,
81 -1, /* SIGEMT */
82 TARGET_SIGFPE,
83 TARGET_SIGKILL,
84 TARGET_SIGBUS,
85 TARGET_SIGSEGV,
86 TARGET_SIGSYS,
87 TARGET_SIGPIPE,
88 TARGET_SIGALRM,
89 TARGET_SIGTERM,
90 TARGET_SIGURG,
91 TARGET_SIGSTOP,
92 TARGET_SIGTSTP,
93 TARGET_SIGCONT,
94 TARGET_SIGCHLD,
95 TARGET_SIGTTIN,
96 TARGET_SIGTTOU,
97 TARGET_SIGIO,
98 TARGET_SIGXCPU,
99 TARGET_SIGXFSZ,
100 TARGET_SIGVTALRM,
101 TARGET_SIGPROF,
102 TARGET_SIGWINCH,
103 -1, /* SIGLOST */
104 TARGET_SIGUSR1,
105 TARGET_SIGUSR2,
106 #ifdef TARGET_SIGPWR
107 TARGET_SIGPWR,
108 #else
109 -1,
110 #endif
111 -1, /* SIGPOLL */
112 -1,
113 -1,
114 -1,
115 -1,
116 -1,
117 -1,
118 -1,
119 -1,
120 -1,
121 -1,
122 -1,
123 #ifdef __SIGRTMIN
124 __SIGRTMIN + 1,
125 __SIGRTMIN + 2,
126 __SIGRTMIN + 3,
127 __SIGRTMIN + 4,
128 __SIGRTMIN + 5,
129 __SIGRTMIN + 6,
130 __SIGRTMIN + 7,
131 __SIGRTMIN + 8,
132 __SIGRTMIN + 9,
133 __SIGRTMIN + 10,
134 __SIGRTMIN + 11,
135 __SIGRTMIN + 12,
136 __SIGRTMIN + 13,
137 __SIGRTMIN + 14,
138 __SIGRTMIN + 15,
139 __SIGRTMIN + 16,
140 __SIGRTMIN + 17,
141 __SIGRTMIN + 18,
142 __SIGRTMIN + 19,
143 __SIGRTMIN + 20,
144 __SIGRTMIN + 21,
145 __SIGRTMIN + 22,
146 __SIGRTMIN + 23,
147 __SIGRTMIN + 24,
148 __SIGRTMIN + 25,
149 __SIGRTMIN + 26,
150 __SIGRTMIN + 27,
151 __SIGRTMIN + 28,
152 __SIGRTMIN + 29,
153 __SIGRTMIN + 30,
154 __SIGRTMIN + 31,
155 -1, /* SIGCANCEL */
156 __SIGRTMIN,
157 __SIGRTMIN + 32,
158 __SIGRTMIN + 33,
159 __SIGRTMIN + 34,
160 __SIGRTMIN + 35,
161 __SIGRTMIN + 36,
162 __SIGRTMIN + 37,
163 __SIGRTMIN + 38,
164 __SIGRTMIN + 39,
165 __SIGRTMIN + 40,
166 __SIGRTMIN + 41,
167 __SIGRTMIN + 42,
168 __SIGRTMIN + 43,
169 __SIGRTMIN + 44,
170 __SIGRTMIN + 45,
171 __SIGRTMIN + 46,
172 __SIGRTMIN + 47,
173 __SIGRTMIN + 48,
174 __SIGRTMIN + 49,
175 __SIGRTMIN + 50,
176 __SIGRTMIN + 51,
177 __SIGRTMIN + 52,
178 __SIGRTMIN + 53,
179 __SIGRTMIN + 54,
180 __SIGRTMIN + 55,
181 __SIGRTMIN + 56,
182 __SIGRTMIN + 57,
183 __SIGRTMIN + 58,
184 __SIGRTMIN + 59,
185 __SIGRTMIN + 60,
186 __SIGRTMIN + 61,
187 __SIGRTMIN + 62,
188 __SIGRTMIN + 63,
189 __SIGRTMIN + 64,
190 __SIGRTMIN + 65,
191 __SIGRTMIN + 66,
192 __SIGRTMIN + 67,
193 __SIGRTMIN + 68,
194 __SIGRTMIN + 69,
195 __SIGRTMIN + 70,
196 __SIGRTMIN + 71,
197 __SIGRTMIN + 72,
198 __SIGRTMIN + 73,
199 __SIGRTMIN + 74,
200 __SIGRTMIN + 75,
201 __SIGRTMIN + 76,
202 __SIGRTMIN + 77,
203 __SIGRTMIN + 78,
204 __SIGRTMIN + 79,
205 __SIGRTMIN + 80,
206 __SIGRTMIN + 81,
207 __SIGRTMIN + 82,
208 __SIGRTMIN + 83,
209 __SIGRTMIN + 84,
210 __SIGRTMIN + 85,
211 __SIGRTMIN + 86,
212 __SIGRTMIN + 87,
213 __SIGRTMIN + 88,
214 __SIGRTMIN + 89,
215 __SIGRTMIN + 90,
216 __SIGRTMIN + 91,
217 __SIGRTMIN + 92,
218 __SIGRTMIN + 93,
219 __SIGRTMIN + 94,
220 __SIGRTMIN + 95,
221 -1, /* SIGINFO */
222 -1, /* UNKNOWN */
223 -1, /* DEFAULT */
224 -1,
225 -1,
226 -1,
227 -1,
228 -1,
229 -1
230 #endif
231 };
232 #else
233 /* In system mode we only need SIGINT and SIGTRAP; other signals
234 are not yet supported. */
235
236 enum {
237 TARGET_SIGINT = 2,
238 TARGET_SIGTRAP = 5
239 };
240
241 static int gdb_signal_table[] = {
242 -1,
243 -1,
244 TARGET_SIGINT,
245 -1,
246 -1,
247 TARGET_SIGTRAP
248 };
249 #endif
250
251 #ifdef CONFIG_USER_ONLY
252 static int target_signal_to_gdb (int sig)
253 {
254 int i;
255 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
256 if (gdb_signal_table[i] == sig)
257 return i;
258 return GDB_SIGNAL_UNKNOWN;
259 }
260 #endif
261
262 static int gdb_signal_to_target (int sig)
263 {
264 if (sig < ARRAY_SIZE (gdb_signal_table))
265 return gdb_signal_table[sig];
266 else
267 return -1;
268 }
269
270 //#define DEBUG_GDB
271
272 typedef struct GDBRegisterState {
273 int base_reg;
274 int num_regs;
275 gdb_reg_cb get_reg;
276 gdb_reg_cb set_reg;
277 const char *xml;
278 struct GDBRegisterState *next;
279 } GDBRegisterState;
280
281 enum RSState {
282 RS_INACTIVE,
283 RS_IDLE,
284 RS_GETLINE,
285 RS_CHKSUM1,
286 RS_CHKSUM2,
287 };
288 typedef struct GDBState {
289 CPUArchState *c_cpu; /* current CPU for step/continue ops */
290 CPUArchState *g_cpu; /* current CPU for other ops */
291 CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
292 enum RSState state; /* parsing state */
293 char line_buf[MAX_PACKET_LENGTH];
294 int line_buf_index;
295 int line_csum;
296 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
297 int last_packet_len;
298 int signal;
299 #ifdef CONFIG_USER_ONLY
300 int fd;
301 int running_state;
302 #else
303 CharDriverState *chr;
304 CharDriverState *mon_chr;
305 #endif
306 char syscall_buf[256];
307 gdb_syscall_complete_cb current_syscall_cb;
308 } GDBState;
309
310 /* By default use no IRQs and no timers while single stepping so as to
311 * make single stepping like an ICE HW step.
312 */
313 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
314
315 static GDBState *gdbserver_state;
316
317 /* This is an ugly hack to cope with both new and old gdb.
318 If gdb sends qXfer:features:read then assume we're talking to a newish
319 gdb that understands target descriptions. */
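/* For reference: such a gdb typically opens the feature exchange with a
   packet along the lines of "qXfer:features:read:target.xml:0,ffb"
   (offset and length vary). */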
320 static int gdb_has_xml;
321
322 #ifdef CONFIG_USER_ONLY
323 /* XXX: This is not thread safe. Do we care? */
324 static int gdbserver_fd = -1;
325
326 static int get_char(GDBState *s)
327 {
328 uint8_t ch;
329 int ret;
330
331 for(;;) {
332 ret = qemu_recv(s->fd, &ch, 1, 0);
333 if (ret < 0) {
334 if (errno == ECONNRESET)
335 s->fd = -1;
336 if (errno != EINTR && errno != EAGAIN)
337 return -1;
338 } else if (ret == 0) {
339 close(s->fd);
340 s->fd = -1;
341 return -1;
342 } else {
343 break;
344 }
345 }
346 return ch;
347 }
348 #endif
349
350 static enum {
351 GDB_SYS_UNKNOWN,
352 GDB_SYS_ENABLED,
353 GDB_SYS_DISABLED,
354 } gdb_syscall_mode;
355
356 /* If gdb is connected when the first semihosting syscall occurs then use
357 remote gdb syscalls. Otherwise use native file IO. */
358 int use_gdb_syscalls(void)
359 {
360 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
361 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
362 : GDB_SYS_DISABLED);
363 }
364 return gdb_syscall_mode == GDB_SYS_ENABLED;
365 }
366
367 /* Resume execution. */
368 static inline void gdb_continue(GDBState *s)
369 {
370 #ifdef CONFIG_USER_ONLY
371 s->running_state = 1;
372 #else
373 vm_start();
374 #endif
375 }
376
377 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
378 {
379 #ifdef CONFIG_USER_ONLY
380 int ret;
381
382 while (len > 0) {
383 ret = send(s->fd, buf, len, 0);
384 if (ret < 0) {
385 if (errno != EINTR && errno != EAGAIN)
386 return;
387 } else {
388 buf += ret;
389 len -= ret;
390 }
391 }
392 #else
393 qemu_chr_fe_write(s->chr, buf, len);
394 #endif
395 }
396
397 static inline int fromhex(int v)
398 {
399 if (v >= '0' && v <= '9')
400 return v - '0';
401 else if (v >= 'A' && v <= 'F')
402 return v - 'A' + 10;
403 else if (v >= 'a' && v <= 'f')
404 return v - 'a' + 10;
405 else
406 return 0;
407 }
408
409 static inline int tohex(int v)
410 {
411 if (v < 10)
412 return v + '0';
413 else
414 return v - 10 + 'a';
415 }
416
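/* Helpers for the hex encoding used by most packet payloads: memtohex()
   turns the bytes 0x12 0xab into the string "12ab", and hextomem() is the
   inverse. */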
417 static void memtohex(char *buf, const uint8_t *mem, int len)
418 {
419 int i, c;
420 char *q;
421 q = buf;
422 for(i = 0; i < len; i++) {
423 c = mem[i];
424 *q++ = tohex(c >> 4);
425 *q++ = tohex(c & 0xf);
426 }
427 *q = '\0';
428 }
429
430 static void hextomem(uint8_t *mem, const char *buf, int len)
431 {
432 int i;
433
434 for(i = 0; i < len; i++) {
435 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
436 buf += 2;
437 }
438 }
439
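/* Packets go on the wire framed as '$'<payload>'#'<checksum>, where the
   checksum is the modulo-256 sum of the payload bytes in two hex digits;
   e.g. the reply "OK" is sent as "$OK#9a".  In user mode we then wait for
   gdb's '+' acknowledgement and resend until we get it. */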
440 /* return -1 if error, 0 if OK */
441 static int put_packet_binary(GDBState *s, const char *buf, int len)
442 {
443 int csum, i;
444 uint8_t *p;
445
446 for(;;) {
447 p = s->last_packet;
448 *(p++) = '$';
449 memcpy(p, buf, len);
450 p += len;
451 csum = 0;
452 for(i = 0; i < len; i++) {
453 csum += buf[i];
454 }
455 *(p++) = '#';
456 *(p++) = tohex((csum >> 4) & 0xf);
457 *(p++) = tohex((csum) & 0xf);
458
459 s->last_packet_len = p - s->last_packet;
460 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
461
462 #ifdef CONFIG_USER_ONLY
463 i = get_char(s);
464 if (i < 0)
465 return -1;
466 if (i == '+')
467 break;
468 #else
469 break;
470 #endif
471 }
472 return 0;
473 }
474
475 /* return -1 if error, 0 if OK */
476 static int put_packet(GDBState *s, const char *buf)
477 {
478 #ifdef DEBUG_GDB
479 printf("reply='%s'\n", buf);
480 #endif
481
482 return put_packet_binary(s, buf, strlen(buf));
483 }
484
485 /* The GDB remote protocol transfers values in target byte order. This means
486 we can use the raw memory access routines to access the value buffer.
487 Conveniently, these also handle the case where the buffer is mis-aligned.
488 */
489 #define GET_REG8(val) do { \
490 stb_p(mem_buf, val); \
491 return 1; \
492 } while(0)
493 #define GET_REG16(val) do { \
494 stw_p(mem_buf, val); \
495 return 2; \
496 } while(0)
497 #define GET_REG32(val) do { \
498 stl_p(mem_buf, val); \
499 return 4; \
500 } while(0)
501 #define GET_REG64(val) do { \
502 stq_p(mem_buf, val); \
503 return 8; \
504 } while(0)
505
506 #if TARGET_LONG_BITS == 64
507 #define GET_REGL(val) GET_REG64(val)
508 #define ldtul_p(addr) ldq_p(addr)
509 #else
510 #define GET_REGL(val) GET_REG32(val)
511 #define ldtul_p(addr) ldl_p(addr)
512 #endif
513
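/* Each target below supplies NUM_CORE_REGS plus cpu_gdb_read_register() and
   cpu_gdb_write_register().  Both return the number of bytes produced or
   consumed for register n (stored in target byte order via the GET_REG*
   helpers above), or 0 for an unrecognised register. */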
514 #if defined(TARGET_I386)
515
516 #ifdef TARGET_X86_64
517 static const int gpr_map[16] = {
518 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
519 8, 9, 10, 11, 12, 13, 14, 15
520 };
521 #else
522 #define gpr_map gpr_map32
523 #endif
524 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
525
526 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
527
528 #define IDX_IP_REG CPU_NB_REGS
529 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
530 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
531 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
532 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
533 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
534
535 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
536 {
537 if (n < CPU_NB_REGS) {
538 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
539 GET_REG64(env->regs[gpr_map[n]]);
540 } else if (n < CPU_NB_REGS32) {
541 GET_REG32(env->regs[gpr_map32[n]]);
542 }
543 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
544 #ifdef USE_X86LDOUBLE
545 /* FIXME: byteswap float values - after fixing fpregs layout. */
546 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
547 #else
548 memset(mem_buf, 0, 10);
549 #endif
550 return 10;
551 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
552 n -= IDX_XMM_REGS;
553 if (n < CPU_NB_REGS32 ||
554 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
555 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
556 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
557 return 16;
558 }
559 } else {
560 switch (n) {
561 case IDX_IP_REG:
562 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
563 GET_REG64(env->eip);
564 } else {
565 GET_REG32(env->eip);
566 }
567 case IDX_FLAGS_REG: GET_REG32(env->eflags);
568
569 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
570 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
571 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
572 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
573 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
574 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
575
576 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
577 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
578 (env->fpstt & 0x7) << 11);
579 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
580 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
581 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
582 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
583 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
584 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
585
586 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
587 }
588 }
589 return 0;
590 }
591
592 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
593 {
594 uint16_t selector = ldl_p(mem_buf);
595
596 if (selector != env->segs[sreg].selector) {
597 #if defined(CONFIG_USER_ONLY)
598 cpu_x86_load_seg(env, sreg, selector);
599 #else
600 unsigned int limit, flags;
601 target_ulong base;
602
603 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
604 base = selector << 4;
605 limit = 0xffff;
606 flags = 0;
607 } else {
608 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
609 return 4;
610 }
611 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
612 #endif
613 }
614 return 4;
615 }
616
617 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
618 {
619 uint32_t tmp;
620
621 if (n < CPU_NB_REGS) {
622 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
623 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
624 return sizeof(target_ulong);
625 } else if (n < CPU_NB_REGS32) {
626 n = gpr_map32[n];
627 env->regs[n] &= ~0xffffffffUL;
628 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
629 return 4;
630 }
631 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
632 #ifdef USE_X86LDOUBLE
633 /* FIXME: byteswap float values - after fixing fpregs layout. */
634 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
635 #endif
636 return 10;
637 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
638 n -= IDX_XMM_REGS;
639 if (n < CPU_NB_REGS32 ||
640 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
641 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
642 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
643 return 16;
644 }
645 } else {
646 switch (n) {
647 case IDX_IP_REG:
648 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
649 env->eip = ldq_p(mem_buf);
650 return 8;
651 } else {
652 env->eip &= ~0xffffffffUL;
653 env->eip |= (uint32_t)ldl_p(mem_buf);
654 return 4;
655 }
656 case IDX_FLAGS_REG:
657 env->eflags = ldl_p(mem_buf);
658 return 4;
659
660 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
661 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
662 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
663 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
664 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
665 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
666
667 case IDX_FP_REGS + 8:
668 env->fpuc = ldl_p(mem_buf);
669 return 4;
670 case IDX_FP_REGS + 9:
671 tmp = ldl_p(mem_buf);
672 env->fpstt = (tmp >> 11) & 7;
673 env->fpus = tmp & ~0x3800;
674 return 4;
675 case IDX_FP_REGS + 10: /* ftag */ return 4;
676 case IDX_FP_REGS + 11: /* fiseg */ return 4;
677 case IDX_FP_REGS + 12: /* fioff */ return 4;
678 case IDX_FP_REGS + 13: /* foseg */ return 4;
679 case IDX_FP_REGS + 14: /* fooff */ return 4;
680 case IDX_FP_REGS + 15: /* fop */ return 4;
681
682 case IDX_MXCSR_REG:
683 env->mxcsr = ldl_p(mem_buf);
684 return 4;
685 }
686 }
687 /* Unrecognised register. */
688 return 0;
689 }
690
691 #elif defined (TARGET_PPC)
692
693 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
694 expects whatever the target description contains. Due to a
695 historical mishap the FP registers appear in between core integer
696 regs and PC, MSR, CR, and so forth. We hack round this by giving the
697 FP regs zero size when talking to a newer gdb. */
698 #define NUM_CORE_REGS 71
699 #if defined (TARGET_PPC64)
700 #define GDB_CORE_XML "power64-core.xml"
701 #else
702 #define GDB_CORE_XML "power-core.xml"
703 #endif
704
705 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
706 {
707 if (n < 32) {
708 /* gprs */
709 GET_REGL(env->gpr[n]);
710 } else if (n < 64) {
711 /* fprs */
712 if (gdb_has_xml)
713 return 0;
714 stfq_p(mem_buf, env->fpr[n-32]);
715 return 8;
716 } else {
717 switch (n) {
718 case 64: GET_REGL(env->nip);
719 case 65: GET_REGL(env->msr);
720 case 66:
721 {
722 uint32_t cr = 0;
723 int i;
724 for (i = 0; i < 8; i++)
725 cr |= env->crf[i] << (32 - ((i + 1) * 4));
726 GET_REG32(cr);
727 }
728 case 67: GET_REGL(env->lr);
729 case 68: GET_REGL(env->ctr);
730 case 69: GET_REGL(env->xer);
731 case 70:
732 {
733 if (gdb_has_xml)
734 return 0;
735 GET_REG32(env->fpscr);
736 }
737 }
738 }
739 return 0;
740 }
741
742 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
743 {
744 if (n < 32) {
745 /* gprs */
746 env->gpr[n] = ldtul_p(mem_buf);
747 return sizeof(target_ulong);
748 } else if (n < 64) {
749 /* fprs */
750 if (gdb_has_xml)
751 return 0;
752 env->fpr[n-32] = ldfq_p(mem_buf);
753 return 8;
754 } else {
755 switch (n) {
756 case 64:
757 env->nip = ldtul_p(mem_buf);
758 return sizeof(target_ulong);
759 case 65:
760 ppc_store_msr(env, ldtul_p(mem_buf));
761 return sizeof(target_ulong);
762 case 66:
763 {
764 uint32_t cr = ldl_p(mem_buf);
765 int i;
766 for (i = 0; i < 8; i++)
767 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
768 return 4;
769 }
770 case 67:
771 env->lr = ldtul_p(mem_buf);
772 return sizeof(target_ulong);
773 case 68:
774 env->ctr = ldtul_p(mem_buf);
775 return sizeof(target_ulong);
776 case 69:
777 env->xer = ldtul_p(mem_buf);
778 return sizeof(target_ulong);
779 case 70:
780 /* fpscr */
781 if (gdb_has_xml)
782 return 0;
783 return 4;
784 }
785 }
786 return 0;
787 }
788
789 #elif defined (TARGET_SPARC)
790
791 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
792 #define NUM_CORE_REGS 86
793 #else
794 #define NUM_CORE_REGS 72
795 #endif
796
797 #ifdef TARGET_ABI32
798 #define GET_REGA(val) GET_REG32(val)
799 #else
800 #define GET_REGA(val) GET_REGL(val)
801 #endif
802
803 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
804 {
805 if (n < 8) {
806 /* g0..g7 */
807 GET_REGA(env->gregs[n]);
808 }
809 if (n < 32) {
810 /* register window */
811 GET_REGA(env->regwptr[n - 8]);
812 }
813 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
814 if (n < 64) {
815 /* fprs */
816 if (n & 1) {
817 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
818 } else {
819 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
820 }
821 }
822 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
823 switch (n) {
824 case 64: GET_REGA(env->y);
825 case 65: GET_REGA(cpu_get_psr(env));
826 case 66: GET_REGA(env->wim);
827 case 67: GET_REGA(env->tbr);
828 case 68: GET_REGA(env->pc);
829 case 69: GET_REGA(env->npc);
830 case 70: GET_REGA(env->fsr);
831 case 71: GET_REGA(0); /* csr */
832 default: GET_REGA(0);
833 }
834 #else
835 if (n < 64) {
836 /* f0-f31 */
837 if (n & 1) {
838 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
839 } else {
840 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
841 }
842 }
843 if (n < 80) {
844 /* f32-f62 (double width, even numbers only) */
845 GET_REG64(env->fpr[(n - 32) / 2].ll);
846 }
847 switch (n) {
848 case 80: GET_REGL(env->pc);
849 case 81: GET_REGL(env->npc);
850 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
851 ((env->asi & 0xff) << 24) |
852 ((env->pstate & 0xfff) << 8) |
853 cpu_get_cwp64(env));
854 case 83: GET_REGL(env->fsr);
855 case 84: GET_REGL(env->fprs);
856 case 85: GET_REGL(env->y);
857 }
858 #endif
859 return 0;
860 }
861
862 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
863 {
864 #if defined(TARGET_ABI32)
865 abi_ulong tmp;
866
867 tmp = ldl_p(mem_buf);
868 #else
869 target_ulong tmp;
870
871 tmp = ldtul_p(mem_buf);
872 #endif
873
874 if (n < 8) {
875 /* g0..g7 */
876 env->gregs[n] = tmp;
877 } else if (n < 32) {
878 /* register window */
879 env->regwptr[n - 8] = tmp;
880 }
881 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
882 else if (n < 64) {
883 /* fprs */
884 /* f0-f31 */
885 if (n & 1) {
886 env->fpr[(n - 32) / 2].l.lower = tmp;
887 } else {
888 env->fpr[(n - 32) / 2].l.upper = tmp;
889 }
890 } else {
891 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
892 switch (n) {
893 case 64: env->y = tmp; break;
894 case 65: cpu_put_psr(env, tmp); break;
895 case 66: env->wim = tmp; break;
896 case 67: env->tbr = tmp; break;
897 case 68: env->pc = tmp; break;
898 case 69: env->npc = tmp; break;
899 case 70: env->fsr = tmp; break;
900 default: return 0;
901 }
902 }
903 return 4;
904 #else
905 else if (n < 64) {
906 /* f0-f31 */
907 tmp = ldl_p(mem_buf);
908 if (n & 1) {
909 env->fpr[(n - 32) / 2].l.lower = tmp;
910 } else {
911 env->fpr[(n - 32) / 2].l.upper = tmp;
912 }
913 return 4;
914 } else if (n < 80) {
915 /* f32-f62 (double width, even numbers only) */
916 env->fpr[(n - 32) / 2].ll = tmp;
917 } else {
918 switch (n) {
919 case 80: env->pc = tmp; break;
920 case 81: env->npc = tmp; break;
921 case 82:
922 cpu_put_ccr(env, tmp >> 32);
923 env->asi = (tmp >> 24) & 0xff;
924 env->pstate = (tmp >> 8) & 0xfff;
925 cpu_put_cwp64(env, tmp & 0xff);
926 break;
927 case 83: env->fsr = tmp; break;
928 case 84: env->fprs = tmp; break;
929 case 85: env->y = tmp; break;
930 default: return 0;
931 }
932 }
933 return 8;
934 #endif
935 }
936 #elif defined (TARGET_ARM)
937
938 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
939 whatever the target description contains. Due to a historical mishap
940 the FPA registers appear in between core integer regs and the CPSR.
941 We hack round this by giving the FPA regs zero size when talking to a
942 newer gdb. */
943 #define NUM_CORE_REGS 26
944 #define GDB_CORE_XML "arm-core.xml"
945
946 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
947 {
948 if (n < 16) {
949 /* Core integer register. */
950 GET_REG32(env->regs[n]);
951 }
952 if (n < 24) {
953 /* FPA registers. */
954 if (gdb_has_xml)
955 return 0;
956 memset(mem_buf, 0, 12);
957 return 12;
958 }
959 switch (n) {
960 case 24:
961 /* FPA status register. */
962 if (gdb_has_xml)
963 return 0;
964 GET_REG32(0);
965 case 25:
966 /* CPSR */
967 GET_REG32(cpsr_read(env));
968 }
969 /* Unknown register. */
970 return 0;
971 }
972
973 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
974 {
975 uint32_t tmp;
976
977 tmp = ldl_p(mem_buf);
978
979 /* Mask out low bit of PC to work around gdb bugs. This will probably
980 cause problems if we ever implement the Jazelle DBX extensions. */
981 if (n == 15)
982 tmp &= ~1;
983
984 if (n < 16) {
985 /* Core integer register. */
986 env->regs[n] = tmp;
987 return 4;
988 }
989 if (n < 24) { /* 16-23 */
990 /* FPA registers (ignored). */
991 if (gdb_has_xml)
992 return 0;
993 return 12;
994 }
995 switch (n) {
996 case 24:
997 /* FPA status register (ignored). */
998 if (gdb_has_xml)
999 return 0;
1000 return 4;
1001 case 25:
1002 /* CPSR */
1003 cpsr_write (env, tmp, 0xffffffff);
1004 return 4;
1005 }
1006 /* Unknown register. */
1007 return 0;
1008 }
1009
1010 #elif defined (TARGET_M68K)
1011
1012 #define NUM_CORE_REGS 18
1013
1014 #define GDB_CORE_XML "cf-core.xml"
1015
1016 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1017 {
1018 if (n < 8) {
1019 /* D0-D7 */
1020 GET_REG32(env->dregs[n]);
1021 } else if (n < 16) {
1022 /* A0-A7 */
1023 GET_REG32(env->aregs[n - 8]);
1024 } else {
1025 switch (n) {
1026 case 16: GET_REG32(env->sr);
1027 case 17: GET_REG32(env->pc);
1028 }
1029 }
1030 /* FP registers not included here because they vary between
1031 ColdFire and m68k. Use XML bits for these. */
1032 return 0;
1033 }
1034
1035 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1036 {
1037 uint32_t tmp;
1038
1039 tmp = ldl_p(mem_buf);
1040
1041 if (n < 8) {
1042 /* D0-D7 */
1043 env->dregs[n] = tmp;
1044 } else if (n < 16) {
1045 /* A0-A7 */
1046 env->aregs[n - 8] = tmp;
1047 } else {
1048 switch (n) {
1049 case 16: env->sr = tmp; break;
1050 case 17: env->pc = tmp; break;
1051 default: return 0;
1052 }
1053 }
1054 return 4;
1055 }
1056 #elif defined (TARGET_MIPS)
1057
1058 #define NUM_CORE_REGS 73
1059
1060 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1061 {
1062 if (n < 32) {
1063 GET_REGL(env->active_tc.gpr[n]);
1064 }
1065 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1066 if (n >= 38 && n < 70) {
1067 if (env->CP0_Status & (1 << CP0St_FR))
1068 GET_REGL(env->active_fpu.fpr[n - 38].d);
1069 else
1070 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1071 }
1072 switch (n) {
1073 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1074 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1075 }
1076 }
1077 switch (n) {
1078 case 32: GET_REGL((int32_t)env->CP0_Status);
1079 case 33: GET_REGL(env->active_tc.LO[0]);
1080 case 34: GET_REGL(env->active_tc.HI[0]);
1081 case 35: GET_REGL(env->CP0_BadVAddr);
1082 case 36: GET_REGL((int32_t)env->CP0_Cause);
1083 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1084 case 72: GET_REGL(0); /* fp */
1085 case 89: GET_REGL((int32_t)env->CP0_PRid);
1086 }
1087 if (n >= 73 && n <= 88) {
1088 /* 16 embedded regs. */
1089 GET_REGL(0);
1090 }
1091
1092 return 0;
1093 }
1094
1095 /* convert the MIPS rounding mode in FCR31 to the IEEE library rounding mode */
1096 static unsigned int ieee_rm[] =
1097 {
1098 float_round_nearest_even,
1099 float_round_to_zero,
1100 float_round_up,
1101 float_round_down
1102 };
1103 #define RESTORE_ROUNDING_MODE \
1104 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
1105
1106 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1107 {
1108 target_ulong tmp;
1109
1110 tmp = ldtul_p(mem_buf);
1111
1112 if (n < 32) {
1113 env->active_tc.gpr[n] = tmp;
1114 return sizeof(target_ulong);
1115 }
1116 if (env->CP0_Config1 & (1 << CP0C1_FP)
1117 && n >= 38 && n < 73) {
1118 if (n < 70) {
1119 if (env->CP0_Status & (1 << CP0St_FR))
1120 env->active_fpu.fpr[n - 38].d = tmp;
1121 else
1122 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1123 }
1124 switch (n) {
1125 case 70:
1126 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1127 /* set rounding mode */
1128 RESTORE_ROUNDING_MODE;
1129 break;
1130 case 71: env->active_fpu.fcr0 = tmp; break;
1131 }
1132 return sizeof(target_ulong);
1133 }
1134 switch (n) {
1135 case 32: env->CP0_Status = tmp; break;
1136 case 33: env->active_tc.LO[0] = tmp; break;
1137 case 34: env->active_tc.HI[0] = tmp; break;
1138 case 35: env->CP0_BadVAddr = tmp; break;
1139 case 36: env->CP0_Cause = tmp; break;
1140 case 37:
1141 env->active_tc.PC = tmp & ~(target_ulong)1;
1142 if (tmp & 1) {
1143 env->hflags |= MIPS_HFLAG_M16;
1144 } else {
1145 env->hflags &= ~(MIPS_HFLAG_M16);
1146 }
1147 break;
1148 case 72: /* fp, ignored */ break;
1149 default:
1150 if (n > 89)
1151 return 0;
1152 /* Other registers are readonly. Ignore writes. */
1153 break;
1154 }
1155
1156 return sizeof(target_ulong);
1157 }
1158 #elif defined (TARGET_SH4)
1159
1160 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1161 /* FIXME: We should use XML for this. */
1162
1163 #define NUM_CORE_REGS 59
1164
1165 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1166 {
1167 if (n < 8) {
1168 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1169 GET_REGL(env->gregs[n + 16]);
1170 } else {
1171 GET_REGL(env->gregs[n]);
1172 }
1173 } else if (n < 16) {
1174 GET_REGL(env->gregs[n]);
1175 } else if (n >= 25 && n < 41) {
1176 GET_REGL(env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)]);
1177 } else if (n >= 43 && n < 51) {
1178 GET_REGL(env->gregs[n - 43]);
1179 } else if (n >= 51 && n < 59) {
1180 GET_REGL(env->gregs[n - (51 - 16)]);
1181 }
1182 switch (n) {
1183 case 16: GET_REGL(env->pc);
1184 case 17: GET_REGL(env->pr);
1185 case 18: GET_REGL(env->gbr);
1186 case 19: GET_REGL(env->vbr);
1187 case 20: GET_REGL(env->mach);
1188 case 21: GET_REGL(env->macl);
1189 case 22: GET_REGL(env->sr);
1190 case 23: GET_REGL(env->fpul);
1191 case 24: GET_REGL(env->fpscr);
1192 case 41: GET_REGL(env->ssr);
1193 case 42: GET_REGL(env->spc);
1194 }
1195
1196 return 0;
1197 }
1198
1199 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1200 {
1201 uint32_t tmp;
1202
1203 tmp = ldl_p(mem_buf);
1204
1205 if (n < 8) {
1206 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1207 env->gregs[n + 16] = tmp;
1208 } else {
1209 env->gregs[n] = tmp;
1210 }
1211 return 4;
1212 } else if (n < 16) {
1213 env->gregs[n] = tmp;
1214 return 4;
1215 } else if (n >= 25 && n < 41) {
1216 env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)] = tmp;
1217 return 4;
1218 } else if (n >= 43 && n < 51) {
1219 env->gregs[n - 43] = tmp;
1220 return 4;
1221 } else if (n >= 51 && n < 59) {
1222 env->gregs[n - (51 - 16)] = tmp;
1223 return 4;
1224 }
1225 switch (n) {
1226 case 16: env->pc = tmp; break;
1227 case 17: env->pr = tmp; break;
1228 case 18: env->gbr = tmp; break;
1229 case 19: env->vbr = tmp; break;
1230 case 20: env->mach = tmp; break;
1231 case 21: env->macl = tmp; break;
1232 case 22: env->sr = tmp; break;
1233 case 23: env->fpul = tmp; break;
1234 case 24: env->fpscr = tmp; break;
1235 case 41: env->ssr = tmp; break;
1236 case 42: env->spc = tmp; break;
1237 default: return 0;
1238 }
1239
1240 return 4;
1241 }
1242 #elif defined (TARGET_MICROBLAZE)
1243
1244 #define NUM_CORE_REGS (32 + 5)
1245
1246 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1247 {
1248 if (n < 32) {
1249 GET_REG32(env->regs[n]);
1250 } else {
1251 GET_REG32(env->sregs[n - 32]);
1252 }
1253 return 0;
1254 }
1255
1256 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1257 {
1258 uint32_t tmp;
1259
1260 if (n > NUM_CORE_REGS)
1261 return 0;
1262
1263 tmp = ldl_p(mem_buf);
1264
1265 if (n < 32) {
1266 env->regs[n] = tmp;
1267 } else {
1268 env->sregs[n - 32] = tmp;
1269 }
1270 return 4;
1271 }
1272 #elif defined (TARGET_CRIS)
1273
1274 #define NUM_CORE_REGS 49
1275
1276 static int
1277 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1278 {
1279 if (n < 15) {
1280 GET_REG32(env->regs[n]);
1281 }
1282
1283 if (n == 15) {
1284 GET_REG32(env->pc);
1285 }
1286
1287 if (n < 32) {
1288 switch (n) {
1289 case 16:
1290 GET_REG8(env->pregs[n - 16]);
1291 break;
1292 case 17:
1293 GET_REG8(env->pregs[n - 16]);
1294 break;
1295 case 20:
1296 case 21:
1297 GET_REG16(env->pregs[n - 16]);
1298 break;
1299 default:
1300 if (n >= 23) {
1301 GET_REG32(env->pregs[n - 16]);
1302 }
1303 break;
1304 }
1305 }
1306 return 0;
1307 }
1308
1309 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1310 {
1311 uint8_t srs;
1312
1313 if (env->pregs[PR_VR] < 32)
1314 return read_register_crisv10(env, mem_buf, n);
1315
1316 srs = env->pregs[PR_SRS];
1317 if (n < 16) {
1318 GET_REG32(env->regs[n]);
1319 }
1320
1321 if (n >= 21 && n < 32) {
1322 GET_REG32(env->pregs[n - 16]);
1323 }
1324 if (n >= 33 && n < 49) {
1325 GET_REG32(env->sregs[srs][n - 33]);
1326 }
1327 switch (n) {
1328 case 16: GET_REG8(env->pregs[0]);
1329 case 17: GET_REG8(env->pregs[1]);
1330 case 18: GET_REG32(env->pregs[2]);
1331 case 19: GET_REG8(srs);
1332 case 20: GET_REG16(env->pregs[4]);
1333 case 32: GET_REG32(env->pc);
1334 }
1335
1336 return 0;
1337 }
1338
1339 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1340 {
1341 uint32_t tmp;
1342
1343 if (n > 49)
1344 return 0;
1345
1346 tmp = ldl_p(mem_buf);
1347
1348 if (n < 16) {
1349 env->regs[n] = tmp;
1350 }
1351
1352 if (n >= 21 && n < 32) {
1353 env->pregs[n - 16] = tmp;
1354 }
1355
1356 /* FIXME: Should the support function regs be writable? */
1357 switch (n) {
1358 case 16: return 1;
1359 case 17: return 1;
1360 case 18: env->pregs[PR_PID] = tmp; break;
1361 case 19: return 1;
1362 case 20: return 2;
1363 case 32: env->pc = tmp; break;
1364 }
1365
1366 return 4;
1367 }
1368 #elif defined (TARGET_ALPHA)
1369
1370 #define NUM_CORE_REGS 67
1371
1372 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1373 {
1374 uint64_t val;
1375 CPU_DoubleU d;
1376
1377 switch (n) {
1378 case 0 ... 30:
1379 val = env->ir[n];
1380 break;
1381 case 32 ... 62:
1382 d.d = env->fir[n - 32];
1383 val = d.ll;
1384 break;
1385 case 63:
1386 val = cpu_alpha_load_fpcr(env);
1387 break;
1388 case 64:
1389 val = env->pc;
1390 break;
1391 case 66:
1392 val = env->unique;
1393 break;
1394 case 31:
1395 case 65:
1396 /* 31 really is the zero register; 65 is unassigned in the
1397 gdb protocol, but is still required to occupy 8 bytes. */
1398 val = 0;
1399 break;
1400 default:
1401 return 0;
1402 }
1403 GET_REGL(val);
1404 }
1405
1406 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1407 {
1408 target_ulong tmp = ldtul_p(mem_buf);
1409 CPU_DoubleU d;
1410
1411 switch (n) {
1412 case 0 ... 30:
1413 env->ir[n] = tmp;
1414 break;
1415 case 32 ... 62:
1416 d.ll = tmp;
1417 env->fir[n - 32] = d.d;
1418 break;
1419 case 63:
1420 cpu_alpha_store_fpcr(env, tmp);
1421 break;
1422 case 64:
1423 env->pc = tmp;
1424 break;
1425 case 66:
1426 env->unique = tmp;
1427 break;
1428 case 31:
1429 case 65:
1430 /* 31 really is the zero register; 65 is unassigned in the
1431 gdb protocol, but is still required to occupy 8 bytes. */
1432 break;
1433 default:
1434 return 0;
1435 }
1436 return 8;
1437 }
1438 #elif defined (TARGET_S390X)
1439
1440 #define NUM_CORE_REGS S390_NUM_TOTAL_REGS
1441
1442 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1443 {
1444 switch (n) {
1445 case S390_PSWM_REGNUM: GET_REGL(env->psw.mask); break;
1446 case S390_PSWA_REGNUM: GET_REGL(env->psw.addr); break;
1447 case S390_R0_REGNUM ... S390_R15_REGNUM:
1448 GET_REGL(env->regs[n-S390_R0_REGNUM]); break;
1449 case S390_A0_REGNUM ... S390_A15_REGNUM:
1450 GET_REG32(env->aregs[n-S390_A0_REGNUM]); break;
1451 case S390_FPC_REGNUM: GET_REG32(env->fpc); break;
1452 case S390_F0_REGNUM ... S390_F15_REGNUM:
1453 /* XXX */
1454 break;
1455 case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
1456 case S390_CC_REGNUM:
1457 env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
1458 env->cc_vr);
1459 GET_REG32(env->cc_op);
1460 break;
1461 }
1462
1463 return 0;
1464 }
1465
1466 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1467 {
1468 target_ulong tmpl;
1469 uint32_t tmp32;
1470 int r = 8;
1471 tmpl = ldtul_p(mem_buf);
1472 tmp32 = ldl_p(mem_buf);
1473
1474 switch (n) {
1475 case S390_PSWM_REGNUM: env->psw.mask = tmpl; break;
1476 case S390_PSWA_REGNUM: env->psw.addr = tmpl; break;
1477 case S390_R0_REGNUM ... S390_R15_REGNUM:
1478 env->regs[n-S390_R0_REGNUM] = tmpl; break;
1479 case S390_A0_REGNUM ... S390_A15_REGNUM:
1480 env->aregs[n-S390_A0_REGNUM] = tmp32; r=4; break;
1481 case S390_FPC_REGNUM: env->fpc = tmp32; r=4; break;
1482 case S390_F0_REGNUM ... S390_F15_REGNUM:
1483 /* XXX */
1484 break;
1485 case S390_PC_REGNUM: env->psw.addr = tmpl; break;
1486 case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break;
1487 }
1488
1489 return r;
1490 }
1491 #elif defined (TARGET_LM32)
1492
1493 #include "hw/lm32_pic.h"
1494 #define NUM_CORE_REGS (32 + 7)
1495
1496 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1497 {
1498 if (n < 32) {
1499 GET_REG32(env->regs[n]);
1500 } else {
1501 switch (n) {
1502 case 32:
1503 GET_REG32(env->pc);
1504 break;
1505 /* FIXME: put in right exception ID */
1506 case 33:
1507 GET_REG32(0);
1508 break;
1509 case 34:
1510 GET_REG32(env->eba);
1511 break;
1512 case 35:
1513 GET_REG32(env->deba);
1514 break;
1515 case 36:
1516 GET_REG32(env->ie);
1517 break;
1518 case 37:
1519 GET_REG32(lm32_pic_get_im(env->pic_state));
1520 break;
1521 case 38:
1522 GET_REG32(lm32_pic_get_ip(env->pic_state));
1523 break;
1524 }
1525 }
1526 return 0;
1527 }
1528
1529 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1530 {
1531 uint32_t tmp;
1532
1533 if (n > NUM_CORE_REGS) {
1534 return 0;
1535 }
1536
1537 tmp = ldl_p(mem_buf);
1538
1539 if (n < 32) {
1540 env->regs[n] = tmp;
1541 } else {
1542 switch (n) {
1543 case 32:
1544 env->pc = tmp;
1545 break;
1546 case 34:
1547 env->eba = tmp;
1548 break;
1549 case 35:
1550 env->deba = tmp;
1551 break;
1552 case 36:
1553 env->ie = tmp;
1554 break;
1555 case 37:
1556 lm32_pic_set_im(env->pic_state, tmp);
1557 break;
1558 case 38:
1559 lm32_pic_set_ip(env->pic_state, tmp);
1560 break;
1561 }
1562 }
1563 return 4;
1564 }
1565 #elif defined(TARGET_XTENSA)
1566
1567 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1568 * Use num_regs to see all registers. gdb modification is required for that:
1569 * reset bit 0 in the 'flags' field of the register definitions in
1570 * gdb/xtensa-config.c inside the gdb source tree or gdb overlay.
1571 */
1572 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1573 #define num_g_regs NUM_CORE_REGS
1574
1575 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1576 {
1577 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1578
1579 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1580 return 0;
1581 }
1582
1583 switch (reg->type) {
1584 case 9: /*pc*/
1585 GET_REG32(env->pc);
1586 break;
1587
1588 case 1: /*ar*/
1589 xtensa_sync_phys_from_window(env);
1590 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1591 break;
1592
1593 case 2: /*SR*/
1594 GET_REG32(env->sregs[reg->targno & 0xff]);
1595 break;
1596
1597 case 3: /*UR*/
1598 GET_REG32(env->uregs[reg->targno & 0xff]);
1599 break;
1600
1601 case 8: /*a*/
1602 GET_REG32(env->regs[reg->targno & 0x0f]);
1603 break;
1604
1605 default:
1606 qemu_log("%s from reg %d of unsupported type %d\n",
1607 __func__, n, reg->type);
1608 return 0;
1609 }
1610 }
1611
1612 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1613 {
1614 uint32_t tmp;
1615 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1616
1617 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1618 return 0;
1619 }
1620
1621 tmp = ldl_p(mem_buf);
1622
1623 switch (reg->type) {
1624 case 9: /*pc*/
1625 env->pc = tmp;
1626 break;
1627
1628 case 1: /*ar*/
1629 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1630 xtensa_sync_window_from_phys(env);
1631 break;
1632
1633 case 2: /*SR*/
1634 env->sregs[reg->targno & 0xff] = tmp;
1635 break;
1636
1637 case 3: /*UR*/
1638 env->uregs[reg->targno & 0xff] = tmp;
1639 break;
1640
1641 case 8: /*a*/
1642 env->regs[reg->targno & 0x0f] = tmp;
1643 break;
1644
1645 default:
1646 qemu_log("%s to reg %d of unsupported type %d\n",
1647 __func__, n, reg->type);
1648 return 0;
1649 }
1650
1651 return 4;
1652 }
1653 #else
1654
1655 #define NUM_CORE_REGS 0
1656
1657 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1658 {
1659 return 0;
1660 }
1661
1662 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1663 {
1664 return 0;
1665 }
1666
1667 #endif
1668
1669 #if !defined(TARGET_XTENSA)
1670 static int num_g_regs = NUM_CORE_REGS;
1671 #endif
1672
1673 #ifdef GDB_CORE_XML
1674 /* Encode data using the encoding for 'x' packets. */
1675 static int memtox(char *buf, const char *mem, int len)
1676 {
1677 char *p = buf;
1678 char c;
1679
1680 while (len--) {
1681 c = *(mem++);
1682 switch (c) {
1683 case '#': case '$': case '*': case '}':
1684 *(p++) = '}';
1685 *(p++) = c ^ 0x20;
1686 break;
1687 default:
1688 *(p++) = c;
1689 break;
1690 }
1691 }
1692 return p - buf;
1693 }
1694
1695 static const char *get_feature_xml(const char *p, const char **newp)
1696 {
1697 size_t len;
1698 int i;
1699 const char *name;
1700 static char target_xml[1024];
1701
1702 len = 0;
1703 while (p[len] && p[len] != ':')
1704 len++;
1705 *newp = p + len;
1706
1707 name = NULL;
1708 if (strncmp(p, "target.xml", len) == 0) {
1709 /* Generate the XML description for this CPU. */
1710 if (!target_xml[0]) {
1711 GDBRegisterState *r;
1712
1713 snprintf(target_xml, sizeof(target_xml),
1714 "<?xml version=\"1.0\"?>"
1715 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1716 "<target>"
1717 "<xi:include href=\"%s\"/>",
1718 GDB_CORE_XML);
1719
1720 for (r = first_cpu->gdb_regs; r; r = r->next) {
1721 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1722 pstrcat(target_xml, sizeof(target_xml), r->xml);
1723 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1724 }
1725 pstrcat(target_xml, sizeof(target_xml), "</target>");
1726 }
1727 return target_xml;
1728 }
1729 for (i = 0; ; i++) {
1730 name = xml_builtin[i][0];
1731 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1732 break;
1733 }
1734 return name ? xml_builtin[i][1] : NULL;
1735 }
1736 #endif
1737
1738 static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1739 {
1740 GDBRegisterState *r;
1741
1742 if (reg < NUM_CORE_REGS)
1743 return cpu_gdb_read_register(env, mem_buf, reg);
1744
1745 for (r = env->gdb_regs; r; r = r->next) {
1746 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1747 return r->get_reg(env, mem_buf, reg - r->base_reg);
1748 }
1749 }
1750 return 0;
1751 }
1752
1753 static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1754 {
1755 GDBRegisterState *r;
1756
1757 if (reg < NUM_CORE_REGS)
1758 return cpu_gdb_write_register(env, mem_buf, reg);
1759
1760 for (r = env->gdb_regs; r; r = r->next) {
1761 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1762 return r->set_reg(env, mem_buf, reg - r->base_reg);
1763 }
1764 }
1765 return 0;
1766 }
1767
1768 #if !defined(TARGET_XTENSA)
1769 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1770 specifies the first register number and these registers are included in
1771 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1772 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1773 */
1774
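/* Illustrative use only (the callback names and register count here are
   assumptions, not taken from this file): a target's CPU init code might
   expose a coprocessor register set with something like
       gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                nregs, "arm-vfp.xml", 0);
   so gdb can address those registers via their own XML feature. */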
1775 void gdb_register_coprocessor(CPUArchState * env,
1776 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1777 int num_regs, const char *xml, int g_pos)
1778 {
1779 GDBRegisterState *s;
1780 GDBRegisterState **p;
1781 static int last_reg = NUM_CORE_REGS;
1782
1783 p = &env->gdb_regs;
1784 while (*p) {
1785 /* Check for duplicates. */
1786 if (strcmp((*p)->xml, xml) == 0)
1787 return;
1788 p = &(*p)->next;
1789 }
1790
1791 s = g_new0(GDBRegisterState, 1);
1792 s->base_reg = last_reg;
1793 s->num_regs = num_regs;
1794 s->get_reg = get_reg;
1795 s->set_reg = set_reg;
1796 s->xml = xml;
1797
1798 /* Add to end of list. */
1799 last_reg += num_regs;
1800 *p = s;
1801 if (g_pos) {
1802 if (g_pos != s->base_reg) {
1803 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1804 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1805 } else {
1806 num_g_regs = last_reg;
1807 }
1808 }
1809 }
1810 #endif
1811
1812 #ifndef CONFIG_USER_ONLY
1813 static const int xlat_gdb_type[] = {
1814 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1815 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1816 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1817 };
1818 #endif
1819
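/* 'type' comes straight from gdb's Z/z packets: Z0/z0 = GDB_BREAKPOINT_SW,
   Z1 = GDB_BREAKPOINT_HW, Z2 = GDB_WATCHPOINT_WRITE, Z3 = GDB_WATCHPOINT_READ,
   Z4 = GDB_WATCHPOINT_ACCESS. */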
1820 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1821 {
1822 CPUArchState *env;
1823 int err = 0;
1824
1825 if (kvm_enabled())
1826 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1827
1828 switch (type) {
1829 case GDB_BREAKPOINT_SW:
1830 case GDB_BREAKPOINT_HW:
1831 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1832 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1833 if (err)
1834 break;
1835 }
1836 return err;
1837 #ifndef CONFIG_USER_ONLY
1838 case GDB_WATCHPOINT_WRITE:
1839 case GDB_WATCHPOINT_READ:
1840 case GDB_WATCHPOINT_ACCESS:
1841 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1842 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1843 NULL);
1844 if (err)
1845 break;
1846 }
1847 return err;
1848 #endif
1849 default:
1850 return -ENOSYS;
1851 }
1852 }
1853
1854 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1855 {
1856 CPUArchState *env;
1857 int err = 0;
1858
1859 if (kvm_enabled())
1860 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1861
1862 switch (type) {
1863 case GDB_BREAKPOINT_SW:
1864 case GDB_BREAKPOINT_HW:
1865 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1866 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1867 if (err)
1868 break;
1869 }
1870 return err;
1871 #ifndef CONFIG_USER_ONLY
1872 case GDB_WATCHPOINT_WRITE:
1873 case GDB_WATCHPOINT_READ:
1874 case GDB_WATCHPOINT_ACCESS:
1875 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1876 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
1877 if (err)
1878 break;
1879 }
1880 return err;
1881 #endif
1882 default:
1883 return -ENOSYS;
1884 }
1885 }
1886
1887 static void gdb_breakpoint_remove_all(void)
1888 {
1889 CPUArchState *env;
1890
1891 if (kvm_enabled()) {
1892 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
1893 return;
1894 }
1895
1896 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1897 cpu_breakpoint_remove_all(env, BP_GDB);
1898 #ifndef CONFIG_USER_ONLY
1899 cpu_watchpoint_remove_all(env, BP_GDB);
1900 #endif
1901 }
1902 }
1903
1904 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
1905 {
1906 #if defined(TARGET_I386)
1907 cpu_synchronize_state(s->c_cpu);
1908 s->c_cpu->eip = pc;
1909 #elif defined (TARGET_PPC)
1910 s->c_cpu->nip = pc;
1911 #elif defined (TARGET_SPARC)
1912 s->c_cpu->pc = pc;
1913 s->c_cpu->npc = pc + 4;
1914 #elif defined (TARGET_ARM)
1915 s->c_cpu->regs[15] = pc;
1916 #elif defined (TARGET_SH4)
1917 s->c_cpu->pc = pc;
1918 #elif defined (TARGET_MIPS)
1919 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
1920 if (pc & 1) {
1921 s->c_cpu->hflags |= MIPS_HFLAG_M16;
1922 } else {
1923 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
1924 }
1925 #elif defined (TARGET_MICROBLAZE)
1926 s->c_cpu->sregs[SR_PC] = pc;
1927 #elif defined (TARGET_CRIS)
1928 s->c_cpu->pc = pc;
1929 #elif defined (TARGET_ALPHA)
1930 s->c_cpu->pc = pc;
1931 #elif defined (TARGET_S390X)
1932 cpu_synchronize_state(s->c_cpu);
1933 s->c_cpu->psw.addr = pc;
1934 #elif defined (TARGET_LM32)
1935 s->c_cpu->pc = pc;
1936 #elif defined(TARGET_XTENSA)
1937 s->c_cpu->pc = pc;
1938 #endif
1939 }
1940
1941 static inline int gdb_id(CPUArchState *env)
1942 {
1943 #if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
1944 return env->host_tid;
1945 #else
1946 return env->cpu_index + 1;
1947 #endif
1948 }
1949
1950 static CPUArchState *find_cpu(uint32_t thread_id)
1951 {
1952 CPUArchState *env;
1953
1954 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1955 if (gdb_id(env) == thread_id) {
1956 return env;
1957 }
1958 }
1959
1960 return NULL;
1961 }
1962
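/* Handle one complete packet from gdb.  The commands seen most often are
   '?' (report stop reason), 'c'/'s' (continue/step), 'g'/'G' (read/write all
   registers), 'p'/'P' (single register), 'm'/'M' (read/write memory),
   'Z'/'z' (insert/remove break- and watchpoints), 'H'/'T' (thread selection
   and liveness) and the 'q'/'Q' query family. */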
1963 static int gdb_handle_packet(GDBState *s, const char *line_buf)
1964 {
1965 CPUArchState *env;
1966 const char *p;
1967 uint32_t thread;
1968 int ch, reg_size, type, res;
1969 char buf[MAX_PACKET_LENGTH];
1970 uint8_t mem_buf[MAX_PACKET_LENGTH];
1971 uint8_t *registers;
1972 target_ulong addr, len;
1973
1974 #ifdef DEBUG_GDB
1975 printf("command='%s'\n", line_buf);
1976 #endif
1977 p = line_buf;
1978 ch = *p++;
1979 switch(ch) {
1980 case '?':
1981 /* TODO: Make this return the correct value for user-mode. */
1982 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
1983 gdb_id(s->c_cpu));
1984 put_packet(s, buf);
1985 /* Remove all the breakpoints when this query is issued,
1986 * because gdb is doing an initial connect and the state
1987 * should be cleaned up.
1988 */
1989 gdb_breakpoint_remove_all();
1990 break;
1991 case 'c':
1992 if (*p != '\0') {
1993 addr = strtoull(p, (char **)&p, 16);
1994 gdb_set_cpu_pc(s, addr);
1995 }
1996 s->signal = 0;
1997 gdb_continue(s);
1998 return RS_IDLE;
1999 case 'C':
2000 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2001 if (s->signal == -1)
2002 s->signal = 0;
2003 gdb_continue(s);
2004 return RS_IDLE;
2005 case 'v':
2006 if (strncmp(p, "Cont", 4) == 0) {
2007 int res_signal, res_thread;
2008
2009 p += 4;
2010 if (*p == '?') {
2011 put_packet(s, "vCont;c;C;s;S");
2012 break;
2013 }
2014 res = 0;
2015 res_signal = 0;
2016 res_thread = 0;
2017 while (*p) {
2018 int action, signal;
2019
2020 if (*p++ != ';') {
2021 res = 0;
2022 break;
2023 }
2024 action = *p++;
2025 signal = 0;
2026 if (action == 'C' || action == 'S') {
2027 signal = strtoul(p, (char **)&p, 16);
2028 } else if (action != 'c' && action != 's') {
2029 res = 0;
2030 break;
2031 }
2032 thread = 0;
2033 if (*p == ':') {
2034 thread = strtoull(p+1, (char **)&p, 16);
2035 }
2036 action = tolower(action);
2037 if (res == 0 || (res == 'c' && action == 's')) {
2038 res = action;
2039 res_signal = signal;
2040 res_thread = thread;
2041 }
2042 }
2043 if (res) {
2044 if (res_thread != -1 && res_thread != 0) {
2045 env = find_cpu(res_thread);
2046 if (env == NULL) {
2047 put_packet(s, "E22");
2048 break;
2049 }
2050 s->c_cpu = env;
2051 }
2052 if (res == 's') {
2053 cpu_single_step(s->c_cpu, sstep_flags);
2054 }
2055 s->signal = res_signal;
2056 gdb_continue(s);
2057 return RS_IDLE;
2058 }
2059 break;
2060 } else {
2061 goto unknown_command;
2062 }
2063 case 'k':
2064 #ifdef CONFIG_USER_ONLY
2065 /* Kill the target */
2066 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2067 exit(0);
2068 #endif
2069 case 'D':
2070 /* Detach packet */
2071 gdb_breakpoint_remove_all();
2072 gdb_syscall_mode = GDB_SYS_DISABLED;
2073 gdb_continue(s);
2074 put_packet(s, "OK");
2075 break;
2076 case 's':
2077 if (*p != '\0') {
2078 addr = strtoull(p, (char **)&p, 16);
2079 gdb_set_cpu_pc(s, addr);
2080 }
2081 cpu_single_step(s->c_cpu, sstep_flags);
2082 gdb_continue(s);
2083 return RS_IDLE;
2084 case 'F':
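/* 'F' is gdb's reply to a file-I/O (semihosting) request issued earlier by
   the stub: "Fretcode[,errno[,C]]", where a trailing 'C' indicates the call
   was interrupted by Ctrl-C. */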
2085 {
2086 target_ulong ret;
2087 target_ulong err;
2088
2089 ret = strtoull(p, (char **)&p, 16);
2090 if (*p == ',') {
2091 p++;
2092 err = strtoull(p, (char **)&p, 16);
2093 } else {
2094 err = 0;
2095 }
2096 if (*p == ',')
2097 p++;
2098 type = *p;
2099 if (s->current_syscall_cb) {
2100 s->current_syscall_cb(s->c_cpu, ret, err);
2101 s->current_syscall_cb = NULL;
2102 }
2103 if (type == 'C') {
2104 put_packet(s, "T02");
2105 } else {
2106 gdb_continue(s);
2107 }
2108 }
2109 break;
2110 case 'g':
2111 cpu_synchronize_state(s->g_cpu);
2112 env = s->g_cpu;
2113 len = 0;
2114 for (addr = 0; addr < num_g_regs; addr++) {
2115 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2116 len += reg_size;
2117 }
2118 memtohex(buf, mem_buf, len);
2119 put_packet(s, buf);
2120 break;
2121 case 'G':
2122 cpu_synchronize_state(s->g_cpu);
2123 env = s->g_cpu;
2124 registers = mem_buf;
2125 len = strlen(p) / 2;
2126 hextomem((uint8_t *)registers, p, len);
2127 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2128 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2129 len -= reg_size;
2130 registers += reg_size;
2131 }
2132 put_packet(s, "OK");
2133 break;
2134 case 'm':
2135 addr = strtoull(p, (char **)&p, 16);
2136 if (*p == ',')
2137 p++;
2138 len = strtoull(p, NULL, 16);
2139 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2140 put_packet (s, "E14");
2141 } else {
2142 memtohex(buf, mem_buf, len);
2143 put_packet(s, buf);
2144 }
2145 break;
2146 case 'M':
2147 addr = strtoull(p, (char **)&p, 16);
2148 if (*p == ',')
2149 p++;
2150 len = strtoull(p, (char **)&p, 16);
2151 if (*p == ':')
2152 p++;
2153 hextomem(mem_buf, p, len);
2154 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
2155 put_packet(s, "E14");
2156 } else {
2157 put_packet(s, "OK");
2158 }
2159 break;
2160 case 'p':
2161 /* Older gdbs are really dumb and don't use 'g' if 'p' is available.
2162 This works, but can be very slow. Anything new enough to
2163 understand XML also knows how to use this properly. */
2164 if (!gdb_has_xml)
2165 goto unknown_command;
2166 addr = strtoull(p, (char **)&p, 16);
2167 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2168 if (reg_size) {
2169 memtohex(buf, mem_buf, reg_size);
2170 put_packet(s, buf);
2171 } else {
2172 put_packet(s, "E14");
2173 }
2174 break;
2175 case 'P':
2176 if (!gdb_has_xml)
2177 goto unknown_command;
2178 addr = strtoull(p, (char **)&p, 16);
2179 if (*p == '=')
2180 p++;
2181 reg_size = strlen(p) / 2;
2182 hextomem(mem_buf, p, reg_size);
2183 gdb_write_register(s->g_cpu, mem_buf, addr);
2184 put_packet(s, "OK");
2185 break;
2186 case 'Z':
2187 case 'z':
2188 type = strtoul(p, (char **)&p, 16);
2189 if (*p == ',')
2190 p++;
2191 addr = strtoull(p, (char **)&p, 16);
2192 if (*p == ',')
2193 p++;
2194 len = strtoull(p, (char **)&p, 16);
2195 if (ch == 'Z')
2196 res = gdb_breakpoint_insert(addr, len, type);
2197 else
2198 res = gdb_breakpoint_remove(addr, len, type);
2199 if (res >= 0)
2200 put_packet(s, "OK");
2201 else if (res == -ENOSYS)
2202 put_packet(s, "");
2203 else
2204 put_packet(s, "E22");
2205 break;
2206 case 'H':
2207 type = *p++;
2208 thread = strtoull(p, (char **)&p, 16);
2209 if (thread == -1 || thread == 0) {
2210 put_packet(s, "OK");
2211 break;
2212 }
2213 env = find_cpu(thread);
2214 if (env == NULL) {
2215 put_packet(s, "E22");
2216 break;
2217 }
2218 switch (type) {
2219 case 'c':
2220 s->c_cpu = env;
2221 put_packet(s, "OK");
2222 break;
2223 case 'g':
2224 s->g_cpu = env;
2225 put_packet(s, "OK");
2226 break;
2227 default:
2228 put_packet(s, "E22");
2229 break;
2230 }
2231 break;
2232 case 'T':
2233 thread = strtoull(p, (char **)&p, 16);
2234 env = find_cpu(thread);
2235
2236 if (env != NULL) {
2237 put_packet(s, "OK");
2238 } else {
2239 put_packet(s, "E22");
2240 }
2241 break;
2242 case 'q':
2243 case 'Q':
2244 /* parse any 'q' packets here */
2245 if (!strcmp(p,"qemu.sstepbits")) {
2246 /* Query Breakpoint bit definitions */
2247 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2248 SSTEP_ENABLE,
2249 SSTEP_NOIRQ,
2250 SSTEP_NOTIMER);
2251 put_packet(s, buf);
2252 break;
2253 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2254 /* Display or change the sstep_flags */
2255 p += 10;
2256 if (*p != '=') {
2257 /* Display current setting */
2258 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2259 put_packet(s, buf);
2260 break;
2261 }
2262 p++;
2263 type = strtoul(p, (char **)&p, 16);
2264 sstep_flags = type;
2265 put_packet(s, "OK");
2266 break;
2267 } else if (strcmp(p,"C") == 0) {
2268 /* "Current thread" remains vague in the spec, so always return
2269 * the first CPU (gdb returns the first thread). */
2270 put_packet(s, "QC1");
2271 break;
2272 } else if (strcmp(p,"fThreadInfo") == 0) {
2273 s->query_cpu = first_cpu;
2274 goto report_cpuinfo;
2275 } else if (strcmp(p,"sThreadInfo") == 0) {
2276 report_cpuinfo:
2277 if (s->query_cpu) {
2278 snprintf(buf, sizeof(buf), "m%x", gdb_id(s->query_cpu));
2279 put_packet(s, buf);
2280 s->query_cpu = s->query_cpu->next_cpu;
2281 } else
2282 put_packet(s, "l");
2283 break;
2284 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2285 thread = strtoull(p+16, (char **)&p, 16);
2286 env = find_cpu(thread);
2287 if (env != NULL) {
2288 cpu_synchronize_state(env);
2289 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2290 "CPU#%d [%s]", env->cpu_index,
2291 env->halted ? "halted " : "running");
2292 memtohex(buf, mem_buf, len);
2293 put_packet(s, buf);
2294 }
2295 break;
2296 }
2297 #ifdef CONFIG_USER_ONLY
2298 else if (strncmp(p, "Offsets", 7) == 0) {
2299 TaskState *ts = s->c_cpu->opaque;
2300
2301 snprintf(buf, sizeof(buf),
2302 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2303 ";Bss=" TARGET_ABI_FMT_lx,
2304 ts->info->code_offset,
2305 ts->info->data_offset,
2306 ts->info->data_offset);
2307 put_packet(s, buf);
2308 break;
2309 }
2310 #else /* !CONFIG_USER_ONLY */
2311 else if (strncmp(p, "Rcmd,", 5) == 0) {
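/* 'qRcmd,<hex>' (gdb's "monitor" command) carries a hex-encoded monitor
   command line; it is decoded and fed to the monitor, whose output returns
   to gdb as 'O' packets via gdb_monitor_write() below. */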
2312 int len = strlen(p + 5);
2313
2314 if ((len % 2) != 0) {
2315 put_packet(s, "E01");
2316 break;
2317 }
2318 hextomem(mem_buf, p + 5, len);
2319 len = len / 2;
2320 mem_buf[len++] = 0;
2321 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2322 put_packet(s, "OK");
2323 break;
2324 }
2325 #endif /* !CONFIG_USER_ONLY */
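/* 'qSupported' negotiates protocol features: advertise the maximum packet
   size and, when a core XML description is built in, the
   qXfer:features:read transfer used to deliver it. */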
2326 if (strncmp(p, "Supported", 9) == 0) {
2327 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2328 #ifdef GDB_CORE_XML
2329 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2330 #endif
2331 put_packet(s, buf);
2332 break;
2333 }
2334 #ifdef GDB_CORE_XML
2335 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2336 const char *xml;
2337 target_ulong total_len;
2338
2339 gdb_has_xml = 1;
2340 p += 19;
2341 xml = get_feature_xml(p, &p);
2342 if (!xml) {
2343 snprintf(buf, sizeof(buf), "E00");
2344 put_packet(s, buf);
2345 break;
2346 }
2347
2348 if (*p == ':')
2349 p++;
2350 addr = strtoul(p, (char **)&p, 16);
2351 if (*p == ',')
2352 p++;
2353 len = strtoul(p, (char **)&p, 16);
2354
2355 total_len = strlen(xml);
2356 if (addr > total_len) {
2357 snprintf(buf, sizeof(buf), "E00");
2358 put_packet(s, buf);
2359 break;
2360 }
2361 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2362 len = (MAX_PACKET_LENGTH - 5) / 2;
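/* Per the qXfer conventions, reply 'm' + data while more of the document
   remains beyond this chunk, or 'l' + data for the final chunk. */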
2363 if (len < total_len - addr) {
2364 buf[0] = 'm';
2365 len = memtox(buf + 1, xml + addr, len);
2366 } else {
2367 buf[0] = 'l';
2368 len = memtox(buf + 1, xml + addr, total_len - addr);
2369 }
2370 put_packet_binary(s, buf, len + 1);
2371 break;
2372 }
2373 #endif
2374 /* Unrecognised 'q' command. */
2375 goto unknown_command;
2376
2377 default:
2378 unknown_command:
2379 /* put empty packet */
2380 buf[0] = '\0';
2381 put_packet(s, buf);
2382 break;
2383 }
2384 return RS_IDLE;
2385 }
2386
2387 void gdb_set_stop_cpu(CPUArchState *env)
2388 {
2389 gdbserver_state->c_cpu = env;
2390 gdbserver_state->g_cpu = env;
2391 }
2392
2393 #ifndef CONFIG_USER_ONLY
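/* Run-state change hook for system emulation: when the VM stops while the
   stub is active, map the stop reason onto a GDB signal number and send a
   'T' stop-reply packet (including a "watch" stop reason when a watchpoint
   fired), unless a deferred syscall request is pending, in which case that
   is sent instead. */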
2394 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2395 {
2396 GDBState *s = gdbserver_state;
2397 CPUArchState *env = s->c_cpu;
2398 char buf[256];
2399 const char *type;
2400 int ret;
2401
2402 if (running || s->state == RS_INACTIVE) {
2403 return;
2404 }
2405 /* Is there a GDB syscall waiting to be sent? */
2406 if (s->current_syscall_cb) {
2407 put_packet(s, s->syscall_buf);
2408 return;
2409 }
2410 switch (state) {
2411 case RUN_STATE_DEBUG:
2412 if (env->watchpoint_hit) {
2413 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2414 case BP_MEM_READ:
2415 type = "r";
2416 break;
2417 case BP_MEM_ACCESS:
2418 type = "a";
2419 break;
2420 default:
2421 type = "";
2422 break;
2423 }
2424 snprintf(buf, sizeof(buf),
2425 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2426 GDB_SIGNAL_TRAP, gdb_id(env), type,
2427 env->watchpoint_hit->vaddr);
2428 env->watchpoint_hit = NULL;
2429 goto send_packet;
2430 }
2431 tb_flush(env);
2432 ret = GDB_SIGNAL_TRAP;
2433 break;
2434 case RUN_STATE_PAUSED:
2435 ret = GDB_SIGNAL_INT;
2436 break;
2437 case RUN_STATE_SHUTDOWN:
2438 ret = GDB_SIGNAL_QUIT;
2439 break;
2440 case RUN_STATE_IO_ERROR:
2441 ret = GDB_SIGNAL_IO;
2442 break;
2443 case RUN_STATE_WATCHDOG:
2444 ret = GDB_SIGNAL_ALRM;
2445 break;
2446 case RUN_STATE_INTERNAL_ERROR:
2447 ret = GDB_SIGNAL_ABRT;
2448 break;
2449 case RUN_STATE_SAVE_VM:
2450 case RUN_STATE_RESTORE_VM:
2451 return;
2452 case RUN_STATE_FINISH_MIGRATE:
2453 ret = GDB_SIGNAL_XCPU;
2454 break;
2455 default:
2456 ret = GDB_SIGNAL_UNKNOWN;
2457 break;
2458 }
2459 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, gdb_id(env));
2460
2461 send_packet:
2462 put_packet(s, buf);
2463
2464 /* disable single step if it was enabled */
2465 cpu_single_step(env, 0);
2466 }
2467 #endif
2468
2469 /* Send a gdb syscall request.
2470 This accepts limited printf-style format specifiers, specifically:
2471 %x - target_ulong argument printed in hex.
2472 %lx - 64-bit argument printed in hex.
2473 %s - string pointer (target_ulong) and length (int) pair. */
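/* Illustrative only (the real callers live in the semihosting code): a
   request such as gdb_do_syscall(cb, "open,%s,%x,%x", fname_addr,
   (int)fname_len, flags, mode) would be encoded as an "Fopen,..." packet
   for gdb's File-I/O extension; the argument names here are hypothetical. */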
2474 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2475 {
2476 va_list va;
2477 char *p;
2478 char *p_end;
2479 target_ulong addr;
2480 uint64_t i64;
2481 GDBState *s;
2482
2483 s = gdbserver_state;
2484 if (!s)
2485 return;
2486 s->current_syscall_cb = cb;
2487 #ifndef CONFIG_USER_ONLY
2488 vm_stop(RUN_STATE_DEBUG);
2489 #endif
2490 va_start(va, fmt);
2491 p = s->syscall_buf;
2492 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2493 *(p++) = 'F';
2494 while (*fmt) {
2495 if (*fmt == '%') {
2496 fmt++;
2497 switch (*fmt++) {
2498 case 'x':
2499 addr = va_arg(va, target_ulong);
2500 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2501 break;
2502 case 'l':
2503 if (*(fmt++) != 'x')
2504 goto bad_format;
2505 i64 = va_arg(va, uint64_t);
2506 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2507 break;
2508 case 's':
2509 addr = va_arg(va, target_ulong);
2510 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2511 addr, va_arg(va, int));
2512 break;
2513 default:
2514 bad_format:
2515 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2516 fmt - 1);
2517 break;
2518 }
2519 } else {
2520 *(p++) = *(fmt++);
2521 }
2522 }
2523 *p = 0;
2524 va_end(va);
2525 #ifdef CONFIG_USER_ONLY
2526 put_packet(s, s->syscall_buf);
2527 gdb_handlesig(s->c_cpu, 0);
2528 #else
2529 /* In this case, wait to send the syscall packet until we are notified
2530 that the CPU has stopped. This must be done because if the packet were
2531 sent now, the reply to the syscall request could be received while the
2532 CPU is still running, which can cause packets to be dropped and
2533 state-transition 'T' packets to be sent while the syscall is still
2534 being processed. */
2535 cpu_exit(s->c_cpu);
2536 #endif
2537 }
2538
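/* Feed one byte of stub input into the packet parser. Remote protocol
   packets have the form "$<payload>#<checksum>", where the checksum is the
   modulo-256 sum of the payload bytes as two hex digits; for example,
   "$g#67" requests all registers. Each packet is answered with '+' on a
   good checksum or '-' to request retransmission. */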
2539 static void gdb_read_byte(GDBState *s, int ch)
2540 {
2541 int i, csum;
2542 uint8_t reply;
2543
2544 #ifndef CONFIG_USER_ONLY
2545 if (s->last_packet_len) {
2546 /* Waiting for a response to the last packet. If we see the start
2547 of a new command then abandon the previous response. */
2548 if (ch == '-') {
2549 #ifdef DEBUG_GDB
2550 printf("Got NACK, retransmitting\n");
2551 #endif
2552 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2553 }
2554 #ifdef DEBUG_GDB
2555 else if (ch == '+')
2556 printf("Got ACK\n");
2557 else
2558 printf("Got '%c' when expecting ACK/NACK\n", ch);
2559 #endif
2560 if (ch == '+' || ch == '$')
2561 s->last_packet_len = 0;
2562 if (ch != '$')
2563 return;
2564 }
2565 if (runstate_is_running()) {
2566 /* While the CPU is running we cannot do anything except stop
2567 it when a character arrives. */
2568 vm_stop(RUN_STATE_PAUSED);
2569 } else
2570 #endif
2571 {
2572 switch(s->state) {
2573 case RS_IDLE:
2574 if (ch == '$') {
2575 s->line_buf_index = 0;
2576 s->state = RS_GETLINE;
2577 }
2578 break;
2579 case RS_GETLINE:
2580 if (ch == '#') {
2581 s->state = RS_CHKSUM1;
2582 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2583 s->state = RS_IDLE;
2584 } else {
2585 s->line_buf[s->line_buf_index++] = ch;
2586 }
2587 break;
2588 case RS_CHKSUM1:
2589 s->line_buf[s->line_buf_index] = '\0';
2590 s->line_csum = fromhex(ch) << 4;
2591 s->state = RS_CHKSUM2;
2592 break;
2593 case RS_CHKSUM2:
2594 s->line_csum |= fromhex(ch);
2595 csum = 0;
2596 for(i = 0; i < s->line_buf_index; i++) {
2597 csum += s->line_buf[i];
2598 }
2599 if (s->line_csum != (csum & 0xff)) {
2600 reply = '-';
2601 put_buffer(s, &reply, 1);
2602 s->state = RS_IDLE;
2603 } else {
2604 reply = '+';
2605 put_buffer(s, &reply, 1);
2606 s->state = gdb_handle_packet(s, s->line_buf);
2607 }
2608 break;
2609 default:
2610 abort();
2611 }
2612 }
2613 }
2614
2615 /* Tell the remote gdb that the process has exited. */
2616 void gdb_exit(CPUArchState *env, int code)
2617 {
2618 GDBState *s;
2619 char buf[4];
2620
2621 s = gdbserver_state;
2622 if (!s) {
2623 return;
2624 }
2625 #ifdef CONFIG_USER_ONLY
2626 if (gdbserver_fd < 0 || s->fd < 0) {
2627 return;
2628 }
2629 #endif
2630
2631 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2632 put_packet(s, buf);
2633
2634 #ifndef CONFIG_USER_ONLY
2635 if (s->chr) {
2636 qemu_chr_delete(s->chr);
2637 }
2638 #endif
2639 }
2640
2641 #ifdef CONFIG_USER_ONLY
2642 int
2643 gdb_queuesig (void)
2644 {
2645 GDBState *s;
2646
2647 s = gdbserver_state;
2648
2649 if (gdbserver_fd < 0 || s->fd < 0)
2650 return 0;
2651 else
2652 return 1;
2653 }
2654
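/* Called from the user-mode emulation signal path: report SIG to the
   attached gdb (if any), then block, pumping bytes from the socket through
   gdb_read_byte(), until the client resumes the target. Returns the signal
   to actually deliver, which the debugger may have replaced or suppressed. */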
2655 int
2656 gdb_handlesig (CPUArchState *env, int sig)
2657 {
2658 GDBState *s;
2659 char buf[256];
2660 int n;
2661
2662 s = gdbserver_state;
2663 if (gdbserver_fd < 0 || s->fd < 0)
2664 return sig;
2665
2666 /* disable single step if it was enabled */
2667 cpu_single_step(env, 0);
2668 tb_flush(env);
2669
2670 if (sig != 0)
2671 {
2672 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2673 put_packet(s, buf);
2674 }
2675 /* put_packet() might have detected that the peer terminated the
2676 connection. */
2677 if (s->fd < 0)
2678 return sig;
2679
2680 sig = 0;
2681 s->state = RS_IDLE;
2682 s->running_state = 0;
2683 while (s->running_state == 0) {
2684 n = read (s->fd, buf, 256);
2685 if (n > 0)
2686 {
2687 int i;
2688
2689 for (i = 0; i < n; i++)
2690 gdb_read_byte (s, buf[i]);
2691 }
2692 else if (n == 0 || errno != EAGAIN)
2693 {
2694 /* XXX: Connection closed. Should probably wait for another
2695 connection before continuing. */
2696 return sig;
2697 }
2698 }
2699 sig = s->signal;
2700 s->signal = 0;
2701 return sig;
2702 }
2703
2704 /* Tell the remote gdb that the process has exited due to SIG. */
2705 void gdb_signalled(CPUArchState *env, int sig)
2706 {
2707 GDBState *s;
2708 char buf[4];
2709
2710 s = gdbserver_state;
2711 if (gdbserver_fd < 0 || s->fd < 0)
2712 return;
2713
2714 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2715 put_packet(s, buf);
2716 }
2717
2718 static void gdb_accept(void)
2719 {
2720 GDBState *s;
2721 struct sockaddr_in sockaddr;
2722 socklen_t len;
2723 int val, fd;
2724
2725 for(;;) {
2726 len = sizeof(sockaddr);
2727 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2728 if (fd < 0 && errno != EINTR) {
2729 perror("accept");
2730 return;
2731 } else if (fd >= 0) {
2732 #ifndef _WIN32
2733 fcntl(fd, F_SETFD, FD_CLOEXEC);
2734 #endif
2735 break;
2736 }
2737 }
2738
2739 /* set short latency */
2740 val = 1;
2741 setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));
2742
2743 s = g_malloc0(sizeof(GDBState));
2744 s->c_cpu = first_cpu;
2745 s->g_cpu = first_cpu;
2746 s->fd = fd;
2747 gdb_has_xml = 0;
2748
2749 gdbserver_state = s;
2750
2751 fcntl(fd, F_SETFL, O_NONBLOCK);
2752 }
2753
2754 static int gdbserver_open(int port)
2755 {
2756 struct sockaddr_in sockaddr;
2757 int fd, val, ret;
2758
2759 fd = socket(PF_INET, SOCK_STREAM, 0);
2760 if (fd < 0) {
2761 perror("socket");
2762 return -1;
2763 }
2764 #ifndef _WIN32
2765 fcntl(fd, F_SETFD, FD_CLOEXEC);
2766 #endif
2767
2768 /* allow fast reuse */
2769 val = 1;
2770 setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));
2771
2772 sockaddr.sin_family = AF_INET;
2773 sockaddr.sin_port = htons(port);
2774 sockaddr.sin_addr.s_addr = 0;
2775 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2776 if (ret < 0) {
2777 perror("bind");
2778 close(fd);
2779 return -1;
2780 }
2781 ret = listen(fd, 0);
2782 if (ret < 0) {
2783 perror("listen");
2784 close(fd);
2785 return -1;
2786 }
2787 return fd;
2788 }
2789
2790 int gdbserver_start(int port)
2791 {
2792 gdbserver_fd = gdbserver_open(port);
2793 if (gdbserver_fd < 0)
2794 return -1;
2795 /* accept connections */
2796 gdb_accept();
2797 return 0;
2798 }
2799
2800 /* Disable gdb stub for child processes. */
2801 void gdbserver_fork(CPUArchState *env)
2802 {
2803 GDBState *s = gdbserver_state;
2804 if (gdbserver_fd < 0 || s->fd < 0)
2805 return;
2806 close(s->fd);
2807 s->fd = -1;
2808 cpu_breakpoint_remove_all(env, BP_GDB);
2809 cpu_watchpoint_remove_all(env, BP_GDB);
2810 }
2811 #else
2812 static int gdb_chr_can_receive(void *opaque)
2813 {
2814 /* We can handle an arbitrarily large amount of data.
2815 Pick the maximum packet size, which is as good as anything. */
2816 return MAX_PACKET_LENGTH;
2817 }
2818
2819 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2820 {
2821 int i;
2822
2823 for (i = 0; i < size; i++) {
2824 gdb_read_byte(gdbserver_state, buf[i]);
2825 }
2826 }
2827
2828 static void gdb_chr_event(void *opaque, int event)
2829 {
2830 switch (event) {
2831 case CHR_EVENT_OPENED:
2832 vm_stop(RUN_STATE_PAUSED);
2833 gdb_has_xml = 0;
2834 break;
2835 default:
2836 break;
2837 }
2838 }
2839
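/* Forward monitor output to the gdb client as 'O' packets: an 'O' followed
   by the hex-encoded text, which gdb prints on the user's console. */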
2840 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2841 {
2842 char buf[MAX_PACKET_LENGTH];
2843
2844 buf[0] = 'O';
2845 if (len > (MAX_PACKET_LENGTH/2) - 1)
2846 len = (MAX_PACKET_LENGTH/2) - 1;
2847 memtohex(buf + 1, (uint8_t *)msg, len);
2848 put_packet(s, buf);
2849 }
2850
2851 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2852 {
2853 const char *p = (const char *)buf;
2854 int max_sz;
2855
2856 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2857 for (;;) {
2858 if (len <= max_sz) {
2859 gdb_monitor_output(gdbserver_state, p, len);
2860 break;
2861 }
2862 gdb_monitor_output(gdbserver_state, p, max_sz);
2863 p += max_sz;
2864 len -= max_sz;
2865 }
2866 return len;
2867 }
2868
2869 #ifndef _WIN32
2870 static void gdb_sigterm_handler(int signal)
2871 {
2872 if (runstate_is_running()) {
2873 vm_stop(RUN_STATE_PAUSED);
2874 }
2875 }
2876 #endif
2877
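/* Start the system-mode stub on the given character device specification,
   e.g. "tcp::1234" (what the -s/-gdb options typically expand to), "stdio",
   or "none" to set up the stub state without listening anywhere. */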
2878 int gdbserver_start(const char *device)
2879 {
2880 GDBState *s;
2881 char gdbstub_device_name[128];
2882 CharDriverState *chr = NULL;
2883 CharDriverState *mon_chr;
2884
2885 if (!device)
2886 return -1;
2887 if (strcmp(device, "none") != 0) {
2888 if (strstart(device, "tcp:", NULL)) {
2889 /* enforce required TCP attributes */
2890 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
2891 "%s,nowait,nodelay,server", device);
2892 device = gdbstub_device_name;
2893 }
2894 #ifndef _WIN32
2895 else if (strcmp(device, "stdio") == 0) {
2896 struct sigaction act;
2897
2898 memset(&act, 0, sizeof(act));
2899 act.sa_handler = gdb_sigterm_handler;
2900 sigaction(SIGINT, &act, NULL);
2901 }
2902 #endif
2903 chr = qemu_chr_new("gdb", device, NULL);
2904 if (!chr)
2905 return -1;
2906
2907 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
2908 gdb_chr_event, NULL);
2909 }
2910
2911 s = gdbserver_state;
2912 if (!s) {
2913 s = g_malloc0(sizeof(GDBState));
2914 gdbserver_state = s;
2915
2916 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
2917
2918 /* Initialize a monitor terminal for gdb */
2919 mon_chr = g_malloc0(sizeof(*mon_chr));
2920 mon_chr->chr_write = gdb_monitor_write;
2921 monitor_init(mon_chr, 0);
2922 } else {
2923 if (s->chr)
2924 qemu_chr_delete(s->chr);
2925 mon_chr = s->mon_chr;
2926 memset(s, 0, sizeof(GDBState));
2927 }
2928 s->c_cpu = first_cpu;
2929 s->g_cpu = first_cpu;
2930 s->chr = chr;
2931 s->state = chr ? RS_IDLE : RS_INACTIVE;
2932 s->mon_chr = mon_chr;
2933 s->current_syscall_cb = NULL;
2934
2935 return 0;
2936 }
2937 #endif