1 /*
2 * gdb server stub
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "qemu.h"
31 #else
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
36 #endif
37
38 #define MAX_PACKET_LENGTH 4096
39
40 #include "cpu.h"
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
44
45 static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
46 uint8_t *buf, int len, bool is_write)
47 {
48 CPUClass *cc = CPU_GET_CLASS(cpu);
49
50 if (cc->memory_rw_debug) {
51 return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
52 }
53 return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
54 }
55
56 enum {
57 GDB_SIGNAL_0 = 0,
58 GDB_SIGNAL_INT = 2,
59 GDB_SIGNAL_QUIT = 3,
60 GDB_SIGNAL_TRAP = 5,
61 GDB_SIGNAL_ABRT = 6,
62 GDB_SIGNAL_ALRM = 14,
63 GDB_SIGNAL_IO = 23,
64 GDB_SIGNAL_XCPU = 24,
65 GDB_SIGNAL_UNKNOWN = 143
66 };
67
68 #ifdef CONFIG_USER_ONLY
69
70 /* Map target signal numbers to GDB protocol signal numbers and vice
71 * versa. For user emulation's currently supported systems, we can
72 * assume most signals are defined.
73 */
74
75 static int gdb_signal_table[] = {
76 0,
77 TARGET_SIGHUP,
78 TARGET_SIGINT,
79 TARGET_SIGQUIT,
80 TARGET_SIGILL,
81 TARGET_SIGTRAP,
82 TARGET_SIGABRT,
83 -1, /* SIGEMT */
84 TARGET_SIGFPE,
85 TARGET_SIGKILL,
86 TARGET_SIGBUS,
87 TARGET_SIGSEGV,
88 TARGET_SIGSYS,
89 TARGET_SIGPIPE,
90 TARGET_SIGALRM,
91 TARGET_SIGTERM,
92 TARGET_SIGURG,
93 TARGET_SIGSTOP,
94 TARGET_SIGTSTP,
95 TARGET_SIGCONT,
96 TARGET_SIGCHLD,
97 TARGET_SIGTTIN,
98 TARGET_SIGTTOU,
99 TARGET_SIGIO,
100 TARGET_SIGXCPU,
101 TARGET_SIGXFSZ,
102 TARGET_SIGVTALRM,
103 TARGET_SIGPROF,
104 TARGET_SIGWINCH,
105 -1, /* SIGLOST */
106 TARGET_SIGUSR1,
107 TARGET_SIGUSR2,
108 #ifdef TARGET_SIGPWR
109 TARGET_SIGPWR,
110 #else
111 -1,
112 #endif
113 -1, /* SIGPOLL */
114 -1,
115 -1,
116 -1,
117 -1,
118 -1,
119 -1,
120 -1,
121 -1,
122 -1,
123 -1,
124 -1,
125 #ifdef __SIGRTMIN
126 __SIGRTMIN + 1,
127 __SIGRTMIN + 2,
128 __SIGRTMIN + 3,
129 __SIGRTMIN + 4,
130 __SIGRTMIN + 5,
131 __SIGRTMIN + 6,
132 __SIGRTMIN + 7,
133 __SIGRTMIN + 8,
134 __SIGRTMIN + 9,
135 __SIGRTMIN + 10,
136 __SIGRTMIN + 11,
137 __SIGRTMIN + 12,
138 __SIGRTMIN + 13,
139 __SIGRTMIN + 14,
140 __SIGRTMIN + 15,
141 __SIGRTMIN + 16,
142 __SIGRTMIN + 17,
143 __SIGRTMIN + 18,
144 __SIGRTMIN + 19,
145 __SIGRTMIN + 20,
146 __SIGRTMIN + 21,
147 __SIGRTMIN + 22,
148 __SIGRTMIN + 23,
149 __SIGRTMIN + 24,
150 __SIGRTMIN + 25,
151 __SIGRTMIN + 26,
152 __SIGRTMIN + 27,
153 __SIGRTMIN + 28,
154 __SIGRTMIN + 29,
155 __SIGRTMIN + 30,
156 __SIGRTMIN + 31,
157 -1, /* SIGCANCEL */
158 __SIGRTMIN,
159 __SIGRTMIN + 32,
160 __SIGRTMIN + 33,
161 __SIGRTMIN + 34,
162 __SIGRTMIN + 35,
163 __SIGRTMIN + 36,
164 __SIGRTMIN + 37,
165 __SIGRTMIN + 38,
166 __SIGRTMIN + 39,
167 __SIGRTMIN + 40,
168 __SIGRTMIN + 41,
169 __SIGRTMIN + 42,
170 __SIGRTMIN + 43,
171 __SIGRTMIN + 44,
172 __SIGRTMIN + 45,
173 __SIGRTMIN + 46,
174 __SIGRTMIN + 47,
175 __SIGRTMIN + 48,
176 __SIGRTMIN + 49,
177 __SIGRTMIN + 50,
178 __SIGRTMIN + 51,
179 __SIGRTMIN + 52,
180 __SIGRTMIN + 53,
181 __SIGRTMIN + 54,
182 __SIGRTMIN + 55,
183 __SIGRTMIN + 56,
184 __SIGRTMIN + 57,
185 __SIGRTMIN + 58,
186 __SIGRTMIN + 59,
187 __SIGRTMIN + 60,
188 __SIGRTMIN + 61,
189 __SIGRTMIN + 62,
190 __SIGRTMIN + 63,
191 __SIGRTMIN + 64,
192 __SIGRTMIN + 65,
193 __SIGRTMIN + 66,
194 __SIGRTMIN + 67,
195 __SIGRTMIN + 68,
196 __SIGRTMIN + 69,
197 __SIGRTMIN + 70,
198 __SIGRTMIN + 71,
199 __SIGRTMIN + 72,
200 __SIGRTMIN + 73,
201 __SIGRTMIN + 74,
202 __SIGRTMIN + 75,
203 __SIGRTMIN + 76,
204 __SIGRTMIN + 77,
205 __SIGRTMIN + 78,
206 __SIGRTMIN + 79,
207 __SIGRTMIN + 80,
208 __SIGRTMIN + 81,
209 __SIGRTMIN + 82,
210 __SIGRTMIN + 83,
211 __SIGRTMIN + 84,
212 __SIGRTMIN + 85,
213 __SIGRTMIN + 86,
214 __SIGRTMIN + 87,
215 __SIGRTMIN + 88,
216 __SIGRTMIN + 89,
217 __SIGRTMIN + 90,
218 __SIGRTMIN + 91,
219 __SIGRTMIN + 92,
220 __SIGRTMIN + 93,
221 __SIGRTMIN + 94,
222 __SIGRTMIN + 95,
223 -1, /* SIGINFO */
224 -1, /* UNKNOWN */
225 -1, /* DEFAULT */
226 -1,
227 -1,
228 -1,
229 -1,
230 -1,
231 -1
232 #endif
233 };
234 #else
235 /* In system mode we only need SIGINT and SIGTRAP; other signals
236 are not yet supported. */
237
238 enum {
239 TARGET_SIGINT = 2,
240 TARGET_SIGTRAP = 5
241 };
242
243 static int gdb_signal_table[] = {
244 -1,
245 -1,
246 TARGET_SIGINT,
247 -1,
248 -1,
249 TARGET_SIGTRAP
250 };
251 #endif
252
253 #ifdef CONFIG_USER_ONLY
254 static int target_signal_to_gdb (int sig)
255 {
256 int i;
257 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
258 if (gdb_signal_table[i] == sig)
259 return i;
260 return GDB_SIGNAL_UNKNOWN;
261 }
262 #endif
263
264 static int gdb_signal_to_target (int sig)
265 {
266 if (sig < ARRAY_SIZE (gdb_signal_table))
267 return gdb_signal_table[sig];
268 else
269 return -1;
270 }
271
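/*
 * Worked example (a sketch based on the tables above, not extra
 * functionality): in user mode, target_signal_to_gdb(TARGET_SIGINT)
 * returns GDB_SIGNAL_INT (2) because TARGET_SIGINT sits at index 2 of
 * gdb_signal_table, and gdb_signal_to_target(GDB_SIGNAL_TRAP) returns
 * TARGET_SIGTRAP.  A protocol signal with no table entry maps to -1,
 * and a target signal that is not in the table at all is reported as
 * GDB_SIGNAL_UNKNOWN.
 */
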
272 //#define DEBUG_GDB
273
274 typedef struct GDBRegisterState {
275 int base_reg;
276 int num_regs;
277 gdb_reg_cb get_reg;
278 gdb_reg_cb set_reg;
279 const char *xml;
280 struct GDBRegisterState *next;
281 } GDBRegisterState;
282
283 enum RSState {
284 RS_INACTIVE,
285 RS_IDLE,
286 RS_GETLINE,
287 RS_CHKSUM1,
288 RS_CHKSUM2,
289 };
290 typedef struct GDBState {
291 CPUState *c_cpu; /* current CPU for step/continue ops */
292 CPUState *g_cpu; /* current CPU for other ops */
293 CPUState *query_cpu; /* for q{f|s}ThreadInfo */
294 enum RSState state; /* parsing state */
295 char line_buf[MAX_PACKET_LENGTH];
296 int line_buf_index;
297 int line_csum;
298 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
299 int last_packet_len;
300 int signal;
301 #ifdef CONFIG_USER_ONLY
302 int fd;
303 int running_state;
304 #else
305 CharDriverState *chr;
306 CharDriverState *mon_chr;
307 #endif
308 char syscall_buf[256];
309 gdb_syscall_complete_cb current_syscall_cb;
310 } GDBState;
311
312 /* By default use no IRQs and no timers while single stepping so as to
313 * make single stepping like an ICE HW step.
314 */
315 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
316
317 static GDBState *gdbserver_state;
318
319 /* This is an ugly hack to cope with both new and old gdb.
320 If gdb sends qXfer:features:read then assume we're talking to a newish
321 gdb that understands target descriptions. */
322 static int gdb_has_xml;
323
324 #ifdef CONFIG_USER_ONLY
325 /* XXX: This is not thread safe. Do we care? */
326 static int gdbserver_fd = -1;
327
328 static int get_char(GDBState *s)
329 {
330 uint8_t ch;
331 int ret;
332
333 for(;;) {
334 ret = qemu_recv(s->fd, &ch, 1, 0);
335 if (ret < 0) {
336 if (errno == ECONNRESET)
337 s->fd = -1;
338 if (errno != EINTR && errno != EAGAIN)
339 return -1;
340 } else if (ret == 0) {
341 close(s->fd);
342 s->fd = -1;
343 return -1;
344 } else {
345 break;
346 }
347 }
348 return ch;
349 }
350 #endif
351
352 static enum {
353 GDB_SYS_UNKNOWN,
354 GDB_SYS_ENABLED,
355 GDB_SYS_DISABLED,
356 } gdb_syscall_mode;
357
358 /* If gdb is connected when the first semihosting syscall occurs then use
359 remote gdb syscalls. Otherwise use native file IO. */
360 int use_gdb_syscalls(void)
361 {
362 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
363 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
364 : GDB_SYS_DISABLED);
365 }
366 return gdb_syscall_mode == GDB_SYS_ENABLED;
367 }
368
369 /* Resume execution. */
370 static inline void gdb_continue(GDBState *s)
371 {
372 #ifdef CONFIG_USER_ONLY
373 s->running_state = 1;
374 #else
375 if (runstate_check(RUN_STATE_GUEST_PANICKED)) {
376 runstate_set(RUN_STATE_DEBUG);
377 }
378 if (!runstate_needs_reset()) {
379 vm_start();
380 }
381 #endif
382 }
383
384 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
385 {
386 #ifdef CONFIG_USER_ONLY
387 int ret;
388
389 while (len > 0) {
390 ret = send(s->fd, buf, len, 0);
391 if (ret < 0) {
392 if (errno != EINTR && errno != EAGAIN)
393 return;
394 } else {
395 buf += ret;
396 len -= ret;
397 }
398 }
399 #else
400 qemu_chr_fe_write(s->chr, buf, len);
401 #endif
402 }
403
404 static inline int fromhex(int v)
405 {
406 if (v >= '0' && v <= '9')
407 return v - '0';
408 else if (v >= 'A' && v <= 'F')
409 return v - 'A' + 10;
410 else if (v >= 'a' && v <= 'f')
411 return v - 'a' + 10;
412 else
413 return 0;
414 }
415
416 static inline int tohex(int v)
417 {
418 if (v < 10)
419 return v + '0';
420 else
421 return v - 10 + 'a';
422 }
423
424 static void memtohex(char *buf, const uint8_t *mem, int len)
425 {
426 int i, c;
427 char *q;
428 q = buf;
429 for(i = 0; i < len; i++) {
430 c = mem[i];
431 *q++ = tohex(c >> 4);
432 *q++ = tohex(c & 0xf);
433 }
434 *q = '\0';
435 }
436
437 static void hextomem(uint8_t *mem, const char *buf, int len)
438 {
439 int i;
440
441 for(i = 0; i < len; i++) {
442 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
443 buf += 2;
444 }
445 }
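
/*
 * Example (a sketch, not new behaviour): memtohex() turns the two
 * bytes { 0x4f, 0x4b } into the NUL-terminated string "4f4b", and
 * hextomem(mem, "4f4b", 2) recovers the original bytes.  Note that
 * hextomem() takes the number of output bytes, i.e. half the number
 * of hex characters, and does not NUL-terminate anything.
 */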
446
447 /* return -1 if error, 0 if OK */
448 static int put_packet_binary(GDBState *s, const char *buf, int len)
449 {
450 int csum, i;
451 uint8_t *p;
452
453 for(;;) {
454 p = s->last_packet;
455 *(p++) = '$';
456 memcpy(p, buf, len);
457 p += len;
458 csum = 0;
459 for(i = 0; i < len; i++) {
460 csum += buf[i];
461 }
462 *(p++) = '#';
463 *(p++) = tohex((csum >> 4) & 0xf);
464 *(p++) = tohex((csum) & 0xf);
465
466 s->last_packet_len = p - s->last_packet;
467 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
468
469 #ifdef CONFIG_USER_ONLY
470 i = get_char(s);
471 if (i < 0)
472 return -1;
473 if (i == '+')
474 break;
475 #else
476 break;
477 #endif
478 }
479 return 0;
480 }
481
482 /* return -1 if error, 0 if OK */
483 static int put_packet(GDBState *s, const char *buf)
484 {
485 #ifdef DEBUG_GDB
486 printf("reply='%s'\n", buf);
487 #endif
488
489 return put_packet_binary(s, buf, strlen(buf));
490 }
491
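/*
 * Wire format produced above (a worked example, not new behaviour):
 * a reply payload is framed as '$' <payload> '#' <two hex digit checksum>,
 * where the checksum is the modulo-256 sum of the payload bytes.  So
 * put_packet(s, "OK") transmits "$OK#9a" (0x4f + 0x4b == 0x9a), and in
 * user mode the stub then waits for the '+' acknowledgement character
 * before considering the packet delivered.
 */
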
492 /* The GDB remote protocol transfers values in target byte order. This means
493 we can use the raw memory access routines to access the value buffer.
494 Conveniently, these also handle the case where the buffer is mis-aligned.
495 */
496 #define GET_REG8(val) do { \
497 stb_p(mem_buf, val); \
498 return 1; \
499 } while(0)
500 #define GET_REG16(val) do { \
501 stw_p(mem_buf, val); \
502 return 2; \
503 } while(0)
504 #define GET_REG32(val) do { \
505 stl_p(mem_buf, val); \
506 return 4; \
507 } while(0)
508 #define GET_REG64(val) do { \
509 stq_p(mem_buf, val); \
510 return 8; \
511 } while(0)
512
513 #if TARGET_LONG_BITS == 64
514 #define GET_REGL(val) GET_REG64(val)
515 #define ldtul_p(addr) ldq_p(addr)
516 #else
517 #define GET_REGL(val) GET_REG32(val)
518 #define ldtul_p(addr) ldl_p(addr)
519 #endif
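
/*
 * Minimal sketch (the CPUFooState layout is invented purely for
 * illustration) of how the per-target cpu_gdb_read_register()
 * handlers below use these macros: each macro stores the value into
 * mem_buf in target byte order and returns the size in bytes, so a
 * handler looks like
 *
 *   static int cpu_gdb_read_register(CPUFooState *env,
 *                                    uint8_t *mem_buf, int n)
 *   {
 *       if (n < 16) {
 *           GET_REGL(env->gpr[n]);    (store the value, return its size)
 *       }
 *       return 0;                     (0 means "unknown register")
 *   }
 *
 * and the 'g' packet handler simply concatenates the results.
 */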
520
521 #if defined(TARGET_I386)
522
523 #ifdef TARGET_X86_64
524 static const int gpr_map[16] = {
525 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
526 8, 9, 10, 11, 12, 13, 14, 15
527 };
528 #else
529 #define gpr_map gpr_map32
530 #endif
531 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
532
533 #define IDX_IP_REG CPU_NB_REGS
534 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
535 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
536 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
537 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
538 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
539
540 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
541 {
542 if (n < CPU_NB_REGS) {
543 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
544 GET_REG64(env->regs[gpr_map[n]]);
545 } else if (n < CPU_NB_REGS32) {
546 GET_REG32(env->regs[gpr_map32[n]]);
547 }
548 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
549 #ifdef USE_X86LDOUBLE
550 /* FIXME: byteswap float values - after fixing fpregs layout. */
551 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
552 #else
553 memset(mem_buf, 0, 10);
554 #endif
555 return 10;
556 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
557 n -= IDX_XMM_REGS;
558 if (n < CPU_NB_REGS32 ||
559 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
560 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
561 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
562 return 16;
563 }
564 } else {
565 switch (n) {
566 case IDX_IP_REG:
567 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
568 GET_REG64(env->eip);
569 } else {
570 GET_REG32(env->eip);
571 }
572 case IDX_FLAGS_REG:
573 GET_REG32(env->eflags);
574
575 case IDX_SEG_REGS:
576 GET_REG32(env->segs[R_CS].selector);
577 case IDX_SEG_REGS + 1:
578 GET_REG32(env->segs[R_SS].selector);
579 case IDX_SEG_REGS + 2:
580 GET_REG32(env->segs[R_DS].selector);
581 case IDX_SEG_REGS + 3:
582 GET_REG32(env->segs[R_ES].selector);
583 case IDX_SEG_REGS + 4:
584 GET_REG32(env->segs[R_FS].selector);
585 case IDX_SEG_REGS + 5:
586 GET_REG32(env->segs[R_GS].selector);
587
588 case IDX_FP_REGS + 8:
589 GET_REG32(env->fpuc);
590 case IDX_FP_REGS + 9:
591 GET_REG32((env->fpus & ~0x3800) |
592 (env->fpstt & 0x7) << 11);
593 case IDX_FP_REGS + 10:
594 GET_REG32(0); /* ftag */
595 case IDX_FP_REGS + 11:
596 GET_REG32(0); /* fiseg */
597 case IDX_FP_REGS + 12:
598 GET_REG32(0); /* fioff */
599 case IDX_FP_REGS + 13:
600 GET_REG32(0); /* foseg */
601 case IDX_FP_REGS + 14:
602 GET_REG32(0); /* fooff */
603 case IDX_FP_REGS + 15:
604 GET_REG32(0); /* fop */
605
606 case IDX_MXCSR_REG:
607 GET_REG32(env->mxcsr);
608 }
609 }
610 return 0;
611 }
612
613 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
614 {
615 uint16_t selector = ldl_p(mem_buf);
616
617 if (selector != env->segs[sreg].selector) {
618 #if defined(CONFIG_USER_ONLY)
619 cpu_x86_load_seg(env, sreg, selector);
620 #else
621 unsigned int limit, flags;
622 target_ulong base;
623
624 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
625 base = selector << 4;
626 limit = 0xffff;
627 flags = 0;
628 } else {
629 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
630 &flags)) {
631 return 4;
632 }
633 }
634 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
635 #endif
636 }
637 return 4;
638 }
639
640 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
641 {
642 uint32_t tmp;
643
644 if (n < CPU_NB_REGS) {
645 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
646 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
647 return sizeof(target_ulong);
648 } else if (n < CPU_NB_REGS32) {
649 n = gpr_map32[n];
650 env->regs[n] &= ~0xffffffffUL;
651 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
652 return 4;
653 }
654 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
655 #ifdef USE_X86LDOUBLE
656 /* FIXME: byteswap float values - after fixing fpregs layout. */
657 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
658 #endif
659 return 10;
660 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
661 n -= IDX_XMM_REGS;
662 if (n < CPU_NB_REGS32 ||
663 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
664 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
665 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
666 return 16;
667 }
668 } else {
669 switch (n) {
670 case IDX_IP_REG:
671 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
672 env->eip = ldq_p(mem_buf);
673 return 8;
674 } else {
675 env->eip &= ~0xffffffffUL;
676 env->eip |= (uint32_t)ldl_p(mem_buf);
677 return 4;
678 }
679 case IDX_FLAGS_REG:
680 env->eflags = ldl_p(mem_buf);
681 return 4;
682
683 case IDX_SEG_REGS:
684 return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
685 case IDX_SEG_REGS + 1:
686 return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
687 case IDX_SEG_REGS + 2:
688 return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
689 case IDX_SEG_REGS + 3:
690 return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
691 case IDX_SEG_REGS + 4:
692 return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
693 case IDX_SEG_REGS + 5:
694 return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
695
696 case IDX_FP_REGS + 8:
697 env->fpuc = ldl_p(mem_buf);
698 return 4;
699 case IDX_FP_REGS + 9:
700 tmp = ldl_p(mem_buf);
701 env->fpstt = (tmp >> 11) & 7;
702 env->fpus = tmp & ~0x3800;
703 return 4;
704 case IDX_FP_REGS + 10: /* ftag */
705 return 4;
706 case IDX_FP_REGS + 11: /* fiseg */
707 return 4;
708 case IDX_FP_REGS + 12: /* fioff */
709 return 4;
710 case IDX_FP_REGS + 13: /* foseg */
711 return 4;
712 case IDX_FP_REGS + 14: /* fooff */
713 return 4;
714 case IDX_FP_REGS + 15: /* fop */
715 return 4;
716
717 case IDX_MXCSR_REG:
718 env->mxcsr = ldl_p(mem_buf);
719 return 4;
720 }
721 }
722 /* Unrecognised register. */
723 return 0;
724 }
725
726 #elif defined (TARGET_PPC)
727
728 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
729 expects whatever the target description contains. Due to a
730 historical mishap the FP registers appear in between core integer
731 regs and PC, MSR, CR, and so forth. We hack around this by giving the
732 FP regs zero size when talking to a newer gdb. */
733 #if defined (TARGET_PPC64)
734 #define GDB_CORE_XML "power64-core.xml"
735 #else
736 #define GDB_CORE_XML "power-core.xml"
737 #endif
738
739 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
740 {
741 if (n < 32) {
742 /* gprs */
743 GET_REGL(env->gpr[n]);
744 } else if (n < 64) {
745 /* fprs */
746 if (gdb_has_xml) {
747 return 0;
748 }
749 stfq_p(mem_buf, env->fpr[n-32]);
750 return 8;
751 } else {
752 switch (n) {
753 case 64:
754 GET_REGL(env->nip);
755 case 65:
756 GET_REGL(env->msr);
757 case 66:
758 {
759 uint32_t cr = 0;
760 int i;
761 for (i = 0; i < 8; i++) {
762 cr |= env->crf[i] << (32 - ((i + 1) * 4));
763 }
764 GET_REG32(cr);
765 }
766 case 67:
767 GET_REGL(env->lr);
768 case 68:
769 GET_REGL(env->ctr);
770 case 69:
771 GET_REGL(env->xer);
772 case 70:
773 {
774 if (gdb_has_xml) {
775 return 0;
776 }
777 GET_REG32(env->fpscr);
778 }
779 }
780 }
781 return 0;
782 }
783
784 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
785 {
786 if (n < 32) {
787 /* gprs */
788 env->gpr[n] = ldtul_p(mem_buf);
789 return sizeof(target_ulong);
790 } else if (n < 64) {
791 /* fprs */
792 if (gdb_has_xml) {
793 return 0;
794 }
795 env->fpr[n-32] = ldfq_p(mem_buf);
796 return 8;
797 } else {
798 switch (n) {
799 case 64:
800 env->nip = ldtul_p(mem_buf);
801 return sizeof(target_ulong);
802 case 65:
803 ppc_store_msr(env, ldtul_p(mem_buf));
804 return sizeof(target_ulong);
805 case 66:
806 {
807 uint32_t cr = ldl_p(mem_buf);
808 int i;
809 for (i = 0; i < 8; i++) {
810 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
811 }
812 return 4;
813 }
814 case 67:
815 env->lr = ldtul_p(mem_buf);
816 return sizeof(target_ulong);
817 case 68:
818 env->ctr = ldtul_p(mem_buf);
819 return sizeof(target_ulong);
820 case 69:
821 env->xer = ldtul_p(mem_buf);
822 return sizeof(target_ulong);
823 case 70:
824 /* fpscr */
825 if (gdb_has_xml) {
826 return 0;
827 }
828 store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
829 return sizeof(target_ulong);
830 }
831 }
832 return 0;
833 }
834
835 #elif defined (TARGET_SPARC)
836
837 #ifdef TARGET_ABI32
838 #define GET_REGA(val) GET_REG32(val)
839 #else
840 #define GET_REGA(val) GET_REGL(val)
841 #endif
842
843 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
844 {
845 if (n < 8) {
846 /* g0..g7 */
847 GET_REGA(env->gregs[n]);
848 }
849 if (n < 32) {
850 /* register window */
851 GET_REGA(env->regwptr[n - 8]);
852 }
853 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
854 if (n < 64) {
855 /* fprs */
856 if (n & 1) {
857 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
858 } else {
859 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
860 }
861 }
862 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
863 switch (n) {
864 case 64:
865 GET_REGA(env->y);
866 case 65:
867 GET_REGA(cpu_get_psr(env));
868 case 66:
869 GET_REGA(env->wim);
870 case 67:
871 GET_REGA(env->tbr);
872 case 68:
873 GET_REGA(env->pc);
874 case 69:
875 GET_REGA(env->npc);
876 case 70:
877 GET_REGA(env->fsr);
878 case 71:
879 GET_REGA(0); /* csr */
880 default:
881 GET_REGA(0);
882 }
883 #else
884 if (n < 64) {
885 /* f0-f31 */
886 if (n & 1) {
887 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
888 } else {
889 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
890 }
891 }
892 if (n < 80) {
893 /* f32-f62 (double width, even numbers only) */
894 GET_REG64(env->fpr[(n - 32) / 2].ll);
895 }
896 switch (n) {
897 case 80:
898 GET_REGL(env->pc);
899 case 81:
900 GET_REGL(env->npc);
901 case 82:
902 GET_REGL((cpu_get_ccr(env) << 32) |
903 ((env->asi & 0xff) << 24) |
904 ((env->pstate & 0xfff) << 8) |
905 cpu_get_cwp64(env));
906 case 83:
907 GET_REGL(env->fsr);
908 case 84:
909 GET_REGL(env->fprs);
910 case 85:
911 GET_REGL(env->y);
912 }
913 #endif
914 return 0;
915 }
916
917 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
918 {
919 #if defined(TARGET_ABI32)
920 abi_ulong tmp;
921
922 tmp = ldl_p(mem_buf);
923 #else
924 target_ulong tmp;
925
926 tmp = ldtul_p(mem_buf);
927 #endif
928
929 if (n < 8) {
930 /* g0..g7 */
931 env->gregs[n] = tmp;
932 } else if (n < 32) {
933 /* register window */
934 env->regwptr[n - 8] = tmp;
935 }
936 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
937 else if (n < 64) {
938 /* fprs */
939 /* f0-f31 */
940 if (n & 1) {
941 env->fpr[(n - 32) / 2].l.lower = tmp;
942 } else {
943 env->fpr[(n - 32) / 2].l.upper = tmp;
944 }
945 } else {
946 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
947 switch (n) {
948 case 64:
949 env->y = tmp;
950 break;
951 case 65:
952 cpu_put_psr(env, tmp);
953 break;
954 case 66:
955 env->wim = tmp;
956 break;
957 case 67:
958 env->tbr = tmp;
959 break;
960 case 68:
961 env->pc = tmp;
962 break;
963 case 69:
964 env->npc = tmp;
965 break;
966 case 70:
967 env->fsr = tmp;
968 break;
969 default:
970 return 0;
971 }
972 }
973 return 4;
974 #else
975 else if (n < 64) {
976 /* f0-f31 */
977 tmp = ldl_p(mem_buf);
978 if (n & 1) {
979 env->fpr[(n - 32) / 2].l.lower = tmp;
980 } else {
981 env->fpr[(n - 32) / 2].l.upper = tmp;
982 }
983 return 4;
984 } else if (n < 80) {
985 /* f32-f62 (double width, even numbers only) */
986 env->fpr[(n - 32) / 2].ll = tmp;
987 } else {
988 switch (n) {
989 case 80:
990 env->pc = tmp;
991 break;
992 case 81:
993 env->npc = tmp;
994 break;
995 case 82:
996 cpu_put_ccr(env, tmp >> 32);
997 env->asi = (tmp >> 24) & 0xff;
998 env->pstate = (tmp >> 8) & 0xfff;
999 cpu_put_cwp64(env, tmp & 0xff);
1000 break;
1001 case 83:
1002 env->fsr = tmp;
1003 break;
1004 case 84:
1005 env->fprs = tmp;
1006 break;
1007 case 85:
1008 env->y = tmp;
1009 break;
1010 default:
1011 return 0;
1012 }
1013 }
1014 return 8;
1015 #endif
1016 }
1017 #elif defined (TARGET_ARM)
1018
1019 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
1020 whatever the target description contains. Due to a historical mishap
1021 the FPA registers appear in between core integer regs and the CPSR.
1022 We hack around this by giving the FPA regs zero size when talking to a
1023 newer gdb. */
1024 #define GDB_CORE_XML "arm-core.xml"
1025
1026 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
1027 {
1028 if (n < 16) {
1029 /* Core integer register. */
1030 GET_REG32(env->regs[n]);
1031 }
1032 if (n < 24) {
1033 /* FPA registers. */
1034 if (gdb_has_xml) {
1035 return 0;
1036 }
1037 memset(mem_buf, 0, 12);
1038 return 12;
1039 }
1040 switch (n) {
1041 case 24:
1042 /* FPA status register. */
1043 if (gdb_has_xml) {
1044 return 0;
1045 }
1046 GET_REG32(0);
1047 case 25:
1048 /* CPSR */
1049 GET_REG32(cpsr_read(env));
1050 }
1051 /* Unknown register. */
1052 return 0;
1053 }
1054
1055 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
1056 {
1057 uint32_t tmp;
1058
1059 tmp = ldl_p(mem_buf);
1060
1061 /* Mask out the low bit of the PC to work around gdb bugs. This will probably
1062 cause problems if we ever implement the Jazelle DBX extensions. */
1063 if (n == 15) {
1064 tmp &= ~1;
1065 }
1066
1067 if (n < 16) {
1068 /* Core integer register. */
1069 env->regs[n] = tmp;
1070 return 4;
1071 }
1072 if (n < 24) { /* 16-23 */
1073 /* FPA registers (ignored). */
1074 if (gdb_has_xml) {
1075 return 0;
1076 }
1077 return 12;
1078 }
1079 switch (n) {
1080 case 24:
1081 /* FPA status register (ignored). */
1082 if (gdb_has_xml) {
1083 return 0;
1084 }
1085 return 4;
1086 case 25:
1087 /* CPSR */
1088 cpsr_write(env, tmp, 0xffffffff);
1089 return 4;
1090 }
1091 /* Unknown register. */
1092 return 0;
1093 }
1094
1095 #elif defined (TARGET_M68K)
1096
1097 #define GDB_CORE_XML "cf-core.xml"
1098
1099 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1100 {
1101 if (n < 8) {
1102 /* D0-D7 */
1103 GET_REG32(env->dregs[n]);
1104 } else if (n < 16) {
1105 /* A0-A7 */
1106 GET_REG32(env->aregs[n - 8]);
1107 } else {
1108 switch (n) {
1109 case 16:
1110 GET_REG32(env->sr);
1111 case 17:
1112 GET_REG32(env->pc);
1113 }
1114 }
1115 /* FP registers not included here because they vary between
1116 ColdFire and m68k. Use XML bits for these. */
1117 return 0;
1118 }
1119
1120 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1121 {
1122 uint32_t tmp;
1123
1124 tmp = ldl_p(mem_buf);
1125
1126 if (n < 8) {
1127 /* D0-D7 */
1128 env->dregs[n] = tmp;
1129 } else if (n < 16) {
1130 /* A0-A7 */
1131 env->aregs[n - 8] = tmp;
1132 } else {
1133 switch (n) {
1134 case 16:
1135 env->sr = tmp;
1136 break;
1137 case 17:
1138 env->pc = tmp;
1139 break;
1140 default:
1141 return 0;
1142 }
1143 }
1144 return 4;
1145 }
1146 #elif defined (TARGET_MIPS)
1147
1148 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1149 {
1150 if (n < 32) {
1151 GET_REGL(env->active_tc.gpr[n]);
1152 }
1153 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1154 if (n >= 38 && n < 70) {
1155 if (env->CP0_Status & (1 << CP0St_FR)) {
1156 GET_REGL(env->active_fpu.fpr[n - 38].d);
1157 } else {
1158 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1159 }
1160 }
1161 switch (n) {
1162 case 70:
1163 GET_REGL((int32_t)env->active_fpu.fcr31);
1164 case 71:
1165 GET_REGL((int32_t)env->active_fpu.fcr0);
1166 }
1167 }
1168 switch (n) {
1169 case 32:
1170 GET_REGL((int32_t)env->CP0_Status);
1171 case 33:
1172 GET_REGL(env->active_tc.LO[0]);
1173 case 34:
1174 GET_REGL(env->active_tc.HI[0]);
1175 case 35:
1176 GET_REGL(env->CP0_BadVAddr);
1177 case 36:
1178 GET_REGL((int32_t)env->CP0_Cause);
1179 case 37:
1180 GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1181 case 72:
1182 GET_REGL(0); /* fp */
1183 case 89:
1184 GET_REGL((int32_t)env->CP0_PRid);
1185 }
1186 if (n >= 73 && n <= 88) {
1187 /* 16 embedded regs. */
1188 GET_REGL(0);
1189 }
1190
1191 return 0;
1192 }
1193
1194 /* Convert the MIPS rounding mode in FCR31 to the IEEE library's rounding mode. */
1195 static unsigned int ieee_rm[] = {
1196 float_round_nearest_even,
1197 float_round_to_zero,
1198 float_round_up,
1199 float_round_down
1200 };
1201 #define RESTORE_ROUNDING_MODE \
1202 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], \
1203 &env->active_fpu.fp_status)
1204
1205 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1206 {
1207 target_ulong tmp;
1208
1209 tmp = ldtul_p(mem_buf);
1210
1211 if (n < 32) {
1212 env->active_tc.gpr[n] = tmp;
1213 return sizeof(target_ulong);
1214 }
1215 if (env->CP0_Config1 & (1 << CP0C1_FP)
1216 && n >= 38 && n < 73) {
1217 if (n < 70) {
1218 if (env->CP0_Status & (1 << CP0St_FR)) {
1219 env->active_fpu.fpr[n - 38].d = tmp;
1220 } else {
1221 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1222 }
1223 }
1224 switch (n) {
1225 case 70:
1226 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1227 /* set rounding mode */
1228 RESTORE_ROUNDING_MODE;
1229 break;
1230 case 71:
1231 env->active_fpu.fcr0 = tmp;
1232 break;
1233 }
1234 return sizeof(target_ulong);
1235 }
1236 switch (n) {
1237 case 32:
1238 env->CP0_Status = tmp;
1239 break;
1240 case 33:
1241 env->active_tc.LO[0] = tmp;
1242 break;
1243 case 34:
1244 env->active_tc.HI[0] = tmp;
1245 break;
1246 case 35:
1247 env->CP0_BadVAddr = tmp;
1248 break;
1249 case 36:
1250 env->CP0_Cause = tmp;
1251 break;
1252 case 37:
1253 env->active_tc.PC = tmp & ~(target_ulong)1;
1254 if (tmp & 1) {
1255 env->hflags |= MIPS_HFLAG_M16;
1256 } else {
1257 env->hflags &= ~(MIPS_HFLAG_M16);
1258 }
1259 break;
1260 case 72: /* fp, ignored */
1261 break;
1262 default:
1263 if (n > 89) {
1264 return 0;
1265 }
1266 /* Other registers are readonly. Ignore writes. */
1267 break;
1268 }
1269
1270 return sizeof(target_ulong);
1271 }
1272 #elif defined(TARGET_OPENRISC)
1273
1274 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1275 {
1276 if (n < 32) {
1277 GET_REG32(env->gpr[n]);
1278 } else {
1279 switch (n) {
1280 case 32: /* PPC */
1281 GET_REG32(env->ppc);
1282
1283 case 33: /* NPC */
1284 GET_REG32(env->npc);
1285
1286 case 34: /* SR */
1287 GET_REG32(env->sr);
1288
1289 default:
1290 break;
1291 }
1292 }
1293 return 0;
1294 }
1295
1296 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1297 uint8_t *mem_buf, int n)
1298 {
1299 OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
1300 CPUClass *cc = CPU_GET_CLASS(cpu);
1301 uint32_t tmp;
1302
1303 if (n > cc->gdb_num_core_regs) {
1304 return 0;
1305 }
1306
1307 tmp = ldl_p(mem_buf);
1308
1309 if (n < 32) {
1310 env->gpr[n] = tmp;
1311 } else {
1312 switch (n) {
1313 case 32: /* PPC */
1314 env->ppc = tmp;
1315 break;
1316
1317 case 33: /* NPC */
1318 env->npc = tmp;
1319 break;
1320
1321 case 34: /* SR */
1322 env->sr = tmp;
1323 break;
1324
1325 default:
1326 break;
1327 }
1328 }
1329 return 4;
1330 }
1331 #elif defined (TARGET_SH4)
1332
1333 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1334 /* FIXME: We should use XML for this. */
1335
1336 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1337 {
1338 switch (n) {
1339 case 0 ... 7:
1340 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1341 GET_REGL(env->gregs[n + 16]);
1342 } else {
1343 GET_REGL(env->gregs[n]);
1344 }
1345 case 8 ... 15:
1346 GET_REGL(env->gregs[n]);
1347 case 16:
1348 GET_REGL(env->pc);
1349 case 17:
1350 GET_REGL(env->pr);
1351 case 18:
1352 GET_REGL(env->gbr);
1353 case 19:
1354 GET_REGL(env->vbr);
1355 case 20:
1356 GET_REGL(env->mach);
1357 case 21:
1358 GET_REGL(env->macl);
1359 case 22:
1360 GET_REGL(env->sr);
1361 case 23:
1362 GET_REGL(env->fpul);
1363 case 24:
1364 GET_REGL(env->fpscr);
1365 case 25 ... 40:
1366 if (env->fpscr & FPSCR_FR) {
1367 stfl_p(mem_buf, env->fregs[n - 9]);
1368 } else {
1369 stfl_p(mem_buf, env->fregs[n - 25]);
1370 }
1371 return 4;
1372 case 41:
1373 GET_REGL(env->ssr);
1374 case 42:
1375 GET_REGL(env->spc);
1376 case 43 ... 50:
1377 GET_REGL(env->gregs[n - 43]);
1378 case 51 ... 58:
1379 GET_REGL(env->gregs[n - (51 - 16)]);
1380 }
1381
1382 return 0;
1383 }
1384
1385 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1386 {
1387 switch (n) {
1388 case 0 ... 7:
1389 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1390 env->gregs[n + 16] = ldl_p(mem_buf);
1391 } else {
1392 env->gregs[n] = ldl_p(mem_buf);
1393 }
1394 break;
1395 case 8 ... 15:
1396 env->gregs[n] = ldl_p(mem_buf);
1397 break;
1398 case 16:
1399 env->pc = ldl_p(mem_buf);
1400 break;
1401 case 17:
1402 env->pr = ldl_p(mem_buf);
1403 break;
1404 case 18:
1405 env->gbr = ldl_p(mem_buf);
1406 break;
1407 case 19:
1408 env->vbr = ldl_p(mem_buf);
1409 break;
1410 case 20:
1411 env->mach = ldl_p(mem_buf);
1412 break;
1413 case 21:
1414 env->macl = ldl_p(mem_buf);
1415 break;
1416 case 22:
1417 env->sr = ldl_p(mem_buf);
1418 break;
1419 case 23:
1420 env->fpul = ldl_p(mem_buf);
1421 break;
1422 case 24:
1423 env->fpscr = ldl_p(mem_buf);
1424 break;
1425 case 25 ... 40:
1426 if (env->fpscr & FPSCR_FR) {
1427 env->fregs[n - 9] = ldfl_p(mem_buf);
1428 } else {
1429 env->fregs[n - 25] = ldfl_p(mem_buf);
1430 }
1431 break;
1432 case 41:
1433 env->ssr = ldl_p(mem_buf);
1434 break;
1435 case 42:
1436 env->spc = ldl_p(mem_buf);
1437 break;
1438 case 43 ... 50:
1439 env->gregs[n - 43] = ldl_p(mem_buf);
1440 break;
1441 case 51 ... 58:
1442 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1443 break;
1444 default:
1445 return 0;
1446 }
1447
1448 return 4;
1449 }
1450 #elif defined (TARGET_MICROBLAZE)
1451
1452 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1453 {
1454 if (n < 32) {
1455 GET_REG32(env->regs[n]);
1456 } else {
1457 GET_REG32(env->sregs[n - 32]);
1458 }
1459 return 0;
1460 }
1461
1462 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1463 {
1464 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1465 CPUClass *cc = CPU_GET_CLASS(cpu);
1466 uint32_t tmp;
1467
1468 if (n > cc->gdb_num_core_regs) {
1469 return 0;
1470 }
1471
1472 tmp = ldl_p(mem_buf);
1473
1474 if (n < 32) {
1475 env->regs[n] = tmp;
1476 } else {
1477 env->sregs[n - 32] = tmp;
1478 }
1479 return 4;
1480 }
1481 #elif defined (TARGET_CRIS)
1482
1483 static int
1484 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1485 {
1486 if (n < 15) {
1487 GET_REG32(env->regs[n]);
1488 }
1489
1490 if (n == 15) {
1491 GET_REG32(env->pc);
1492 }
1493
1494 if (n < 32) {
1495 switch (n) {
1496 case 16:
1497 GET_REG8(env->pregs[n - 16]);
1498 case 17:
1499 GET_REG8(env->pregs[n - 16]);
1500 case 20:
1501 case 21:
1502 GET_REG16(env->pregs[n - 16]);
1503 default:
1504 if (n >= 23) {
1505 GET_REG32(env->pregs[n - 16]);
1506 }
1507 break;
1508 }
1509 }
1510 return 0;
1511 }
1512
1513 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1514 {
1515 uint8_t srs;
1516
1517 if (env->pregs[PR_VR] < 32) {
1518 return read_register_crisv10(env, mem_buf, n);
1519 }
1520
1521 srs = env->pregs[PR_SRS];
1522 if (n < 16) {
1523 GET_REG32(env->regs[n]);
1524 }
1525
1526 if (n >= 21 && n < 32) {
1527 GET_REG32(env->pregs[n - 16]);
1528 }
1529 if (n >= 33 && n < 49) {
1530 GET_REG32(env->sregs[srs][n - 33]);
1531 }
1532 switch (n) {
1533 case 16:
1534 GET_REG8(env->pregs[0]);
1535 case 17:
1536 GET_REG8(env->pregs[1]);
1537 case 18:
1538 GET_REG32(env->pregs[2]);
1539 case 19:
1540 GET_REG8(srs);
1541 case 20:
1542 GET_REG16(env->pregs[4]);
1543 case 32:
1544 GET_REG32(env->pc);
1545 }
1546
1547 return 0;
1548 }
1549
1550 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1551 {
1552 uint32_t tmp;
1553
1554 if (n > 49) {
1555 return 0;
1556 }
1557
1558 tmp = ldl_p(mem_buf);
1559
1560 if (n < 16) {
1561 env->regs[n] = tmp;
1562 }
1563
1564 if (n >= 21 && n < 32) {
1565 env->pregs[n - 16] = tmp;
1566 }
1567
1568 /* FIXME: Should the support-function regs be writable? */
1569 switch (n) {
1570 case 16:
1571 return 1;
1572 case 17:
1573 return 1;
1574 case 18:
1575 env->pregs[PR_PID] = tmp;
1576 break;
1577 case 19:
1578 return 1;
1579 case 20:
1580 return 2;
1581 case 32:
1582 env->pc = tmp;
1583 break;
1584 }
1585
1586 return 4;
1587 }
1588 #elif defined (TARGET_ALPHA)
1589
1590 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1591 {
1592 uint64_t val;
1593 CPU_DoubleU d;
1594
1595 switch (n) {
1596 case 0 ... 30:
1597 val = env->ir[n];
1598 break;
1599 case 32 ... 62:
1600 d.d = env->fir[n - 32];
1601 val = d.ll;
1602 break;
1603 case 63:
1604 val = cpu_alpha_load_fpcr(env);
1605 break;
1606 case 64:
1607 val = env->pc;
1608 break;
1609 case 66:
1610 val = env->unique;
1611 break;
1612 case 31:
1613 case 65:
1614 /* 31 really is the zero register; 65 is unassigned in the
1615 gdb protocol, but is still required to occupy 8 bytes. */
1616 val = 0;
1617 break;
1618 default:
1619 return 0;
1620 }
1621 GET_REGL(val);
1622 }
1623
1624 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1625 {
1626 target_ulong tmp = ldtul_p(mem_buf);
1627 CPU_DoubleU d;
1628
1629 switch (n) {
1630 case 0 ... 30:
1631 env->ir[n] = tmp;
1632 break;
1633 case 32 ... 62:
1634 d.ll = tmp;
1635 env->fir[n - 32] = d.d;
1636 break;
1637 case 63:
1638 cpu_alpha_store_fpcr(env, tmp);
1639 break;
1640 case 64:
1641 env->pc = tmp;
1642 break;
1643 case 66:
1644 env->unique = tmp;
1645 break;
1646 case 31:
1647 case 65:
1648 /* 31 really is the zero register; 65 is unassigned in the
1649 gdb protocol, but is still required to occupy 8 bytes. */
1650 break;
1651 default:
1652 return 0;
1653 }
1654 return 8;
1655 }
1656 #elif defined (TARGET_S390X)
1657
1658 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1659 {
1660 uint64_t val;
1661 int cc_op;
1662
1663 switch (n) {
1664 case S390_PSWM_REGNUM:
1665 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1666 val = deposit64(env->psw.mask, 44, 2, cc_op);
1667 GET_REGL(val);
1668 case S390_PSWA_REGNUM:
1669 GET_REGL(env->psw.addr);
1670 case S390_R0_REGNUM ... S390_R15_REGNUM:
1671 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1672 case S390_A0_REGNUM ... S390_A15_REGNUM:
1673 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1674 case S390_FPC_REGNUM:
1675 GET_REG32(env->fpc);
1676 case S390_F0_REGNUM ... S390_F15_REGNUM:
1677 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
1678 }
1679
1680 return 0;
1681 }
1682
1683 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1684 {
1685 target_ulong tmpl;
1686 uint32_t tmp32;
1687 int r = 8;
1688 tmpl = ldtul_p(mem_buf);
1689 tmp32 = ldl_p(mem_buf);
1690
1691 switch (n) {
1692 case S390_PSWM_REGNUM:
1693 env->psw.mask = tmpl;
1694 env->cc_op = extract64(tmpl, 44, 2);
1695 break;
1696 case S390_PSWA_REGNUM:
1697 env->psw.addr = tmpl;
1698 break;
1699 case S390_R0_REGNUM ... S390_R15_REGNUM:
1700 env->regs[n-S390_R0_REGNUM] = tmpl;
1701 break;
1702 case S390_A0_REGNUM ... S390_A15_REGNUM:
1703 env->aregs[n-S390_A0_REGNUM] = tmp32;
1704 r = 4;
1705 break;
1706 case S390_FPC_REGNUM:
1707 env->fpc = tmp32;
1708 r = 4;
1709 break;
1710 case S390_F0_REGNUM ... S390_F15_REGNUM:
1711 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1712 break;
1713 default:
1714 return 0;
1715 }
1716 return r;
1717 }
1718 #elif defined (TARGET_LM32)
1719
1720 #include "hw/lm32/lm32_pic.h"
1721
1722 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1723 {
1724 if (n < 32) {
1725 GET_REG32(env->regs[n]);
1726 } else {
1727 switch (n) {
1728 case 32:
1729 GET_REG32(env->pc);
1730 /* FIXME: put in right exception ID */
1731 case 33:
1732 GET_REG32(0);
1733 case 34:
1734 GET_REG32(env->eba);
1735 case 35:
1736 GET_REG32(env->deba);
1737 case 36:
1738 GET_REG32(env->ie);
1739 case 37:
1740 GET_REG32(lm32_pic_get_im(env->pic_state));
1741 case 38:
1742 GET_REG32(lm32_pic_get_ip(env->pic_state));
1743 }
1744 }
1745 return 0;
1746 }
1747
1748 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1749 {
1750 LM32CPU *cpu = lm32_env_get_cpu(env);
1751 CPUClass *cc = CPU_GET_CLASS(cpu);
1752 uint32_t tmp;
1753
1754 if (n > cc->gdb_num_core_regs) {
1755 return 0;
1756 }
1757
1758 tmp = ldl_p(mem_buf);
1759
1760 if (n < 32) {
1761 env->regs[n] = tmp;
1762 } else {
1763 switch (n) {
1764 case 32:
1765 env->pc = tmp;
1766 break;
1767 case 34:
1768 env->eba = tmp;
1769 break;
1770 case 35:
1771 env->deba = tmp;
1772 break;
1773 case 36:
1774 env->ie = tmp;
1775 break;
1776 case 37:
1777 lm32_pic_set_im(env->pic_state, tmp);
1778 break;
1779 case 38:
1780 lm32_pic_set_ip(env->pic_state, tmp);
1781 break;
1782 }
1783 }
1784 return 4;
1785 }
1786 #elif defined(TARGET_XTENSA)
1787
1788 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1789 {
1790 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1791
1792 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1793 return 0;
1794 }
1795
1796 switch (reg->type) {
1797 case 9: /*pc*/
1798 GET_REG32(env->pc);
1799
1800 case 1: /*ar*/
1801 xtensa_sync_phys_from_window(env);
1802 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1803
1804 case 2: /*SR*/
1805 GET_REG32(env->sregs[reg->targno & 0xff]);
1806
1807 case 3: /*UR*/
1808 GET_REG32(env->uregs[reg->targno & 0xff]);
1809
1810 case 4: /*f*/
1811 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1812
1813 case 8: /*a*/
1814 GET_REG32(env->regs[reg->targno & 0x0f]);
1815
1816 default:
1817 qemu_log("%s from reg %d of unsupported type %d\n",
1818 __func__, n, reg->type);
1819 return 0;
1820 }
1821 }
1822
1823 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1824 {
1825 uint32_t tmp;
1826 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1827
1828 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1829 return 0;
1830 }
1831
1832 tmp = ldl_p(mem_buf);
1833
1834 switch (reg->type) {
1835 case 9: /*pc*/
1836 env->pc = tmp;
1837 break;
1838
1839 case 1: /*ar*/
1840 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1841 xtensa_sync_window_from_phys(env);
1842 break;
1843
1844 case 2: /*SR*/
1845 env->sregs[reg->targno & 0xff] = tmp;
1846 break;
1847
1848 case 3: /*UR*/
1849 env->uregs[reg->targno & 0xff] = tmp;
1850 break;
1851
1852 case 4: /*f*/
1853 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1854 break;
1855
1856 case 8: /*a*/
1857 env->regs[reg->targno & 0x0f] = tmp;
1858 break;
1859
1860 default:
1861 qemu_log("%s to reg %d of unsupported type %d\n",
1862 __func__, n, reg->type);
1863 return 0;
1864 }
1865
1866 return 4;
1867 }
1868 #else
1869
1870 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1871 {
1872 return 0;
1873 }
1874
1875 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1876 {
1877 return 0;
1878 }
1879
1880 #endif
1881
1882 #ifdef GDB_CORE_XML
1883 /* Encode data using the encoding for 'x' packets. */
1884 static int memtox(char *buf, const char *mem, int len)
1885 {
1886 char *p = buf;
1887 char c;
1888
1889 while (len--) {
1890 c = *(mem++);
1891 switch (c) {
1892 case '#': case '$': case '*': case '}':
1893 *(p++) = '}';
1894 *(p++) = c ^ 0x20;
1895 break;
1896 default:
1897 *(p++) = c;
1898 break;
1899 }
1900 }
1901 return p - buf;
1902 }
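
/*
 * Example of the escaping above (a sketch, not new behaviour): the
 * four protocol metacharacters '#', '$', '*' and '}' are sent as '}'
 * followed by the original byte XORed with 0x20, so "a#b" becomes
 * the byte sequence 'a', '}', 0x03, 'b' and the output can be up to
 * twice as long as the input.
 */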
1903
1904 static const char *get_feature_xml(const char *p, const char **newp)
1905 {
1906 size_t len;
1907 int i;
1908 const char *name;
1909 static char target_xml[1024];
1910
1911 len = 0;
1912 while (p[len] && p[len] != ':')
1913 len++;
1914 *newp = p + len;
1915
1916 name = NULL;
1917 if (strncmp(p, "target.xml", len) == 0) {
1918 /* Generate the XML description for this CPU. */
1919 if (!target_xml[0]) {
1920 GDBRegisterState *r;
1921 CPUState *cpu = first_cpu;
1922
1923 snprintf(target_xml, sizeof(target_xml),
1924 "<?xml version=\"1.0\"?>"
1925 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1926 "<target>"
1927 "<xi:include href=\"%s\"/>",
1928 GDB_CORE_XML);
1929
1930 for (r = cpu->gdb_regs; r; r = r->next) {
1931 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1932 pstrcat(target_xml, sizeof(target_xml), r->xml);
1933 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1934 }
1935 pstrcat(target_xml, sizeof(target_xml), "</target>");
1936 }
1937 return target_xml;
1938 }
1939 for (i = 0; ; i++) {
1940 name = xml_builtin[i][0];
1941 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1942 break;
1943 }
1944 return name ? xml_builtin[i][1] : NULL;
1945 }
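
/*
 * For reference, the generated target.xml looks roughly like the
 * following; the exact <xi:include> lines depend on GDB_CORE_XML and
 * on whatever coprocessor XML files the target registered
 * ("arm-vfp.xml" here is only an example):
 *
 *   <?xml version="1.0"?>
 *   <!DOCTYPE target SYSTEM "gdb-target.dtd">
 *   <target>
 *     <xi:include href="arm-core.xml"/>
 *     <xi:include href="arm-vfp.xml"/>
 *   </target>
 *
 * gdb then fetches each included feature file through further
 * qXfer:features:read requests, served from xml_builtin.
 */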
1946 #endif
1947
1948 static int gdb_read_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1949 {
1950 CPUClass *cc = CPU_GET_CLASS(cpu);
1951 CPUArchState *env = cpu->env_ptr;
1952 GDBRegisterState *r;
1953
1954 if (reg < cc->gdb_num_core_regs) {
1955 return cpu_gdb_read_register(env, mem_buf, reg);
1956 }
1957
1958 for (r = cpu->gdb_regs; r; r = r->next) {
1959 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1960 return r->get_reg(env, mem_buf, reg - r->base_reg);
1961 }
1962 }
1963 return 0;
1964 }
1965
1966 static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1967 {
1968 CPUClass *cc = CPU_GET_CLASS(cpu);
1969 CPUArchState *env = cpu->env_ptr;
1970 GDBRegisterState *r;
1971
1972 if (reg < cc->gdb_num_core_regs) {
1973 return cpu_gdb_write_register(env, mem_buf, reg);
1974 }
1975
1976 for (r = cpu->gdb_regs; r; r = r->next) {
1977 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1978 return r->set_reg(env, mem_buf, reg - r->base_reg);
1979 }
1980 }
1981 return 0;
1982 }
1983
1984 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1985 specifies the first register number and these registers are included in
1986 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1987 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1988 */
1989
1990 void gdb_register_coprocessor(CPUState *cpu,
1991 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1992 int num_regs, const char *xml, int g_pos)
1993 {
1994 GDBRegisterState *s;
1995 GDBRegisterState **p;
1996
1997 p = &cpu->gdb_regs;
1998 while (*p) {
1999 /* Check for duplicates. */
2000 if (strcmp((*p)->xml, xml) == 0)
2001 return;
2002 p = &(*p)->next;
2003 }
2004
2005 s = g_new0(GDBRegisterState, 1);
2006 s->base_reg = cpu->gdb_num_regs;
2007 s->num_regs = num_regs;
2008 s->get_reg = get_reg;
2009 s->set_reg = set_reg;
2010 s->xml = xml;
2011
2012 /* Add to end of list. */
2013 cpu->gdb_num_regs += num_regs;
2014 *p = s;
2015 if (g_pos) {
2016 if (g_pos != s->base_reg) {
2017 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
2018 "Expected %d got %d\n", xml, g_pos, s->base_reg);
2019 }
2020 }
2021 }
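
/*
 * Usage sketch (the callback names, register count and XML file below
 * are illustrative, not taken from any real target): a target that
 * wants gdb to see an extra, XML-described register bank calls
 *
 *   gdb_register_coprocessor(cs, foo_gdb_get_reg, foo_gdb_set_reg,
 *                            NUM_FOO_REGS, "foo-bank.xml", 0);
 *
 * The registers then become reachable via 'p'/'P' packets at numbers
 * starting from the cpu->gdb_num_regs value captured in base_reg, and
 * they are appended to the 'g' packet; passing a nonzero g_pos merely
 * sanity-checks that they land at that expected offset.
 */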
2022
2023 #ifndef CONFIG_USER_ONLY
2024 static const int xlat_gdb_type[] = {
2025 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
2026 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
2027 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
2028 };
2029 #endif
2030
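/*
 * The 'type' argument of the two helpers below comes straight from the
 * Z/z packet, where (per the GDB remote protocol) type 0 is a software
 * breakpoint, 1 a hardware breakpoint, 2 a write watchpoint, 3 a read
 * watchpoint and 4 an access watchpoint; the GDB_BREAKPOINT_* and
 * GDB_WATCHPOINT_* constants mirror that numbering, which is what lets
 * xlat_gdb_type[] above be indexed by the raw packet value.
 */
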
2031 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
2032 {
2033 CPUState *cpu;
2034 CPUArchState *env;
2035 int err = 0;
2036
2037 if (kvm_enabled()) {
2038 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2039 }
2040
2041 switch (type) {
2042 case GDB_BREAKPOINT_SW:
2043 case GDB_BREAKPOINT_HW:
2044 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2045 env = cpu->env_ptr;
2046 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
2047 if (err)
2048 break;
2049 }
2050 return err;
2051 #ifndef CONFIG_USER_ONLY
2052 case GDB_WATCHPOINT_WRITE:
2053 case GDB_WATCHPOINT_READ:
2054 case GDB_WATCHPOINT_ACCESS:
2055 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2056 env = cpu->env_ptr;
2057 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
2058 NULL);
2059 if (err)
2060 break;
2061 }
2062 return err;
2063 #endif
2064 default:
2065 return -ENOSYS;
2066 }
2067 }
2068
2069 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
2070 {
2071 CPUState *cpu;
2072 CPUArchState *env;
2073 int err = 0;
2074
2075 if (kvm_enabled()) {
2076 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2077 }
2078
2079 switch (type) {
2080 case GDB_BREAKPOINT_SW:
2081 case GDB_BREAKPOINT_HW:
2082 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2083 env = cpu->env_ptr;
2084 err = cpu_breakpoint_remove(env, addr, BP_GDB);
2085 if (err)
2086 break;
2087 }
2088 return err;
2089 #ifndef CONFIG_USER_ONLY
2090 case GDB_WATCHPOINT_WRITE:
2091 case GDB_WATCHPOINT_READ:
2092 case GDB_WATCHPOINT_ACCESS:
2093 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2094 env = cpu->env_ptr;
2095 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
2096 if (err)
2097 break;
2098 }
2099 return err;
2100 #endif
2101 default:
2102 return -ENOSYS;
2103 }
2104 }
2105
2106 static void gdb_breakpoint_remove_all(void)
2107 {
2108 CPUState *cpu;
2109 CPUArchState *env;
2110
2111 if (kvm_enabled()) {
2112 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
2113 return;
2114 }
2115
2116 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2117 env = cpu->env_ptr;
2118 cpu_breakpoint_remove_all(env, BP_GDB);
2119 #ifndef CONFIG_USER_ONLY
2120 cpu_watchpoint_remove_all(env, BP_GDB);
2121 #endif
2122 }
2123 }
2124
2125 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2126 {
2127 CPUState *cpu = s->c_cpu;
2128 CPUClass *cc = CPU_GET_CLASS(cpu);
2129
2130 cpu_synchronize_state(cpu);
2131 if (cc->set_pc) {
2132 cc->set_pc(cpu, pc);
2133 }
2134 }
2135
2136 static CPUState *find_cpu(uint32_t thread_id)
2137 {
2138 CPUState *cpu;
2139
2140 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2141 if (cpu_index(cpu) == thread_id) {
2142 return cpu;
2143 }
2144 }
2145
2146 return NULL;
2147 }
2148
2149 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2150 {
2151 CPUState *cpu;
2152 const char *p;
2153 uint32_t thread;
2154 int ch, reg_size, type, res;
2155 char buf[MAX_PACKET_LENGTH];
2156 uint8_t mem_buf[MAX_PACKET_LENGTH];
2157 uint8_t *registers;
2158 target_ulong addr, len;
2159
2160 #ifdef DEBUG_GDB
2161 printf("command='%s'\n", line_buf);
2162 #endif
2163 p = line_buf;
2164 ch = *p++;
2165 switch(ch) {
2166 case '?':
2167 /* TODO: Make this return the correct value for user-mode. */
2168 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2169 cpu_index(s->c_cpu));
2170 put_packet(s, buf);
2171 /* Remove all the breakpoints when this query is issued,
2172 * because gdb is doing an initial connect and the state
2173 * should be cleaned up.
2174 */
2175 gdb_breakpoint_remove_all();
2176 break;
2177 case 'c':
2178 if (*p != '\0') {
2179 addr = strtoull(p, (char **)&p, 16);
2180 gdb_set_cpu_pc(s, addr);
2181 }
2182 s->signal = 0;
2183 gdb_continue(s);
2184 return RS_IDLE;
2185 case 'C':
2186 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2187 if (s->signal == -1)
2188 s->signal = 0;
2189 gdb_continue(s);
2190 return RS_IDLE;
2191 case 'v':
2192 if (strncmp(p, "Cont", 4) == 0) {
2193 int res_signal, res_thread;
2194
2195 p += 4;
2196 if (*p == '?') {
2197 put_packet(s, "vCont;c;C;s;S");
2198 break;
2199 }
2200 res = 0;
2201 res_signal = 0;
2202 res_thread = 0;
2203 while (*p) {
2204 int action, signal;
2205
2206 if (*p++ != ';') {
2207 res = 0;
2208 break;
2209 }
2210 action = *p++;
2211 signal = 0;
2212 if (action == 'C' || action == 'S') {
2213 signal = strtoul(p, (char **)&p, 16);
2214 } else if (action != 'c' && action != 's') {
2215 res = 0;
2216 break;
2217 }
2218 thread = 0;
2219 if (*p == ':') {
2220 thread = strtoull(p+1, (char **)&p, 16);
2221 }
2222 action = tolower(action);
2223 if (res == 0 || (res == 'c' && action == 's')) {
2224 res = action;
2225 res_signal = signal;
2226 res_thread = thread;
2227 }
2228 }
2229 if (res) {
2230 if (res_thread != -1 && res_thread != 0) {
2231 cpu = find_cpu(res_thread);
2232 if (cpu == NULL) {
2233 put_packet(s, "E22");
2234 break;
2235 }
2236 s->c_cpu = cpu;
2237 }
2238 if (res == 's') {
2239 cpu_single_step(s->c_cpu, sstep_flags);
2240 }
2241 s->signal = res_signal;
2242 gdb_continue(s);
2243 return RS_IDLE;
2244 }
2245 break;
2246 } else {
2247 goto unknown_command;
2248 }
2249 case 'k':
2250 #ifdef CONFIG_USER_ONLY
2251 /* Kill the target */
2252 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2253 exit(0);
2254 #endif
2255 case 'D':
2256 /* Detach packet */
2257 gdb_breakpoint_remove_all();
2258 gdb_syscall_mode = GDB_SYS_DISABLED;
2259 gdb_continue(s);
2260 put_packet(s, "OK");
2261 break;
2262 case 's':
2263 if (*p != '\0') {
2264 addr = strtoull(p, (char **)&p, 16);
2265 gdb_set_cpu_pc(s, addr);
2266 }
2267 cpu_single_step(s->c_cpu, sstep_flags);
2268 gdb_continue(s);
2269 return RS_IDLE;
2270 case 'F':
2271 {
2272 target_ulong ret;
2273 target_ulong err;
2274
2275 ret = strtoull(p, (char **)&p, 16);
2276 if (*p == ',') {
2277 p++;
2278 err = strtoull(p, (char **)&p, 16);
2279 } else {
2280 err = 0;
2281 }
2282 if (*p == ',')
2283 p++;
2284 type = *p;
2285 if (s->current_syscall_cb) {
2286 s->current_syscall_cb(s->c_cpu, ret, err);
2287 s->current_syscall_cb = NULL;
2288 }
2289 if (type == 'C') {
2290 put_packet(s, "T02");
2291 } else {
2292 gdb_continue(s);
2293 }
2294 }
2295 break;
2296 case 'g':
2297 cpu_synchronize_state(s->g_cpu);
2298 len = 0;
2299 for (addr = 0; addr < s->g_cpu->gdb_num_regs; addr++) {
2300 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2301 len += reg_size;
2302 }
2303 memtohex(buf, mem_buf, len);
2304 put_packet(s, buf);
2305 break;
2306 case 'G':
2307 cpu_synchronize_state(s->g_cpu);
2308 registers = mem_buf;
2309 len = strlen(p) / 2;
2310 hextomem((uint8_t *)registers, p, len);
2311 for (addr = 0; addr < s->g_cpu->gdb_num_regs && len > 0; addr++) {
2312 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2313 len -= reg_size;
2314 registers += reg_size;
2315 }
2316 put_packet(s, "OK");
2317 break;
2318 case 'm':
2319 addr = strtoull(p, (char **)&p, 16);
2320 if (*p == ',')
2321 p++;
2322 len = strtoull(p, NULL, 16);
2323 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, false) != 0) {
2324 put_packet (s, "E14");
2325 } else {
2326 memtohex(buf, mem_buf, len);
2327 put_packet(s, buf);
2328 }
2329 break;
2330 case 'M':
2331 addr = strtoull(p, (char **)&p, 16);
2332 if (*p == ',')
2333 p++;
2334 len = strtoull(p, (char **)&p, 16);
2335 if (*p == ':')
2336 p++;
2337 hextomem(mem_buf, p, len);
2338 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len,
2339 true) != 0) {
2340 put_packet(s, "E14");
2341 } else {
2342 put_packet(s, "OK");
2343 }
2344 break;
2345 case 'p':
2346 /* Older gdbs are really dumb, and don't use 'g' if 'p' is available.
2347 This works, but can be very slow. Anything new enough to
2348 understand XML also knows how to use this properly. */
2349 if (!gdb_has_xml)
2350 goto unknown_command;
2351 addr = strtoull(p, (char **)&p, 16);
2352 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2353 if (reg_size) {
2354 memtohex(buf, mem_buf, reg_size);
2355 put_packet(s, buf);
2356 } else {
2357 put_packet(s, "E14");
2358 }
2359 break;
2360 case 'P':
2361 if (!gdb_has_xml)
2362 goto unknown_command;
2363 addr = strtoull(p, (char **)&p, 16);
2364 if (*p == '=')
2365 p++;
2366 reg_size = strlen(p) / 2;
2367 hextomem(mem_buf, p, reg_size);
2368 gdb_write_register(s->g_cpu, mem_buf, addr);
2369 put_packet(s, "OK");
2370 break;
2371 case 'Z':
2372 case 'z':
2373 type = strtoul(p, (char **)&p, 16);
2374 if (*p == ',')
2375 p++;
2376 addr = strtoull(p, (char **)&p, 16);
2377 if (*p == ',')
2378 p++;
2379 len = strtoull(p, (char **)&p, 16);
2380 if (ch == 'Z')
2381 res = gdb_breakpoint_insert(addr, len, type);
2382 else
2383 res = gdb_breakpoint_remove(addr, len, type);
2384 if (res >= 0)
2385 put_packet(s, "OK");
2386 else if (res == -ENOSYS)
2387 put_packet(s, "");
2388 else
2389 put_packet(s, "E22");
2390 break;
2391 case 'H':
2392 type = *p++;
2393 thread = strtoull(p, (char **)&p, 16);
2394 if (thread == -1 || thread == 0) {
2395 put_packet(s, "OK");
2396 break;
2397 }
2398 cpu = find_cpu(thread);
2399 if (cpu == NULL) {
2400 put_packet(s, "E22");
2401 break;
2402 }
2403 switch (type) {
2404 case 'c':
2405 s->c_cpu = cpu;
2406 put_packet(s, "OK");
2407 break;
2408 case 'g':
2409 s->g_cpu = cpu;
2410 put_packet(s, "OK");
2411 break;
2412 default:
2413 put_packet(s, "E22");
2414 break;
2415 }
2416 break;
2417 case 'T':
2418 thread = strtoull(p, (char **)&p, 16);
2419 cpu = find_cpu(thread);
2420
2421 if (cpu != NULL) {
2422 put_packet(s, "OK");
2423 } else {
2424 put_packet(s, "E22");
2425 }
2426 break;
2427 case 'q':
2428 case 'Q':
2429 /* parse any 'q' packets here */
2430 if (!strcmp(p,"qemu.sstepbits")) {
2431 /* Query single-step bit definitions */
2432 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2433 SSTEP_ENABLE,
2434 SSTEP_NOIRQ,
2435 SSTEP_NOTIMER);
2436 put_packet(s, buf);
2437 break;
2438 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2439 /* Display or change the sstep_flags */
2440 p += 10;
2441 if (*p != '=') {
2442 /* Display current setting */
2443 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2444 put_packet(s, buf);
2445 break;
2446 }
2447 p++;
2448 type = strtoul(p, (char **)&p, 16);
2449 sstep_flags = type;
2450 put_packet(s, "OK");
2451 break;
2452 } else if (strcmp(p,"C") == 0) {
2453 /* "Current thread" remains vague in the spec, so always return
2454 * the first CPU (gdb returns the first thread). */
2455 put_packet(s, "QC1");
2456 break;
2457 } else if (strcmp(p,"fThreadInfo") == 0) {
2458 s->query_cpu = first_cpu;
2459 goto report_cpuinfo;
2460 } else if (strcmp(p,"sThreadInfo") == 0) {
2461 report_cpuinfo:
2462 if (s->query_cpu) {
2463 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2464 put_packet(s, buf);
2465 s->query_cpu = s->query_cpu->next_cpu;
2466 } else
2467 put_packet(s, "l");
2468 break;
2469 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2470 thread = strtoull(p+16, (char **)&p, 16);
2471 cpu = find_cpu(thread);
2472 if (cpu != NULL) {
2473 cpu_synchronize_state(cpu);
2474 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2475 "CPU#%d [%s]", cpu->cpu_index,
2476 cpu->halted ? "halted " : "running");
2477 memtohex(buf, mem_buf, len);
2478 put_packet(s, buf);
2479 }
2480 break;
2481 }
2482 #ifdef CONFIG_USER_ONLY
2483 else if (strncmp(p, "Offsets", 7) == 0) {
2484 CPUArchState *env = s->c_cpu->env_ptr;
2485 TaskState *ts = env->opaque;
2486
2487 snprintf(buf, sizeof(buf),
2488 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2489 ";Bss=" TARGET_ABI_FMT_lx,
2490 ts->info->code_offset,
2491 ts->info->data_offset,
2492 ts->info->data_offset);
2493 put_packet(s, buf);
2494 break;
2495 }
2496 #else /* !CONFIG_USER_ONLY */
2497 else if (strncmp(p, "Rcmd,", 5) == 0) {
2498 int len = strlen(p + 5);
2499
2500 if ((len % 2) != 0) {
2501 put_packet(s, "E01");
2502 break;
2503 }
2504 hextomem(mem_buf, p + 5, len);
2505 len = len / 2;
2506 mem_buf[len++] = 0;
2507 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2508 put_packet(s, "OK");
2509 break;
2510 }
2511 #endif /* !CONFIG_USER_ONLY */
2512 if (strncmp(p, "Supported", 9) == 0) {
2513 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2514 #ifdef GDB_CORE_XML
2515 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2516 #endif
2517 put_packet(s, buf);
2518 break;
2519 }
2520 #ifdef GDB_CORE_XML
2521 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2522 const char *xml;
2523 target_ulong total_len;
2524
2525 gdb_has_xml = 1;
2526 p += 19;
2527 xml = get_feature_xml(p, &p);
2528 if (!xml) {
2529 snprintf(buf, sizeof(buf), "E00");
2530 put_packet(s, buf);
2531 break;
2532 }
2533
2534 if (*p == ':')
2535 p++;
2536 addr = strtoul(p, (char **)&p, 16);
2537 if (*p == ',')
2538 p++;
2539 len = strtoul(p, (char **)&p, 16);
2540
2541 total_len = strlen(xml);
2542 if (addr > total_len) {
2543 snprintf(buf, sizeof(buf), "E00");
2544 put_packet(s, buf);
2545 break;
2546 }
2547 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2548 len = (MAX_PACKET_LENGTH - 5) / 2;
2549 if (len < total_len - addr) {
2550 buf[0] = 'm';
2551 len = memtox(buf + 1, xml + addr, len);
2552 } else {
2553 buf[0] = 'l';
2554 len = memtox(buf + 1, xml + addr, total_len - addr);
2555 }
2556 put_packet_binary(s, buf, len + 1);
2557 break;
2558 }
2559 #endif
2560 /* Unrecognised 'q' command. */
2561 goto unknown_command;
2562
2563 default:
2564 unknown_command:
2565 /* put empty packet */
2566 buf[0] = '\0';
2567 put_packet(s, buf);
2568 break;
2569 }
2570 return RS_IDLE;
2571 }
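/* Illustrative only (not part of the original stub): a typical exchange
 * handled by gdb_handle_packet() above, with checksums abbreviated as "xx".
 * Each packet is framed as "$<payload>#<checksum>" and acknowledged with
 * '+' (or '-' to request retransmission).
 *
 *   gdb -> stub:  $m10000,4#xx      read 4 bytes at address 0x10000
 *   stub -> gdb:  +$89abcdef#xx     ack, then the bytes as hex digits
 *   gdb -> stub:  $Z0,10000,4#xx    insert a breakpoint at 0x10000
 *   stub -> gdb:  +$OK#xx
 *
 * An empty response ("$#00") tells gdb that a packet is unsupported, which
 * is exactly what the unknown_command path produces.
 */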
2572
2573 void gdb_set_stop_cpu(CPUState *cpu)
2574 {
2575 gdbserver_state->c_cpu = cpu;
2576 gdbserver_state->g_cpu = cpu;
2577 }
2578
2579 #ifndef CONFIG_USER_ONLY
2580 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2581 {
2582 GDBState *s = gdbserver_state;
2583 CPUArchState *env = s->c_cpu->env_ptr;
2584 CPUState *cpu = s->c_cpu;
2585 char buf[256];
2586 const char *type;
2587 int ret;
2588
2589 if (running || s->state == RS_INACTIVE) {
2590 return;
2591 }
2592 /* Is there a GDB syscall waiting to be sent? */
2593 if (s->current_syscall_cb) {
2594 put_packet(s, s->syscall_buf);
2595 return;
2596 }
2597 switch (state) {
2598 case RUN_STATE_DEBUG:
2599 if (env->watchpoint_hit) {
2600 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2601 case BP_MEM_READ:
2602 type = "r";
2603 break;
2604 case BP_MEM_ACCESS:
2605 type = "a";
2606 break;
2607 default:
2608 type = "";
2609 break;
2610 }
2611 snprintf(buf, sizeof(buf),
2612 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2613 GDB_SIGNAL_TRAP, cpu_index(cpu), type,
2614 env->watchpoint_hit->vaddr);
2615 env->watchpoint_hit = NULL;
2616 goto send_packet;
2617 }
2618 tb_flush(env);
2619 ret = GDB_SIGNAL_TRAP;
2620 break;
2621 case RUN_STATE_PAUSED:
2622 ret = GDB_SIGNAL_INT;
2623 break;
2624 case RUN_STATE_SHUTDOWN:
2625 ret = GDB_SIGNAL_QUIT;
2626 break;
2627 case RUN_STATE_IO_ERROR:
2628 ret = GDB_SIGNAL_IO;
2629 break;
2630 case RUN_STATE_WATCHDOG:
2631 ret = GDB_SIGNAL_ALRM;
2632 break;
2633 case RUN_STATE_INTERNAL_ERROR:
2634 ret = GDB_SIGNAL_ABRT;
2635 break;
2636 case RUN_STATE_SAVE_VM:
2637 case RUN_STATE_RESTORE_VM:
2638 return;
2639 case RUN_STATE_FINISH_MIGRATE:
2640 ret = GDB_SIGNAL_XCPU;
2641 break;
2642 default:
2643 ret = GDB_SIGNAL_UNKNOWN;
2644 break;
2645 }
2646 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
2647
2648 send_packet:
2649 put_packet(s, buf);
2650
2651 /* disable single step if it was enabled */
2652 cpu_single_step(cpu, 0);
2653 }
2654 #endif
2655
2656 /* Send a gdb syscall request.
2657 This accepts limited printf-style format specifiers, specifically:
2658 %x - target_ulong argument printed in hex.
2659 %lx - 64-bit argument printed in hex.
2660 %s - string pointer (target_ulong) and length (int) pair. */
2661 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2662 {
2663 va_list va;
2664 char *p;
2665 char *p_end;
2666 target_ulong addr;
2667 uint64_t i64;
2668 GDBState *s;
2669
2670 s = gdbserver_state;
2671 if (!s)
2672 return;
2673 s->current_syscall_cb = cb;
2674 #ifndef CONFIG_USER_ONLY
2675 vm_stop(RUN_STATE_DEBUG);
2676 #endif
2677 va_start(va, fmt);
2678 p = s->syscall_buf;
2679 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2680 *(p++) = 'F';
2681 while (*fmt) {
2682 if (*fmt == '%') {
2683 fmt++;
2684 switch (*fmt++) {
2685 case 'x':
2686 addr = va_arg(va, target_ulong);
2687 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2688 break;
2689 case 'l':
2690 if (*(fmt++) != 'x')
2691 goto bad_format;
2692 i64 = va_arg(va, uint64_t);
2693 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2694 break;
2695 case 's':
2696 addr = va_arg(va, target_ulong);
2697 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2698 addr, va_arg(va, int));
2699 break;
2700 default:
2701 bad_format:
2702 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2703 fmt - 1);
2704 break;
2705 }
2706 } else {
2707 *(p++) = *(fmt++);
2708 }
2709 }
2710 *p = 0;
2711 va_end(va);
2712 #ifdef CONFIG_USER_ONLY
2713 put_packet(s, s->syscall_buf);
2714 gdb_handlesig(s->c_cpu, 0);
2715 #else
2716 /* In this case wait to send the syscall packet until notification that
2717 the CPU has stopped. This must be done because if the packet is sent
2718 now the reply from the syscall request could be received while the CPU
2719 is still in the running state, which can cause packets to be dropped
2720 and state transition 'T' packets to be sent while the syscall is still
2721 being processed. */
2722 cpu_exit(s->c_cpu);
2723 #endif
2724 }
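/* Illustrative sketch (hypothetical callback and argument values): a
 * semihosting front end could ask the debugger to write 16 bytes from
 * guest address 'addr' to host file descriptor 'fd' with
 *
 *     gdb_do_syscall(my_syscall_cb, "write,%x,%x,%x",
 *                    (target_ulong)fd, addr, (target_ulong)16);
 *
 * which emits an "Fwrite,<fd>,<addr>,10" packet. gdb performs the File-I/O
 * request on the host and its 'F' reply is eventually delivered to
 * my_syscall_cb. */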
2725
2726 static void gdb_read_byte(GDBState *s, int ch)
2727 {
2728 int i, csum;
2729 uint8_t reply;
2730
2731 #ifndef CONFIG_USER_ONLY
2732 if (s->last_packet_len) {
2733 /* Waiting for a response to the last packet. If we see the start
2734 of a new command then abandon the previous response. */
2735 if (ch == '-') {
2736 #ifdef DEBUG_GDB
2737 printf("Got NACK, retransmitting\n");
2738 #endif
2739 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2740 }
2741 #ifdef DEBUG_GDB
2742 else if (ch == '+')
2743 printf("Got ACK\n");
2744 else
2745 printf("Got '%c' when expecting ACK/NACK\n", ch);
2746 #endif
2747 if (ch == '+' || ch == '$')
2748 s->last_packet_len = 0;
2749 if (ch != '$')
2750 return;
2751 }
2752 if (runstate_is_running()) {
2753 /* when the CPU is running, we cannot do anything except stop
2754 it when receiving a char */
2755 vm_stop(RUN_STATE_PAUSED);
2756 } else
2757 #endif
2758 {
2759 switch(s->state) {
2760 case RS_IDLE:
2761 if (ch == '$') {
2762 s->line_buf_index = 0;
2763 s->state = RS_GETLINE;
2764 }
2765 break;
2766 case RS_GETLINE:
2767 if (ch == '#') {
2768 s->state = RS_CHKSUM1;
2769 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2770 s->state = RS_IDLE;
2771 } else {
2772 s->line_buf[s->line_buf_index++] = ch;
2773 }
2774 break;
2775 case RS_CHKSUM1:
2776 s->line_buf[s->line_buf_index] = '\0';
2777 s->line_csum = fromhex(ch) << 4;
2778 s->state = RS_CHKSUM2;
2779 break;
2780 case RS_CHKSUM2:
2781 s->line_csum |= fromhex(ch);
2782 csum = 0;
2783 for(i = 0; i < s->line_buf_index; i++) {
2784 csum += s->line_buf[i];
2785 }
2786 if (s->line_csum != (csum & 0xff)) {
2787 reply = '-';
2788 put_buffer(s, &reply, 1);
2789 s->state = RS_IDLE;
2790 } else {
2791 reply = '+';
2792 put_buffer(s, &reply, 1);
2793 s->state = gdb_handle_packet(s, s->line_buf);
2794 }
2795 break;
2796 default:
2797 abort();
2798 }
2799 }
2800 }
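/* A minimal sketch (kept out of the build; names are illustrative): the
 * checksum verified in the RS_CHKSUM1/RS_CHKSUM2 states above is simply the
 * modulo-256 sum of the payload bytes, transmitted as two hex digits after
 * the '#'. */
#if 0
static uint8_t example_packet_checksum(const char *payload)
{
    uint8_t sum = 0;

    while (*payload) {
        sum += (uint8_t)*payload++;
    }
    return sum; /* e.g. "OK" -> 0x9a, so the reply is framed as "$OK#9a" */
}
#endif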
2801
2802 /* Tell the remote gdb that the process has exited. */
2803 void gdb_exit(CPUArchState *env, int code)
2804 {
2805 GDBState *s;
2806 char buf[4];
2807
2808 s = gdbserver_state;
2809 if (!s) {
2810 return;
2811 }
2812 #ifdef CONFIG_USER_ONLY
2813 if (gdbserver_fd < 0 || s->fd < 0) {
2814 return;
2815 }
2816 #endif
2817
2818 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2819 put_packet(s, buf);
2820
2821 #ifndef CONFIG_USER_ONLY
2822 if (s->chr) {
2823 qemu_chr_delete(s->chr);
2824 }
2825 #endif
2826 }
2827
2828 #ifdef CONFIG_USER_ONLY
2829 int
2830 gdb_queuesig (void)
2831 {
2832 GDBState *s;
2833
2834 s = gdbserver_state;
2835
2836 if (gdbserver_fd < 0 || s->fd < 0)
2837 return 0;
2838 else
2839 return 1;
2840 }
2841
2842 int
2843 gdb_handlesig(CPUState *cpu, int sig)
2844 {
2845 CPUArchState *env = cpu->env_ptr;
2846 GDBState *s;
2847 char buf[256];
2848 int n;
2849
2850 s = gdbserver_state;
2851 if (gdbserver_fd < 0 || s->fd < 0) {
2852 return sig;
2853 }
2854
2855 /* disable single step if it was enabled */
2856 cpu_single_step(cpu, 0);
2857 tb_flush(env);
2858
2859 if (sig != 0) {
2860 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig));
2861 put_packet(s, buf);
2862 }
2863 /* put_packet() might have detected that the peer terminated the
2864 connection. */
2865 if (s->fd < 0) {
2866 return sig;
2867 }
2868
2869 sig = 0;
2870 s->state = RS_IDLE;
2871 s->running_state = 0;
2872 while (s->running_state == 0) {
2873 n = read(s->fd, buf, 256);
2874 if (n > 0) {
2875 int i;
2876
2877 for (i = 0; i < n; i++) {
2878 gdb_read_byte(s, buf[i]);
2879 }
2880 } else if (n == 0 || errno != EAGAIN) {
2881 /* XXX: Connection closed. Should probably wait for another
2882 connection before continuing. */
2883 return sig;
2884 }
2885 }
2886 sig = s->signal;
2887 s->signal = 0;
2888 return sig;
2889 }
2890
2891 /* Tell the remote gdb that the process has exited due to SIG. */
2892 void gdb_signalled(CPUArchState *env, int sig)
2893 {
2894 GDBState *s;
2895 char buf[4];
2896
2897 s = gdbserver_state;
2898 if (gdbserver_fd < 0 || s->fd < 0) {
2899 return;
2900 }
2901
2902 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb(sig));
2903 put_packet(s, buf);
2904 }
2905
2906 static void gdb_accept(void)
2907 {
2908 GDBState *s;
2909 struct sockaddr_in sockaddr;
2910 socklen_t len;
2911 int fd;
2912
2913 for(;;) {
2914 len = sizeof(sockaddr);
2915 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2916 if (fd < 0 && errno != EINTR) {
2917 perror("accept");
2918 return;
2919 } else if (fd >= 0) {
2920 #ifndef _WIN32
2921 fcntl(fd, F_SETFD, FD_CLOEXEC);
2922 #endif
2923 break;
2924 }
2925 }
2926
2927 /* set short latency: disable Nagle (TCP_NODELAY) */
2928 socket_set_nodelay(fd);
2929
2930 s = g_malloc0(sizeof(GDBState));
2931 s->c_cpu = first_cpu;
2932 s->g_cpu = first_cpu;
2933 s->fd = fd;
2934 gdb_has_xml = 0;
2935
2936 gdbserver_state = s;
2937
2938 fcntl(fd, F_SETFL, O_NONBLOCK);
2939 }
2940
2941 static int gdbserver_open(int port)
2942 {
2943 struct sockaddr_in sockaddr;
2944 int fd, val, ret;
2945
2946 fd = socket(PF_INET, SOCK_STREAM, 0);
2947 if (fd < 0) {
2948 perror("socket");
2949 return -1;
2950 }
2951 #ifndef _WIN32
2952 fcntl(fd, F_SETFD, FD_CLOEXEC);
2953 #endif
2954
2955 /* allow fast reuse */
2956 val = 1;
2957 qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
2958
2959 sockaddr.sin_family = AF_INET;
2960 sockaddr.sin_port = htons(port);
2961 sockaddr.sin_addr.s_addr = 0;
2962 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2963 if (ret < 0) {
2964 perror("bind");
2965 close(fd);
2966 return -1;
2967 }
2968 ret = listen(fd, 0);
2969 if (ret < 0) {
2970 perror("listen");
2971 close(fd);
2972 return -1;
2973 }
2974 return fd;
2975 }
2976
2977 int gdbserver_start(int port)
2978 {
2979 gdbserver_fd = gdbserver_open(port);
2980 if (gdbserver_fd < 0)
2981 return -1;
2982 /* accept connections */
2983 gdb_accept();
2984 return 0;
2985 }
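/* Example (user-mode emulation, paths are illustrative): "qemu-arm -g 1234
 * ./a.out" ends up calling gdbserver_start(1234), which blocks in
 * gdb_accept() until a debugger connects, e.g. "gdb ./a.out" followed by
 * "target remote localhost:1234". */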
2986
2987 /* Disable gdb stub for child processes. */
2988 void gdbserver_fork(CPUArchState *env)
2989 {
2990 GDBState *s = gdbserver_state;
2991 if (gdbserver_fd < 0 || s->fd < 0)
2992 return;
2993 close(s->fd);
2994 s->fd = -1;
2995 cpu_breakpoint_remove_all(env, BP_GDB);
2996 cpu_watchpoint_remove_all(env, BP_GDB);
2997 }
2998 #else
2999 static int gdb_chr_can_receive(void *opaque)
3000 {
3001 /* We can handle an arbitrarily large amount of data.
3002 Pick the maximum packet size, which is as good as anything. */
3003 return MAX_PACKET_LENGTH;
3004 }
3005
3006 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
3007 {
3008 int i;
3009
3010 for (i = 0; i < size; i++) {
3011 gdb_read_byte(gdbserver_state, buf[i]);
3012 }
3013 }
3014
3015 static void gdb_chr_event(void *opaque, int event)
3016 {
3017 switch (event) {
3018 case CHR_EVENT_OPENED:
3019 vm_stop(RUN_STATE_PAUSED);
3020 gdb_has_xml = 0;
3021 break;
3022 default:
3023 break;
3024 }
3025 }
3026
3027 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
3028 {
3029 char buf[MAX_PACKET_LENGTH];
3030
3031 buf[0] = 'O';
3032 if (len > (MAX_PACKET_LENGTH/2) - 1)
3033 len = (MAX_PACKET_LENGTH/2) - 1;
3034 memtohex(buf + 1, (uint8_t *)msg, len);
3035 put_packet(s, buf);
3036 }
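/* For instance, gdb_monitor_output(s, "hi\n", 3) sends the console-output
 * packet "O68690a" (the text hex-encoded), which gdb prints on the user's
 * terminal instead of treating it as a command reply. */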
3037
3038 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
3039 {
3040 const char *p = (const char *)buf;
3041 int max_sz;
3042
3043 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
3044 for (;;) {
3045 if (len <= max_sz) {
3046 gdb_monitor_output(gdbserver_state, p, len);
3047 break;
3048 }
3049 gdb_monitor_output(gdbserver_state, p, max_sz);
3050 p += max_sz;
3051 len -= max_sz;
3052 }
3053 return len;
3054 }
3055
3056 #ifndef _WIN32
3057 static void gdb_sigterm_handler(int signal)
3058 {
3059 if (runstate_is_running()) {
3060 vm_stop(RUN_STATE_PAUSED);
3061 }
3062 }
3063 #endif
3064
3065 int gdbserver_start(const char *device)
3066 {
3067 GDBState *s;
3068 char gdbstub_device_name[128];
3069 CharDriverState *chr = NULL;
3070 CharDriverState *mon_chr;
3071
3072 if (!device)
3073 return -1;
3074 if (strcmp(device, "none") != 0) {
3075 if (strstart(device, "tcp:", NULL)) {
3076 /* enforce required TCP attributes */
3077 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3078 "%s,nowait,nodelay,server", device);
3079 device = gdbstub_device_name;
3080 }
3081 #ifndef _WIN32
3082 else if (strcmp(device, "stdio") == 0) {
3083 struct sigaction act;
3084
3085 memset(&act, 0, sizeof(act));
3086 act.sa_handler = gdb_sigterm_handler;
3087 sigaction(SIGINT, &act, NULL);
3088 }
3089 #endif
3090 chr = qemu_chr_new("gdb", device, NULL);
3091 if (!chr)
3092 return -1;
3093
3094 qemu_chr_fe_claim_no_fail(chr);
3095 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3096 gdb_chr_event, NULL);
3097 }
3098
3099 s = gdbserver_state;
3100 if (!s) {
3101 s = g_malloc0(sizeof(GDBState));
3102 gdbserver_state = s;
3103
3104 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3105
3106 /* Initialize a monitor terminal for gdb */
3107 mon_chr = g_malloc0(sizeof(*mon_chr));
3108 mon_chr->chr_write = gdb_monitor_write;
3109 monitor_init(mon_chr, 0);
3110 } else {
3111 if (s->chr)
3112 qemu_chr_delete(s->chr);
3113 mon_chr = s->mon_chr;
3114 memset(s, 0, sizeof(GDBState));
3115 }
3116 s->c_cpu = first_cpu;
3117 s->g_cpu = first_cpu;
3118 s->chr = chr;
3119 s->state = chr ? RS_IDLE : RS_INACTIVE;
3120 s->mon_chr = mon_chr;
3121 s->current_syscall_cb = NULL;
3122
3123 return 0;
3124 }
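/* Example (system emulation): "-gdb tcp::1234" (or the "-s" shorthand)
 * reaches gdbserver_start("tcp::1234"); the tcp: branch above appends the
 * nowait,nodelay,server options before creating the chardev. Connect with
 * "target remote localhost:1234" from gdb; passing "stdio" instead runs the
 * protocol over the terminal. */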
3125 #endif