1 /*
2 * gdb server stub
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "qemu.h"
31 #else
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
36 #endif
37
38 #define MAX_PACKET_LENGTH 4096
39
40 #include "cpu.h"
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
44
45 static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
46 uint8_t *buf, int len, bool is_write)
47 {
48 CPUClass *cc = CPU_GET_CLASS(cpu);
49
50 if (cc->memory_rw_debug) {
51 return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
52 }
53 return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
54 }
55
56 enum {
57 GDB_SIGNAL_0 = 0,
58 GDB_SIGNAL_INT = 2,
59 GDB_SIGNAL_QUIT = 3,
60 GDB_SIGNAL_TRAP = 5,
61 GDB_SIGNAL_ABRT = 6,
62 GDB_SIGNAL_ALRM = 14,
63 GDB_SIGNAL_IO = 23,
64 GDB_SIGNAL_XCPU = 24,
65 GDB_SIGNAL_UNKNOWN = 143
66 };
67
68 #ifdef CONFIG_USER_ONLY
69
70 /* Map target signal numbers to GDB protocol signal numbers and vice
71 * versa. For user emulation's currently supported systems, we can
72 * assume most signals are defined.
73 */
74
75 static int gdb_signal_table[] = {
76 0,
77 TARGET_SIGHUP,
78 TARGET_SIGINT,
79 TARGET_SIGQUIT,
80 TARGET_SIGILL,
81 TARGET_SIGTRAP,
82 TARGET_SIGABRT,
83 -1, /* SIGEMT */
84 TARGET_SIGFPE,
85 TARGET_SIGKILL,
86 TARGET_SIGBUS,
87 TARGET_SIGSEGV,
88 TARGET_SIGSYS,
89 TARGET_SIGPIPE,
90 TARGET_SIGALRM,
91 TARGET_SIGTERM,
92 TARGET_SIGURG,
93 TARGET_SIGSTOP,
94 TARGET_SIGTSTP,
95 TARGET_SIGCONT,
96 TARGET_SIGCHLD,
97 TARGET_SIGTTIN,
98 TARGET_SIGTTOU,
99 TARGET_SIGIO,
100 TARGET_SIGXCPU,
101 TARGET_SIGXFSZ,
102 TARGET_SIGVTALRM,
103 TARGET_SIGPROF,
104 TARGET_SIGWINCH,
105 -1, /* SIGLOST */
106 TARGET_SIGUSR1,
107 TARGET_SIGUSR2,
108 #ifdef TARGET_SIGPWR
109 TARGET_SIGPWR,
110 #else
111 -1,
112 #endif
113 -1, /* SIGPOLL */
114 -1,
115 -1,
116 -1,
117 -1,
118 -1,
119 -1,
120 -1,
121 -1,
122 -1,
123 -1,
124 -1,
125 #ifdef __SIGRTMIN
126 __SIGRTMIN + 1,
127 __SIGRTMIN + 2,
128 __SIGRTMIN + 3,
129 __SIGRTMIN + 4,
130 __SIGRTMIN + 5,
131 __SIGRTMIN + 6,
132 __SIGRTMIN + 7,
133 __SIGRTMIN + 8,
134 __SIGRTMIN + 9,
135 __SIGRTMIN + 10,
136 __SIGRTMIN + 11,
137 __SIGRTMIN + 12,
138 __SIGRTMIN + 13,
139 __SIGRTMIN + 14,
140 __SIGRTMIN + 15,
141 __SIGRTMIN + 16,
142 __SIGRTMIN + 17,
143 __SIGRTMIN + 18,
144 __SIGRTMIN + 19,
145 __SIGRTMIN + 20,
146 __SIGRTMIN + 21,
147 __SIGRTMIN + 22,
148 __SIGRTMIN + 23,
149 __SIGRTMIN + 24,
150 __SIGRTMIN + 25,
151 __SIGRTMIN + 26,
152 __SIGRTMIN + 27,
153 __SIGRTMIN + 28,
154 __SIGRTMIN + 29,
155 __SIGRTMIN + 30,
156 __SIGRTMIN + 31,
157 -1, /* SIGCANCEL */
158 __SIGRTMIN,
159 __SIGRTMIN + 32,
160 __SIGRTMIN + 33,
161 __SIGRTMIN + 34,
162 __SIGRTMIN + 35,
163 __SIGRTMIN + 36,
164 __SIGRTMIN + 37,
165 __SIGRTMIN + 38,
166 __SIGRTMIN + 39,
167 __SIGRTMIN + 40,
168 __SIGRTMIN + 41,
169 __SIGRTMIN + 42,
170 __SIGRTMIN + 43,
171 __SIGRTMIN + 44,
172 __SIGRTMIN + 45,
173 __SIGRTMIN + 46,
174 __SIGRTMIN + 47,
175 __SIGRTMIN + 48,
176 __SIGRTMIN + 49,
177 __SIGRTMIN + 50,
178 __SIGRTMIN + 51,
179 __SIGRTMIN + 52,
180 __SIGRTMIN + 53,
181 __SIGRTMIN + 54,
182 __SIGRTMIN + 55,
183 __SIGRTMIN + 56,
184 __SIGRTMIN + 57,
185 __SIGRTMIN + 58,
186 __SIGRTMIN + 59,
187 __SIGRTMIN + 60,
188 __SIGRTMIN + 61,
189 __SIGRTMIN + 62,
190 __SIGRTMIN + 63,
191 __SIGRTMIN + 64,
192 __SIGRTMIN + 65,
193 __SIGRTMIN + 66,
194 __SIGRTMIN + 67,
195 __SIGRTMIN + 68,
196 __SIGRTMIN + 69,
197 __SIGRTMIN + 70,
198 __SIGRTMIN + 71,
199 __SIGRTMIN + 72,
200 __SIGRTMIN + 73,
201 __SIGRTMIN + 74,
202 __SIGRTMIN + 75,
203 __SIGRTMIN + 76,
204 __SIGRTMIN + 77,
205 __SIGRTMIN + 78,
206 __SIGRTMIN + 79,
207 __SIGRTMIN + 80,
208 __SIGRTMIN + 81,
209 __SIGRTMIN + 82,
210 __SIGRTMIN + 83,
211 __SIGRTMIN + 84,
212 __SIGRTMIN + 85,
213 __SIGRTMIN + 86,
214 __SIGRTMIN + 87,
215 __SIGRTMIN + 88,
216 __SIGRTMIN + 89,
217 __SIGRTMIN + 90,
218 __SIGRTMIN + 91,
219 __SIGRTMIN + 92,
220 __SIGRTMIN + 93,
221 __SIGRTMIN + 94,
222 __SIGRTMIN + 95,
223 -1, /* SIGINFO */
224 -1, /* UNKNOWN */
225 -1, /* DEFAULT */
226 -1,
227 -1,
228 -1,
229 -1,
230 -1,
231 -1
232 #endif
233 };
234 #else
235 /* In system mode we only need SIGINT and SIGTRAP; other signals
236 are not yet supported. */
237
238 enum {
239 TARGET_SIGINT = 2,
240 TARGET_SIGTRAP = 5
241 };
242
243 static int gdb_signal_table[] = {
244 -1,
245 -1,
246 TARGET_SIGINT,
247 -1,
248 -1,
249 TARGET_SIGTRAP
250 };
251 #endif
252
253 #ifdef CONFIG_USER_ONLY
254 static int target_signal_to_gdb (int sig)
255 {
256 int i;
257 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
258 if (gdb_signal_table[i] == sig)
259 return i;
260 return GDB_SIGNAL_UNKNOWN;
261 }
262 #endif
263
264 static int gdb_signal_to_target (int sig)
265 {
266 if (sig < ARRAY_SIZE (gdb_signal_table))
267 return gdb_signal_table[sig];
268 else
269 return -1;
270 }
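/* Worked example (editor's note, added for clarity): the helpers above just
 * index/scan gdb_signal_table.  In system mode, gdb_signal_to_target(
 * GDB_SIGNAL_TRAP) returns TARGET_SIGTRAP (5), while any signal number
 * outside the table, e.g. GDB_SIGNAL_UNKNOWN (143), yields -1.
 */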
271
272 //#define DEBUG_GDB
273
274 typedef struct GDBRegisterState {
275 int base_reg;
276 int num_regs;
277 gdb_reg_cb get_reg;
278 gdb_reg_cb set_reg;
279 const char *xml;
280 struct GDBRegisterState *next;
281 } GDBRegisterState;
282
283 enum RSState {
284 RS_INACTIVE,
285 RS_IDLE,
286 RS_GETLINE,
287 RS_CHKSUM1,
288 RS_CHKSUM2,
289 };
290 typedef struct GDBState {
291 CPUState *c_cpu; /* current CPU for step/continue ops */
292 CPUState *g_cpu; /* current CPU for other ops */
293 CPUState *query_cpu; /* for q{f|s}ThreadInfo */
294 enum RSState state; /* parsing state */
295 char line_buf[MAX_PACKET_LENGTH];
296 int line_buf_index;
297 int line_csum;
298 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
299 int last_packet_len;
300 int signal;
301 #ifdef CONFIG_USER_ONLY
302 int fd;
303 int running_state;
304 #else
305 CharDriverState *chr;
306 CharDriverState *mon_chr;
307 #endif
308 char syscall_buf[256];
309 gdb_syscall_complete_cb current_syscall_cb;
310 } GDBState;
311
312 /* By default use no IRQs and no timers while single stepping so as to
313 * make single stepping like an ICE HW step.
314 */
315 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
316
317 static GDBState *gdbserver_state;
318
319 /* This is an ugly hack to cope with both new and old gdb.
320 If gdb sends qXfer:features:read then assume we're talking to a newish
321 gdb that understands target descriptions. */
322 static int gdb_has_xml;
323
324 #ifdef CONFIG_USER_ONLY
325 /* XXX: This is not thread safe. Do we care? */
326 static int gdbserver_fd = -1;
327
328 static int get_char(GDBState *s)
329 {
330 uint8_t ch;
331 int ret;
332
333 for(;;) {
334 ret = qemu_recv(s->fd, &ch, 1, 0);
335 if (ret < 0) {
336 if (errno == ECONNRESET)
337 s->fd = -1;
338 if (errno != EINTR && errno != EAGAIN)
339 return -1;
340 } else if (ret == 0) {
341 close(s->fd);
342 s->fd = -1;
343 return -1;
344 } else {
345 break;
346 }
347 }
348 return ch;
349 }
350 #endif
351
352 static enum {
353 GDB_SYS_UNKNOWN,
354 GDB_SYS_ENABLED,
355 GDB_SYS_DISABLED,
356 } gdb_syscall_mode;
357
358 /* If gdb is connected when the first semihosting syscall occurs then use
359 remote gdb syscalls. Otherwise use native file IO. */
360 int use_gdb_syscalls(void)
361 {
362 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
363 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
364 : GDB_SYS_DISABLED);
365 }
366 return gdb_syscall_mode == GDB_SYS_ENABLED;
367 }
368
369 /* Resume execution. */
370 static inline void gdb_continue(GDBState *s)
371 {
372 #ifdef CONFIG_USER_ONLY
373 s->running_state = 1;
374 #else
375 if (runstate_check(RUN_STATE_GUEST_PANICKED)) {
376 runstate_set(RUN_STATE_DEBUG);
377 }
378 if (!runstate_needs_reset()) {
379 vm_start();
380 }
381 #endif
382 }
383
384 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
385 {
386 #ifdef CONFIG_USER_ONLY
387 int ret;
388
389 while (len > 0) {
390 ret = send(s->fd, buf, len, 0);
391 if (ret < 0) {
392 if (errno != EINTR && errno != EAGAIN)
393 return;
394 } else {
395 buf += ret;
396 len -= ret;
397 }
398 }
399 #else
400 qemu_chr_fe_write(s->chr, buf, len);
401 #endif
402 }
403
404 static inline int fromhex(int v)
405 {
406 if (v >= '0' && v <= '9')
407 return v - '0';
408 else if (v >= 'A' && v <= 'F')
409 return v - 'A' + 10;
410 else if (v >= 'a' && v <= 'f')
411 return v - 'a' + 10;
412 else
413 return 0;
414 }
415
416 static inline int tohex(int v)
417 {
418 if (v < 10)
419 return v + '0';
420 else
421 return v - 10 + 'a';
422 }
423
424 static void memtohex(char *buf, const uint8_t *mem, int len)
425 {
426 int i, c;
427 char *q;
428 q = buf;
429 for(i = 0; i < len; i++) {
430 c = mem[i];
431 *q++ = tohex(c >> 4);
432 *q++ = tohex(c & 0xf);
433 }
434 *q = '\0';
435 }
436
437 static void hextomem(uint8_t *mem, const char *buf, int len)
438 {
439 int i;
440
441 for(i = 0; i < len; i++) {
442 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
443 buf += 2;
444 }
445 }
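/* Worked example (editor's note, added for clarity): memtohex() expands each
 * byte into two lower-case hex digits, so the bytes {0x4f, 0x4b} become the
 * string "4f4b"; hextomem() is the inverse.  Callers must size the output of
 * memtohex() as len * 2 + 1 bytes to leave room for the terminating NUL.
 */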
446
447 /* return -1 if error, 0 if OK */
448 static int put_packet_binary(GDBState *s, const char *buf, int len)
449 {
450 int csum, i;
451 uint8_t *p;
452
453 for(;;) {
454 p = s->last_packet;
455 *(p++) = '$';
456 memcpy(p, buf, len);
457 p += len;
458 csum = 0;
459 for(i = 0; i < len; i++) {
460 csum += buf[i];
461 }
462 *(p++) = '#';
463 *(p++) = tohex((csum >> 4) & 0xf);
464 *(p++) = tohex((csum) & 0xf);
465
466 s->last_packet_len = p - s->last_packet;
467 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
468
469 #ifdef CONFIG_USER_ONLY
470 i = get_char(s);
471 if (i < 0)
472 return -1;
473 if (i == '+')
474 break;
475 #else
476 break;
477 #endif
478 }
479 return 0;
480 }
481
482 /* return -1 if error, 0 if OK */
483 static int put_packet(GDBState *s, const char *buf)
484 {
485 #ifdef DEBUG_GDB
486 printf("reply='%s'\n", buf);
487 #endif
488
489 return put_packet_binary(s, buf, strlen(buf));
490 }
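/* Worked example (editor's note, added for clarity): put_packet() frames its
 * payload as '$' <payload> '#' <checksum>, where the checksum is the sum of
 * the payload bytes modulo 256, printed as two hex digits.  Sending "OK"
 * therefore puts "$OK#9a" on the wire (0x4f + 0x4b = 0x9a); in user-mode
 * emulation put_packet_binary() then waits for gdb's '+' acknowledgement.
 */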
491
492 /* The GDB remote protocol transfers values in target byte order. This means
493 we can use the raw memory access routines to access the value buffer.
494 Conveniently, these also handle the case where the buffer is mis-aligned.
495 */
496 #define GET_REG8(val) do { \
497 stb_p(mem_buf, val); \
498 return 1; \
499 } while(0)
500 #define GET_REG16(val) do { \
501 stw_p(mem_buf, val); \
502 return 2; \
503 } while(0)
504 #define GET_REG32(val) do { \
505 stl_p(mem_buf, val); \
506 return 4; \
507 } while(0)
508 #define GET_REG64(val) do { \
509 stq_p(mem_buf, val); \
510 return 8; \
511 } while(0)
512
513 #if TARGET_LONG_BITS == 64
514 #define GET_REGL(val) GET_REG64(val)
515 #define ldtul_p(addr) ldq_p(addr)
516 #else
517 #define GET_REGL(val) GET_REG32(val)
518 #define ldtul_p(addr) ldl_p(addr)
519 #endif
520
521 #if defined(TARGET_I386)
522
523 #ifdef TARGET_X86_64
524 static const int gpr_map[16] = {
525 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
526 8, 9, 10, 11, 12, 13, 14, 15
527 };
528 #else
529 #define gpr_map gpr_map32
530 #endif
531 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
532
533 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
534
535 #define IDX_IP_REG CPU_NB_REGS
536 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
537 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
538 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
539 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
540 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
541
542 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
543 {
544 if (n < CPU_NB_REGS) {
545 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
546 GET_REG64(env->regs[gpr_map[n]]);
547 } else if (n < CPU_NB_REGS32) {
548 GET_REG32(env->regs[gpr_map32[n]]);
549 }
550 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
551 #ifdef USE_X86LDOUBLE
552 /* FIXME: byteswap float values - after fixing fpregs layout. */
553 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
554 #else
555 memset(mem_buf, 0, 10);
556 #endif
557 return 10;
558 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
559 n -= IDX_XMM_REGS;
560 if (n < CPU_NB_REGS32 ||
561 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
562 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
563 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
564 return 16;
565 }
566 } else {
567 switch (n) {
568 case IDX_IP_REG:
569 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
570 GET_REG64(env->eip);
571 } else {
572 GET_REG32(env->eip);
573 }
574 case IDX_FLAGS_REG:
575 GET_REG32(env->eflags);
576
577 case IDX_SEG_REGS:
578 GET_REG32(env->segs[R_CS].selector);
579 case IDX_SEG_REGS + 1:
580 GET_REG32(env->segs[R_SS].selector);
581 case IDX_SEG_REGS + 2:
582 GET_REG32(env->segs[R_DS].selector);
583 case IDX_SEG_REGS + 3:
584 GET_REG32(env->segs[R_ES].selector);
585 case IDX_SEG_REGS + 4:
586 GET_REG32(env->segs[R_FS].selector);
587 case IDX_SEG_REGS + 5:
588 GET_REG32(env->segs[R_GS].selector);
589
590 case IDX_FP_REGS + 8:
591 GET_REG32(env->fpuc);
592 case IDX_FP_REGS + 9:
593 GET_REG32((env->fpus & ~0x3800) |
594 (env->fpstt & 0x7) << 11);
595 case IDX_FP_REGS + 10:
596 GET_REG32(0); /* ftag */
597 case IDX_FP_REGS + 11:
598 GET_REG32(0); /* fiseg */
599 case IDX_FP_REGS + 12:
600 GET_REG32(0); /* fioff */
601 case IDX_FP_REGS + 13:
602 GET_REG32(0); /* foseg */
603 case IDX_FP_REGS + 14:
604 GET_REG32(0); /* fooff */
605 case IDX_FP_REGS + 15:
606 GET_REG32(0); /* fop */
607
608 case IDX_MXCSR_REG:
609 GET_REG32(env->mxcsr);
610 }
611 }
612 return 0;
613 }
614
615 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
616 {
617 uint16_t selector = ldl_p(mem_buf);
618
619 if (selector != env->segs[sreg].selector) {
620 #if defined(CONFIG_USER_ONLY)
621 cpu_x86_load_seg(env, sreg, selector);
622 #else
623 unsigned int limit, flags;
624 target_ulong base;
625
626 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
627 base = selector << 4;
628 limit = 0xffff;
629 flags = 0;
630 } else {
631 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
632 &flags)) {
633 return 4;
634 }
635 }
636 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
637 #endif
638 }
639 return 4;
640 }
641
642 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
643 {
644 uint32_t tmp;
645
646 if (n < CPU_NB_REGS) {
647 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
648 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
649 return sizeof(target_ulong);
650 } else if (n < CPU_NB_REGS32) {
651 n = gpr_map32[n];
652 env->regs[n] &= ~0xffffffffUL;
653 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
654 return 4;
655 }
656 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
657 #ifdef USE_X86LDOUBLE
658 /* FIXME: byteswap float values - after fixing fpregs layout. */
659 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
660 #endif
661 return 10;
662 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
663 n -= IDX_XMM_REGS;
664 if (n < CPU_NB_REGS32 ||
665 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
666 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
667 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
668 return 16;
669 }
670 } else {
671 switch (n) {
672 case IDX_IP_REG:
673 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
674 env->eip = ldq_p(mem_buf);
675 return 8;
676 } else {
677 env->eip &= ~0xffffffffUL;
678 env->eip |= (uint32_t)ldl_p(mem_buf);
679 return 4;
680 }
681 case IDX_FLAGS_REG:
682 env->eflags = ldl_p(mem_buf);
683 return 4;
684
685 case IDX_SEG_REGS:
686 return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
687 case IDX_SEG_REGS + 1:
688 return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
689 case IDX_SEG_REGS + 2:
690 return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
691 case IDX_SEG_REGS + 3:
692 return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
693 case IDX_SEG_REGS + 4:
694 return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
695 case IDX_SEG_REGS + 5:
696 return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
697
698 case IDX_FP_REGS + 8:
699 env->fpuc = ldl_p(mem_buf);
700 return 4;
701 case IDX_FP_REGS + 9:
702 tmp = ldl_p(mem_buf);
703 env->fpstt = (tmp >> 11) & 7;
704 env->fpus = tmp & ~0x3800;
705 return 4;
706 case IDX_FP_REGS + 10: /* ftag */
707 return 4;
708 case IDX_FP_REGS + 11: /* fiseg */
709 return 4;
710 case IDX_FP_REGS + 12: /* fioff */
711 return 4;
712 case IDX_FP_REGS + 13: /* foseg */
713 return 4;
714 case IDX_FP_REGS + 14: /* fooff */
715 return 4;
716 case IDX_FP_REGS + 15: /* fop */
717 return 4;
718
719 case IDX_MXCSR_REG:
720 env->mxcsr = ldl_p(mem_buf);
721 return 4;
722 }
723 }
724 /* Unrecognised register. */
725 return 0;
726 }
727
728 #elif defined (TARGET_PPC)
729
730 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
731 expects whatever the target description contains. Due to a
732 historical mishap the FP registers appear in between core integer
733 regs and PC, MSR, CR, and so forth. We hack round this by giving the
734 FP regs zero size when talking to a newer gdb. */
735 #define NUM_CORE_REGS 71
736 #if defined (TARGET_PPC64)
737 #define GDB_CORE_XML "power64-core.xml"
738 #else
739 #define GDB_CORE_XML "power-core.xml"
740 #endif
741
742 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
743 {
744 if (n < 32) {
745 /* gprs */
746 GET_REGL(env->gpr[n]);
747 } else if (n < 64) {
748 /* fprs */
749 if (gdb_has_xml) {
750 return 0;
751 }
752 stfq_p(mem_buf, env->fpr[n-32]);
753 return 8;
754 } else {
755 switch (n) {
756 case 64:
757 GET_REGL(env->nip);
758 case 65:
759 GET_REGL(env->msr);
760 case 66:
761 {
762 uint32_t cr = 0;
763 int i;
764 for (i = 0; i < 8; i++) {
765 cr |= env->crf[i] << (32 - ((i + 1) * 4));
766 }
767 GET_REG32(cr);
768 }
769 case 67:
770 GET_REGL(env->lr);
771 case 68:
772 GET_REGL(env->ctr);
773 case 69:
774 GET_REGL(env->xer);
775 case 70:
776 {
777 if (gdb_has_xml) {
778 return 0;
779 }
780 GET_REG32(env->fpscr);
781 }
782 }
783 }
784 return 0;
785 }
786
787 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
788 {
789 if (n < 32) {
790 /* gprs */
791 env->gpr[n] = ldtul_p(mem_buf);
792 return sizeof(target_ulong);
793 } else if (n < 64) {
794 /* fprs */
795 if (gdb_has_xml) {
796 return 0;
797 }
798 env->fpr[n-32] = ldfq_p(mem_buf);
799 return 8;
800 } else {
801 switch (n) {
802 case 64:
803 env->nip = ldtul_p(mem_buf);
804 return sizeof(target_ulong);
805 case 65:
806 ppc_store_msr(env, ldtul_p(mem_buf));
807 return sizeof(target_ulong);
808 case 66:
809 {
810 uint32_t cr = ldl_p(mem_buf);
811 int i;
812 for (i = 0; i < 8; i++) {
813 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
814 }
815 return 4;
816 }
817 case 67:
818 env->lr = ldtul_p(mem_buf);
819 return sizeof(target_ulong);
820 case 68:
821 env->ctr = ldtul_p(mem_buf);
822 return sizeof(target_ulong);
823 case 69:
824 env->xer = ldtul_p(mem_buf);
825 return sizeof(target_ulong);
826 case 70:
827 /* fpscr */
828 if (gdb_has_xml) {
829 return 0;
830 }
831 store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
832 return sizeof(target_ulong);
833 }
834 }
835 return 0;
836 }
837
838 #elif defined (TARGET_SPARC)
839
840 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
841 #define NUM_CORE_REGS 86
842 #else
843 #define NUM_CORE_REGS 72
844 #endif
845
846 #ifdef TARGET_ABI32
847 #define GET_REGA(val) GET_REG32(val)
848 #else
849 #define GET_REGA(val) GET_REGL(val)
850 #endif
851
852 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
853 {
854 if (n < 8) {
855 /* g0..g7 */
856 GET_REGA(env->gregs[n]);
857 }
858 if (n < 32) {
859 /* register window */
860 GET_REGA(env->regwptr[n - 8]);
861 }
862 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
863 if (n < 64) {
864 /* fprs */
865 if (n & 1) {
866 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
867 } else {
868 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
869 }
870 }
871 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
872 switch (n) {
873 case 64:
874 GET_REGA(env->y);
875 case 65:
876 GET_REGA(cpu_get_psr(env));
877 case 66:
878 GET_REGA(env->wim);
879 case 67:
880 GET_REGA(env->tbr);
881 case 68:
882 GET_REGA(env->pc);
883 case 69:
884 GET_REGA(env->npc);
885 case 70:
886 GET_REGA(env->fsr);
887 case 71:
888 GET_REGA(0); /* csr */
889 default:
890 GET_REGA(0);
891 }
892 #else
893 if (n < 64) {
894 /* f0-f31 */
895 if (n & 1) {
896 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
897 } else {
898 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
899 }
900 }
901 if (n < 80) {
902 /* f32-f62 (double width, even numbers only) */
903 GET_REG64(env->fpr[(n - 32) / 2].ll);
904 }
905 switch (n) {
906 case 80:
907 GET_REGL(env->pc);
908 case 81:
909 GET_REGL(env->npc);
910 case 82:
911 GET_REGL((cpu_get_ccr(env) << 32) |
912 ((env->asi & 0xff) << 24) |
913 ((env->pstate & 0xfff) << 8) |
914 cpu_get_cwp64(env));
915 case 83:
916 GET_REGL(env->fsr);
917 case 84:
918 GET_REGL(env->fprs);
919 case 85:
920 GET_REGL(env->y);
921 }
922 #endif
923 return 0;
924 }
925
926 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
927 {
928 #if defined(TARGET_ABI32)
929 abi_ulong tmp;
930
931 tmp = ldl_p(mem_buf);
932 #else
933 target_ulong tmp;
934
935 tmp = ldtul_p(mem_buf);
936 #endif
937
938 if (n < 8) {
939 /* g0..g7 */
940 env->gregs[n] = tmp;
941 } else if (n < 32) {
942 /* register window */
943 env->regwptr[n - 8] = tmp;
944 }
945 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
946 else if (n < 64) {
947 /* fprs */
948 /* f0-f31 */
949 if (n & 1) {
950 env->fpr[(n - 32) / 2].l.lower = tmp;
951 } else {
952 env->fpr[(n - 32) / 2].l.upper = tmp;
953 }
954 } else {
955 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
956 switch (n) {
957 case 64:
958 env->y = tmp;
959 break;
960 case 65:
961 cpu_put_psr(env, tmp);
962 break;
963 case 66:
964 env->wim = tmp;
965 break;
966 case 67:
967 env->tbr = tmp;
968 break;
969 case 68:
970 env->pc = tmp;
971 break;
972 case 69:
973 env->npc = tmp;
974 break;
975 case 70:
976 env->fsr = tmp;
977 break;
978 default:
979 return 0;
980 }
981 }
982 return 4;
983 #else
984 else if (n < 64) {
985 /* f0-f31 */
986 tmp = ldl_p(mem_buf);
987 if (n & 1) {
988 env->fpr[(n - 32) / 2].l.lower = tmp;
989 } else {
990 env->fpr[(n - 32) / 2].l.upper = tmp;
991 }
992 return 4;
993 } else if (n < 80) {
994 /* f32-f62 (double width, even numbers only) */
995 env->fpr[(n - 32) / 2].ll = tmp;
996 } else {
997 switch (n) {
998 case 80:
999 env->pc = tmp;
1000 break;
1001 case 81:
1002 env->npc = tmp;
1003 break;
1004 case 82:
1005 cpu_put_ccr(env, tmp >> 32);
1006 env->asi = (tmp >> 24) & 0xff;
1007 env->pstate = (tmp >> 8) & 0xfff;
1008 cpu_put_cwp64(env, tmp & 0xff);
1009 break;
1010 case 83:
1011 env->fsr = tmp;
1012 break;
1013 case 84:
1014 env->fprs = tmp;
1015 break;
1016 case 85:
1017 env->y = tmp;
1018 break;
1019 default:
1020 return 0;
1021 }
1022 }
1023 return 8;
1024 #endif
1025 }
1026 #elif defined (TARGET_ARM)
1027
1028 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
1029 whatever the target description contains. Due to a historical mishap
1030 the FPA registers appear in between core integer regs and the CPSR.
1031 We hack round this by giving the FPA regs zero size when talking to a
1032 newer gdb. */
1033 #define NUM_CORE_REGS 26
1034 #define GDB_CORE_XML "arm-core.xml"
1035
1036 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
1037 {
1038 if (n < 16) {
1039 /* Core integer register. */
1040 GET_REG32(env->regs[n]);
1041 }
1042 if (n < 24) {
1043 /* FPA registers. */
1044 if (gdb_has_xml) {
1045 return 0;
1046 }
1047 memset(mem_buf, 0, 12);
1048 return 12;
1049 }
1050 switch (n) {
1051 case 24:
1052 /* FPA status register. */
1053 if (gdb_has_xml) {
1054 return 0;
1055 }
1056 GET_REG32(0);
1057 case 25:
1058 /* CPSR */
1059 GET_REG32(cpsr_read(env));
1060 }
1061 /* Unknown register. */
1062 return 0;
1063 }
1064
1065 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
1066 {
1067 uint32_t tmp;
1068
1069 tmp = ldl_p(mem_buf);
1070
1071 /* Mask out low bit of PC to work around gdb bugs. This will probably
1072 cause problems if we ever implement the Jazelle DBX extensions. */
1073 if (n == 15) {
1074 tmp &= ~1;
1075 }
1076
1077 if (n < 16) {
1078 /* Core integer register. */
1079 env->regs[n] = tmp;
1080 return 4;
1081 }
1082 if (n < 24) { /* 16-23 */
1083 /* FPA registers (ignored). */
1084 if (gdb_has_xml) {
1085 return 0;
1086 }
1087 return 12;
1088 }
1089 switch (n) {
1090 case 24:
1091 /* FPA status register (ignored). */
1092 if (gdb_has_xml) {
1093 return 0;
1094 }
1095 return 4;
1096 case 25:
1097 /* CPSR */
1098 cpsr_write(env, tmp, 0xffffffff);
1099 return 4;
1100 }
1101 /* Unknown register. */
1102 return 0;
1103 }
1104
1105 #elif defined (TARGET_M68K)
1106
1107 #define NUM_CORE_REGS 18
1108
1109 #define GDB_CORE_XML "cf-core.xml"
1110
1111 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1112 {
1113 if (n < 8) {
1114 /* D0-D7 */
1115 GET_REG32(env->dregs[n]);
1116 } else if (n < 16) {
1117 /* A0-A7 */
1118 GET_REG32(env->aregs[n - 8]);
1119 } else {
1120 switch (n) {
1121 case 16:
1122 GET_REG32(env->sr);
1123 case 17:
1124 GET_REG32(env->pc);
1125 }
1126 }
1127 /* FP registers are not included here because they vary between
1128 ColdFire and m68k. Use XML bits for these. */
1129 return 0;
1130 }
1131
1132 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1133 {
1134 uint32_t tmp;
1135
1136 tmp = ldl_p(mem_buf);
1137
1138 if (n < 8) {
1139 /* D0-D7 */
1140 env->dregs[n] = tmp;
1141 } else if (n < 16) {
1142 /* A0-A7 */
1143 env->aregs[n - 8] = tmp;
1144 } else {
1145 switch (n) {
1146 case 16:
1147 env->sr = tmp;
1148 break;
1149 case 17:
1150 env->pc = tmp;
1151 break;
1152 default:
1153 return 0;
1154 }
1155 }
1156 return 4;
1157 }
1158 #elif defined (TARGET_MIPS)
1159
1160 #define NUM_CORE_REGS 73
1161
1162 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1163 {
1164 if (n < 32) {
1165 GET_REGL(env->active_tc.gpr[n]);
1166 }
1167 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1168 if (n >= 38 && n < 70) {
1169 if (env->CP0_Status & (1 << CP0St_FR)) {
1170 GET_REGL(env->active_fpu.fpr[n - 38].d);
1171 } else {
1172 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1173 }
1174 }
1175 switch (n) {
1176 case 70:
1177 GET_REGL((int32_t)env->active_fpu.fcr31);
1178 case 71:
1179 GET_REGL((int32_t)env->active_fpu.fcr0);
1180 }
1181 }
1182 switch (n) {
1183 case 32:
1184 GET_REGL((int32_t)env->CP0_Status);
1185 case 33:
1186 GET_REGL(env->active_tc.LO[0]);
1187 case 34:
1188 GET_REGL(env->active_tc.HI[0]);
1189 case 35:
1190 GET_REGL(env->CP0_BadVAddr);
1191 case 36:
1192 GET_REGL((int32_t)env->CP0_Cause);
1193 case 37:
1194 GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1195 case 72:
1196 GET_REGL(0); /* fp */
1197 case 89:
1198 GET_REGL((int32_t)env->CP0_PRid);
1199 }
1200 if (n >= 73 && n <= 88) {
1201 /* 16 embedded regs. */
1202 GET_REGL(0);
1203 }
1204
1205 return 0;
1206 }
1207
1208 /* convert MIPS rounding mode in FCR31 to IEEE library */
1209 static unsigned int ieee_rm[] = {
1210 float_round_nearest_even,
1211 float_round_to_zero,
1212 float_round_up,
1213 float_round_down
1214 };
1215 #define RESTORE_ROUNDING_MODE \
1216 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], \
1217 &env->active_fpu.fp_status)
1218
1219 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1220 {
1221 target_ulong tmp;
1222
1223 tmp = ldtul_p(mem_buf);
1224
1225 if (n < 32) {
1226 env->active_tc.gpr[n] = tmp;
1227 return sizeof(target_ulong);
1228 }
1229 if (env->CP0_Config1 & (1 << CP0C1_FP)
1230 && n >= 38 && n < 73) {
1231 if (n < 70) {
1232 if (env->CP0_Status & (1 << CP0St_FR)) {
1233 env->active_fpu.fpr[n - 38].d = tmp;
1234 } else {
1235 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1236 }
1237 }
1238 switch (n) {
1239 case 70:
1240 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1241 /* set rounding mode */
1242 RESTORE_ROUNDING_MODE;
1243 break;
1244 case 71:
1245 env->active_fpu.fcr0 = tmp;
1246 break;
1247 }
1248 return sizeof(target_ulong);
1249 }
1250 switch (n) {
1251 case 32:
1252 env->CP0_Status = tmp;
1253 break;
1254 case 33:
1255 env->active_tc.LO[0] = tmp;
1256 break;
1257 case 34:
1258 env->active_tc.HI[0] = tmp;
1259 break;
1260 case 35:
1261 env->CP0_BadVAddr = tmp;
1262 break;
1263 case 36:
1264 env->CP0_Cause = tmp;
1265 break;
1266 case 37:
1267 env->active_tc.PC = tmp & ~(target_ulong)1;
1268 if (tmp & 1) {
1269 env->hflags |= MIPS_HFLAG_M16;
1270 } else {
1271 env->hflags &= ~(MIPS_HFLAG_M16);
1272 }
1273 break;
1274 case 72: /* fp, ignored */
1275 break;
1276 default:
1277 if (n > 89) {
1278 return 0;
1279 }
1280 /* Other registers are readonly. Ignore writes. */
1281 break;
1282 }
1283
1284 return sizeof(target_ulong);
1285 }
1286 #elif defined(TARGET_OPENRISC)
1287
1288 #define NUM_CORE_REGS (32 + 3)
1289
1290 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1291 {
1292 if (n < 32) {
1293 GET_REG32(env->gpr[n]);
1294 } else {
1295 switch (n) {
1296 case 32: /* PPC */
1297 GET_REG32(env->ppc);
1298
1299 case 33: /* NPC */
1300 GET_REG32(env->npc);
1301
1302 case 34: /* SR */
1303 GET_REG32(env->sr);
1304
1305 default:
1306 break;
1307 }
1308 }
1309 return 0;
1310 }
1311
1312 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1313 uint8_t *mem_buf, int n)
1314 {
1315 uint32_t tmp;
1316
1317 if (n > NUM_CORE_REGS) {
1318 return 0;
1319 }
1320
1321 tmp = ldl_p(mem_buf);
1322
1323 if (n < 32) {
1324 env->gpr[n] = tmp;
1325 } else {
1326 switch (n) {
1327 case 32: /* PPC */
1328 env->ppc = tmp;
1329 break;
1330
1331 case 33: /* NPC */
1332 env->npc = tmp;
1333 break;
1334
1335 case 34: /* SR */
1336 env->sr = tmp;
1337 break;
1338
1339 default:
1340 break;
1341 }
1342 }
1343 return 4;
1344 }
1345 #elif defined (TARGET_SH4)
1346
1347 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1348 /* FIXME: We should use XML for this. */
1349
1350 #define NUM_CORE_REGS 59
1351
1352 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1353 {
1354 switch (n) {
1355 case 0 ... 7:
1356 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1357 GET_REGL(env->gregs[n + 16]);
1358 } else {
1359 GET_REGL(env->gregs[n]);
1360 }
1361 case 8 ... 15:
1362 GET_REGL(env->gregs[n]);
1363 case 16:
1364 GET_REGL(env->pc);
1365 case 17:
1366 GET_REGL(env->pr);
1367 case 18:
1368 GET_REGL(env->gbr);
1369 case 19:
1370 GET_REGL(env->vbr);
1371 case 20:
1372 GET_REGL(env->mach);
1373 case 21:
1374 GET_REGL(env->macl);
1375 case 22:
1376 GET_REGL(env->sr);
1377 case 23:
1378 GET_REGL(env->fpul);
1379 case 24:
1380 GET_REGL(env->fpscr);
1381 case 25 ... 40:
1382 if (env->fpscr & FPSCR_FR) {
1383 stfl_p(mem_buf, env->fregs[n - 9]);
1384 } else {
1385 stfl_p(mem_buf, env->fregs[n - 25]);
1386 }
1387 return 4;
1388 case 41:
1389 GET_REGL(env->ssr);
1390 case 42:
1391 GET_REGL(env->spc);
1392 case 43 ... 50:
1393 GET_REGL(env->gregs[n - 43]);
1394 case 51 ... 58:
1395 GET_REGL(env->gregs[n - (51 - 16)]);
1396 }
1397
1398 return 0;
1399 }
1400
1401 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1402 {
1403 switch (n) {
1404 case 0 ... 7:
1405 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1406 env->gregs[n + 16] = ldl_p(mem_buf);
1407 } else {
1408 env->gregs[n] = ldl_p(mem_buf);
1409 }
1410 break;
1411 case 8 ... 15:
1412 env->gregs[n] = ldl_p(mem_buf);
1413 break;
1414 case 16:
1415 env->pc = ldl_p(mem_buf);
1416 break;
1417 case 17:
1418 env->pr = ldl_p(mem_buf);
1419 break;
1420 case 18:
1421 env->gbr = ldl_p(mem_buf);
1422 break;
1423 case 19:
1424 env->vbr = ldl_p(mem_buf);
1425 break;
1426 case 20:
1427 env->mach = ldl_p(mem_buf);
1428 break;
1429 case 21:
1430 env->macl = ldl_p(mem_buf);
1431 break;
1432 case 22:
1433 env->sr = ldl_p(mem_buf);
1434 break;
1435 case 23:
1436 env->fpul = ldl_p(mem_buf);
1437 break;
1438 case 24:
1439 env->fpscr = ldl_p(mem_buf);
1440 break;
1441 case 25 ... 40:
1442 if (env->fpscr & FPSCR_FR) {
1443 env->fregs[n - 9] = ldfl_p(mem_buf);
1444 } else {
1445 env->fregs[n - 25] = ldfl_p(mem_buf);
1446 }
1447 break;
1448 case 41:
1449 env->ssr = ldl_p(mem_buf);
1450 break;
1451 case 42:
1452 env->spc = ldl_p(mem_buf);
1453 break;
1454 case 43 ... 50:
1455 env->gregs[n - 43] = ldl_p(mem_buf);
1456 break;
1457 case 51 ... 58:
1458 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1459 break;
1460 default:
1461 return 0;
1462 }
1463
1464 return 4;
1465 }
1466 #elif defined (TARGET_MICROBLAZE)
1467
1468 #define NUM_CORE_REGS (32 + 5)
1469
1470 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1471 {
1472 if (n < 32) {
1473 GET_REG32(env->regs[n]);
1474 } else {
1475 GET_REG32(env->sregs[n - 32]);
1476 }
1477 return 0;
1478 }
1479
1480 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1481 {
1482 uint32_t tmp;
1483
1484 if (n > NUM_CORE_REGS) {
1485 return 0;
1486 }
1487
1488 tmp = ldl_p(mem_buf);
1489
1490 if (n < 32) {
1491 env->regs[n] = tmp;
1492 } else {
1493 env->sregs[n - 32] = tmp;
1494 }
1495 return 4;
1496 }
1497 #elif defined (TARGET_CRIS)
1498
1499 #define NUM_CORE_REGS 49
1500
1501 static int
1502 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1503 {
1504 if (n < 15) {
1505 GET_REG32(env->regs[n]);
1506 }
1507
1508 if (n == 15) {
1509 GET_REG32(env->pc);
1510 }
1511
1512 if (n < 32) {
1513 switch (n) {
1514 case 16:
1515 GET_REG8(env->pregs[n - 16]);
1516 case 17:
1517 GET_REG8(env->pregs[n - 16]);
1518 case 20:
1519 case 21:
1520 GET_REG16(env->pregs[n - 16]);
1521 default:
1522 if (n >= 23) {
1523 GET_REG32(env->pregs[n - 16]);
1524 }
1525 break;
1526 }
1527 }
1528 return 0;
1529 }
1530
1531 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1532 {
1533 uint8_t srs;
1534
1535 if (env->pregs[PR_VR] < 32) {
1536 return read_register_crisv10(env, mem_buf, n);
1537 }
1538
1539 srs = env->pregs[PR_SRS];
1540 if (n < 16) {
1541 GET_REG32(env->regs[n]);
1542 }
1543
1544 if (n >= 21 && n < 32) {
1545 GET_REG32(env->pregs[n - 16]);
1546 }
1547 if (n >= 33 && n < 49) {
1548 GET_REG32(env->sregs[srs][n - 33]);
1549 }
1550 switch (n) {
1551 case 16:
1552 GET_REG8(env->pregs[0]);
1553 case 17:
1554 GET_REG8(env->pregs[1]);
1555 case 18:
1556 GET_REG32(env->pregs[2]);
1557 case 19:
1558 GET_REG8(srs);
1559 case 20:
1560 GET_REG16(env->pregs[4]);
1561 case 32:
1562 GET_REG32(env->pc);
1563 }
1564
1565 return 0;
1566 }
1567
1568 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1569 {
1570 uint32_t tmp;
1571
1572 if (n > 49) {
1573 return 0;
1574 }
1575
1576 tmp = ldl_p(mem_buf);
1577
1578 if (n < 16) {
1579 env->regs[n] = tmp;
1580 }
1581
1582 if (n >= 21 && n < 32) {
1583 env->pregs[n - 16] = tmp;
1584 }
1585
1586 /* FIXME: Should support function regs be writable? */
1587 switch (n) {
1588 case 16:
1589 return 1;
1590 case 17:
1591 return 1;
1592 case 18:
1593 env->pregs[PR_PID] = tmp;
1594 break;
1595 case 19:
1596 return 1;
1597 case 20:
1598 return 2;
1599 case 32:
1600 env->pc = tmp;
1601 break;
1602 }
1603
1604 return 4;
1605 }
1606 #elif defined (TARGET_ALPHA)
1607
1608 #define NUM_CORE_REGS 67
1609
1610 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1611 {
1612 uint64_t val;
1613 CPU_DoubleU d;
1614
1615 switch (n) {
1616 case 0 ... 30:
1617 val = env->ir[n];
1618 break;
1619 case 32 ... 62:
1620 d.d = env->fir[n - 32];
1621 val = d.ll;
1622 break;
1623 case 63:
1624 val = cpu_alpha_load_fpcr(env);
1625 break;
1626 case 64:
1627 val = env->pc;
1628 break;
1629 case 66:
1630 val = env->unique;
1631 break;
1632 case 31:
1633 case 65:
1634 /* 31 really is the zero register; 65 is unassigned in the
1635 gdb protocol, but is still required to occupy 8 bytes. */
1636 val = 0;
1637 break;
1638 default:
1639 return 0;
1640 }
1641 GET_REGL(val);
1642 }
1643
1644 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1645 {
1646 target_ulong tmp = ldtul_p(mem_buf);
1647 CPU_DoubleU d;
1648
1649 switch (n) {
1650 case 0 ... 30:
1651 env->ir[n] = tmp;
1652 break;
1653 case 32 ... 62:
1654 d.ll = tmp;
1655 env->fir[n - 32] = d.d;
1656 break;
1657 case 63:
1658 cpu_alpha_store_fpcr(env, tmp);
1659 break;
1660 case 64:
1661 env->pc = tmp;
1662 break;
1663 case 66:
1664 env->unique = tmp;
1665 break;
1666 case 31:
1667 case 65:
1668 /* 31 really is the zero register; 65 is unassigned in the
1669 gdb protocol, but is still required to occupy 8 bytes. */
1670 break;
1671 default:
1672 return 0;
1673 }
1674 return 8;
1675 }
1676 #elif defined (TARGET_S390X)
1677
1678 #define NUM_CORE_REGS S390_NUM_REGS
1679
1680 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1681 {
1682 uint64_t val;
1683 int cc_op;
1684
1685 switch (n) {
1686 case S390_PSWM_REGNUM:
1687 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1688 val = deposit64(env->psw.mask, 44, 2, cc_op);
1689 GET_REGL(val);
1690 case S390_PSWA_REGNUM:
1691 GET_REGL(env->psw.addr);
1692 case S390_R0_REGNUM ... S390_R15_REGNUM:
1693 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1694 case S390_A0_REGNUM ... S390_A15_REGNUM:
1695 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1696 case S390_FPC_REGNUM:
1697 GET_REG32(env->fpc);
1698 case S390_F0_REGNUM ... S390_F15_REGNUM:
1699 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
1700 }
1701
1702 return 0;
1703 }
1704
1705 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1706 {
1707 target_ulong tmpl;
1708 uint32_t tmp32;
1709 int r = 8;
1710 tmpl = ldtul_p(mem_buf);
1711 tmp32 = ldl_p(mem_buf);
1712
1713 switch (n) {
1714 case S390_PSWM_REGNUM:
1715 env->psw.mask = tmpl;
1716 env->cc_op = extract64(tmpl, 44, 2);
1717 break;
1718 case S390_PSWA_REGNUM:
1719 env->psw.addr = tmpl;
1720 break;
1721 case S390_R0_REGNUM ... S390_R15_REGNUM:
1722 env->regs[n-S390_R0_REGNUM] = tmpl;
1723 break;
1724 case S390_A0_REGNUM ... S390_A15_REGNUM:
1725 env->aregs[n-S390_A0_REGNUM] = tmp32;
1726 r = 4;
1727 break;
1728 case S390_FPC_REGNUM:
1729 env->fpc = tmp32;
1730 r = 4;
1731 break;
1732 case S390_F0_REGNUM ... S390_F15_REGNUM:
1733 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1734 break;
1735 default:
1736 return 0;
1737 }
1738 return r;
1739 }
1740 #elif defined (TARGET_LM32)
1741
1742 #include "hw/lm32/lm32_pic.h"
1743 #define NUM_CORE_REGS (32 + 7)
1744
1745 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1746 {
1747 if (n < 32) {
1748 GET_REG32(env->regs[n]);
1749 } else {
1750 switch (n) {
1751 case 32:
1752 GET_REG32(env->pc);
1753 /* FIXME: put in right exception ID */
1754 case 33:
1755 GET_REG32(0);
1756 case 34:
1757 GET_REG32(env->eba);
1758 case 35:
1759 GET_REG32(env->deba);
1760 case 36:
1761 GET_REG32(env->ie);
1762 case 37:
1763 GET_REG32(lm32_pic_get_im(env->pic_state));
1764 case 38:
1765 GET_REG32(lm32_pic_get_ip(env->pic_state));
1766 }
1767 }
1768 return 0;
1769 }
1770
1771 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1772 {
1773 uint32_t tmp;
1774
1775 if (n > NUM_CORE_REGS) {
1776 return 0;
1777 }
1778
1779 tmp = ldl_p(mem_buf);
1780
1781 if (n < 32) {
1782 env->regs[n] = tmp;
1783 } else {
1784 switch (n) {
1785 case 32:
1786 env->pc = tmp;
1787 break;
1788 case 34:
1789 env->eba = tmp;
1790 break;
1791 case 35:
1792 env->deba = tmp;
1793 break;
1794 case 36:
1795 env->ie = tmp;
1796 break;
1797 case 37:
1798 lm32_pic_set_im(env->pic_state, tmp);
1799 break;
1800 case 38:
1801 lm32_pic_set_ip(env->pic_state, tmp);
1802 break;
1803 }
1804 }
1805 return 4;
1806 }
1807 #elif defined(TARGET_XTENSA)
1808
1809 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1810 * Use num_regs to see all registers. gdb modification is required for that:
1811 * reset bit 0 in the 'flags' field of the registers definitions in the
1812 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1813 */
1814 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1815 #define num_g_regs NUM_CORE_REGS
1816
1817 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1818 {
1819 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1820
1821 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1822 return 0;
1823 }
1824
1825 switch (reg->type) {
1826 case 9: /*pc*/
1827 GET_REG32(env->pc);
1828
1829 case 1: /*ar*/
1830 xtensa_sync_phys_from_window(env);
1831 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1832
1833 case 2: /*SR*/
1834 GET_REG32(env->sregs[reg->targno & 0xff]);
1835
1836 case 3: /*UR*/
1837 GET_REG32(env->uregs[reg->targno & 0xff]);
1838
1839 case 4: /*f*/
1840 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1841
1842 case 8: /*a*/
1843 GET_REG32(env->regs[reg->targno & 0x0f]);
1844
1845 default:
1846 qemu_log("%s from reg %d of unsupported type %d\n",
1847 __func__, n, reg->type);
1848 return 0;
1849 }
1850 }
1851
1852 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1853 {
1854 uint32_t tmp;
1855 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1856
1857 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1858 return 0;
1859 }
1860
1861 tmp = ldl_p(mem_buf);
1862
1863 switch (reg->type) {
1864 case 9: /*pc*/
1865 env->pc = tmp;
1866 break;
1867
1868 case 1: /*ar*/
1869 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1870 xtensa_sync_window_from_phys(env);
1871 break;
1872
1873 case 2: /*SR*/
1874 env->sregs[reg->targno & 0xff] = tmp;
1875 break;
1876
1877 case 3: /*UR*/
1878 env->uregs[reg->targno & 0xff] = tmp;
1879 break;
1880
1881 case 4: /*f*/
1882 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1883 break;
1884
1885 case 8: /*a*/
1886 env->regs[reg->targno & 0x0f] = tmp;
1887 break;
1888
1889 default:
1890 qemu_log("%s to reg %d of unsupported type %d\n",
1891 __func__, n, reg->type);
1892 return 0;
1893 }
1894
1895 return 4;
1896 }
1897 #else
1898
1899 #define NUM_CORE_REGS 0
1900
1901 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1902 {
1903 return 0;
1904 }
1905
1906 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1907 {
1908 return 0;
1909 }
1910
1911 #endif
1912
1913 #if !defined(TARGET_XTENSA)
1914 static int num_g_regs = NUM_CORE_REGS;
1915 #endif
1916
1917 #ifdef GDB_CORE_XML
1918 /* Encode data using the encoding for 'x' packets. */
1919 static int memtox(char *buf, const char *mem, int len)
1920 {
1921 char *p = buf;
1922 char c;
1923
1924 while (len--) {
1925 c = *(mem++);
1926 switch (c) {
1927 case '#': case '$': case '*': case '}':
1928 *(p++) = '}';
1929 *(p++) = c ^ 0x20;
1930 break;
1931 default:
1932 *(p++) = c;
1933 break;
1934 }
1935 }
1936 return p - buf;
1937 }
1938
1939 static const char *get_feature_xml(const char *p, const char **newp)
1940 {
1941 size_t len;
1942 int i;
1943 const char *name;
1944 static char target_xml[1024];
1945
1946 len = 0;
1947 while (p[len] && p[len] != ':')
1948 len++;
1949 *newp = p + len;
1950
1951 name = NULL;
1952 if (strncmp(p, "target.xml", len) == 0) {
1953 /* Generate the XML description for this CPU. */
1954 if (!target_xml[0]) {
1955 GDBRegisterState *r;
1956 CPUState *cpu = first_cpu;
1957
1958 snprintf(target_xml, sizeof(target_xml),
1959 "<?xml version=\"1.0\"?>"
1960 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1961 "<target>"
1962 "<xi:include href=\"%s\"/>",
1963 GDB_CORE_XML);
1964
1965 for (r = cpu->gdb_regs; r; r = r->next) {
1966 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1967 pstrcat(target_xml, sizeof(target_xml), r->xml);
1968 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1969 }
1970 pstrcat(target_xml, sizeof(target_xml), "</target>");
1971 }
1972 return target_xml;
1973 }
1974 for (i = 0; ; i++) {
1975 name = xml_builtin[i][0];
1976 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1977 break;
1978 }
1979 return name ? xml_builtin[i][1] : NULL;
1980 }
1981 #endif
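/* Illustrative exchange (editor's note; offsets and lengths vary by gdb):
 * a target-description-aware gdb fetches the XML built by get_feature_xml()
 * in chunks via qXfer, roughly:
 *     -> qXfer:features:read:target.xml:0,ffb
 *     <- m<?xml version="1.0"?><!DOCTYPE target ...   (intermediate chunk)
 *     -> qXfer:features:read:target.xml:ffb,ffb
 *     <- l...                                         (final chunk)
 * The 'm'/'l' reply prefixes mark whether more data remains.
 */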
1982
1983 static int gdb_read_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1984 {
1985 CPUArchState *env = cpu->env_ptr;
1986 GDBRegisterState *r;
1987
1988 if (reg < NUM_CORE_REGS)
1989 return cpu_gdb_read_register(env, mem_buf, reg);
1990
1991 for (r = cpu->gdb_regs; r; r = r->next) {
1992 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1993 return r->get_reg(env, mem_buf, reg - r->base_reg);
1994 }
1995 }
1996 return 0;
1997 }
1998
1999 static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
2000 {
2001 CPUArchState *env = cpu->env_ptr;
2002 GDBRegisterState *r;
2003
2004 if (reg < NUM_CORE_REGS)
2005 return cpu_gdb_write_register(env, mem_buf, reg);
2006
2007 for (r = cpu->gdb_regs; r; r = r->next) {
2008 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
2009 return r->set_reg(env, mem_buf, reg - r->base_reg);
2010 }
2011 }
2012 return 0;
2013 }
2014
2015 #if !defined(TARGET_XTENSA)
2016 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
2017 specifies the first register number and these registers are included in
2018 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
2019 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
2020 */
2021
2022 void gdb_register_coprocessor(CPUState *cpu,
2023 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
2024 int num_regs, const char *xml, int g_pos)
2025 {
2026 GDBRegisterState *s;
2027 GDBRegisterState **p;
2028 static int last_reg = NUM_CORE_REGS;
2029
2030 p = &cpu->gdb_regs;
2031 while (*p) {
2032 /* Check for duplicates. */
2033 if (strcmp((*p)->xml, xml) == 0)
2034 return;
2035 p = &(*p)->next;
2036 }
2037
2038 s = g_new0(GDBRegisterState, 1);
2039 s->base_reg = last_reg;
2040 s->num_regs = num_regs;
2041 s->get_reg = get_reg;
2042 s->set_reg = set_reg;
2043 s->xml = xml;
2044
2045 /* Add to end of list. */
2046 last_reg += num_regs;
2047 *p = s;
2048 if (g_pos) {
2049 if (g_pos != s->base_reg) {
2050 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
2051 "Expected %d got %d\n", xml, g_pos, s->base_reg);
2052 } else {
2053 num_g_regs = last_reg;
2054 }
2055 }
2056 }
2057 #endif
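/* Usage sketch (editor's note; the callback and XML file names below are
 * hypothetical): a target exposing registers beyond NUM_CORE_REGS registers
 * them once per CPU, for instance
 *
 *     gdb_register_coprocessor(cpu, my_fpu_gdb_get_reg, my_fpu_gdb_set_reg,
 *                              32, "my-target-fpu.xml", 0);
 *
 * With g_pos == 0 the extra registers are reachable only through 'p'/'P'
 * packets and the XML description; a nonzero g_pos that matches the assigned
 * base register number also appends them to the 'g' packet via num_g_regs.
 */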
2058
2059 #ifndef CONFIG_USER_ONLY
2060 static const int xlat_gdb_type[] = {
2061 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
2062 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
2063 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
2064 };
2065 #endif
2066
2067 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
2068 {
2069 CPUState *cpu;
2070 CPUArchState *env;
2071 int err = 0;
2072
2073 if (kvm_enabled()) {
2074 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2075 }
2076
2077 switch (type) {
2078 case GDB_BREAKPOINT_SW:
2079 case GDB_BREAKPOINT_HW:
2080 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2081 env = cpu->env_ptr;
2082 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
2083 if (err)
2084 break;
2085 }
2086 return err;
2087 #ifndef CONFIG_USER_ONLY
2088 case GDB_WATCHPOINT_WRITE:
2089 case GDB_WATCHPOINT_READ:
2090 case GDB_WATCHPOINT_ACCESS:
2091 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2092 env = cpu->env_ptr;
2093 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
2094 NULL);
2095 if (err)
2096 break;
2097 }
2098 return err;
2099 #endif
2100 default:
2101 return -ENOSYS;
2102 }
2103 }
2104
2105 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
2106 {
2107 CPUState *cpu;
2108 CPUArchState *env;
2109 int err = 0;
2110
2111 if (kvm_enabled()) {
2112 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2113 }
2114
2115 switch (type) {
2116 case GDB_BREAKPOINT_SW:
2117 case GDB_BREAKPOINT_HW:
2118 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2119 env = cpu->env_ptr;
2120 err = cpu_breakpoint_remove(env, addr, BP_GDB);
2121 if (err)
2122 break;
2123 }
2124 return err;
2125 #ifndef CONFIG_USER_ONLY
2126 case GDB_WATCHPOINT_WRITE:
2127 case GDB_WATCHPOINT_READ:
2128 case GDB_WATCHPOINT_ACCESS:
2129 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2130 env = cpu->env_ptr;
2131 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
2132 if (err)
2133 break;
2134 }
2135 return err;
2136 #endif
2137 default:
2138 return -ENOSYS;
2139 }
2140 }
2141
2142 static void gdb_breakpoint_remove_all(void)
2143 {
2144 CPUState *cpu;
2145 CPUArchState *env;
2146
2147 if (kvm_enabled()) {
2148 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
2149 return;
2150 }
2151
2152 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2153 env = cpu->env_ptr;
2154 cpu_breakpoint_remove_all(env, BP_GDB);
2155 #ifndef CONFIG_USER_ONLY
2156 cpu_watchpoint_remove_all(env, BP_GDB);
2157 #endif
2158 }
2159 }
2160
2161 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2162 {
2163 CPUState *cpu = s->c_cpu;
2164 CPUClass *cc = CPU_GET_CLASS(cpu);
2165
2166 cpu_synchronize_state(cpu);
2167 if (cc->set_pc) {
2168 cc->set_pc(cpu, pc);
2169 }
2170 }
2171
2172 static CPUState *find_cpu(uint32_t thread_id)
2173 {
2174 CPUState *cpu;
2175
2176 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2177 if (cpu_index(cpu) == thread_id) {
2178 return cpu;
2179 }
2180 }
2181
2182 return NULL;
2183 }
2184
2185 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2186 {
2187 #ifdef TARGET_XTENSA
2188 CPUArchState *env;
2189 #endif
2190 CPUState *cpu;
2191 const char *p;
2192 uint32_t thread;
2193 int ch, reg_size, type, res;
2194 char buf[MAX_PACKET_LENGTH];
2195 uint8_t mem_buf[MAX_PACKET_LENGTH];
2196 uint8_t *registers;
2197 target_ulong addr, len;
2198
2199 #ifdef DEBUG_GDB
2200 printf("command='%s'\n", line_buf);
2201 #endif
2202 p = line_buf;
2203 ch = *p++;
2204 switch(ch) {
2205 case '?':
2206 /* TODO: Make this return the correct value for user-mode. */
2207 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2208 cpu_index(s->c_cpu));
2209 put_packet(s, buf);
2210 /* Remove all the breakpoints when this query is issued,
2211 * because gdb is doing an initial connect and the state
2212 * should be cleaned up.
2213 */
2214 gdb_breakpoint_remove_all();
2215 break;
2216 case 'c':
2217 if (*p != '\0') {
2218 addr = strtoull(p, (char **)&p, 16);
2219 gdb_set_cpu_pc(s, addr);
2220 }
2221 s->signal = 0;
2222 gdb_continue(s);
2223 return RS_IDLE;
2224 case 'C':
2225 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2226 if (s->signal == -1)
2227 s->signal = 0;
2228 gdb_continue(s);
2229 return RS_IDLE;
2230 case 'v':
2231 if (strncmp(p, "Cont", 4) == 0) {
2232 int res_signal, res_thread;
2233
2234 p += 4;
2235 if (*p == '?') {
2236 put_packet(s, "vCont;c;C;s;S");
2237 break;
2238 }
2239 res = 0;
2240 res_signal = 0;
2241 res_thread = 0;
2242 while (*p) {
2243 int action, signal;
2244
2245 if (*p++ != ';') {
2246 res = 0;
2247 break;
2248 }
2249 action = *p++;
2250 signal = 0;
2251 if (action == 'C' || action == 'S') {
2252 signal = strtoul(p, (char **)&p, 16);
2253 } else if (action != 'c' && action != 's') {
2254 res = 0;
2255 break;
2256 }
2257 thread = 0;
2258 if (*p == ':') {
2259 thread = strtoull(p+1, (char **)&p, 16);
2260 }
2261 action = tolower(action);
2262 if (res == 0 || (res == 'c' && action == 's')) {
2263 res = action;
2264 res_signal = signal;
2265 res_thread = thread;
2266 }
2267 }
2268 if (res) {
2269 if (res_thread != -1 && res_thread != 0) {
2270 cpu = find_cpu(res_thread);
2271 if (cpu == NULL) {
2272 put_packet(s, "E22");
2273 break;
2274 }
2275 s->c_cpu = cpu;
2276 }
2277 if (res == 's') {
2278 cpu_single_step(s->c_cpu, sstep_flags);
2279 }
2280 s->signal = res_signal;
2281 gdb_continue(s);
2282 return RS_IDLE;
2283 }
2284 break;
2285 } else {
2286 goto unknown_command;
2287 }
2288 case 'k':
2289 #ifdef CONFIG_USER_ONLY
2290 /* Kill the target */
2291 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2292 exit(0);
2293 #endif
2294 case 'D':
2295 /* Detach packet */
2296 gdb_breakpoint_remove_all();
2297 gdb_syscall_mode = GDB_SYS_DISABLED;
2298 gdb_continue(s);
2299 put_packet(s, "OK");
2300 break;
2301 case 's':
2302 if (*p != '\0') {
2303 addr = strtoull(p, (char **)&p, 16);
2304 gdb_set_cpu_pc(s, addr);
2305 }
2306 cpu_single_step(s->c_cpu, sstep_flags);
2307 gdb_continue(s);
2308 return RS_IDLE;
2309 case 'F':
2310 {
2311 target_ulong ret;
2312 target_ulong err;
2313
2314 ret = strtoull(p, (char **)&p, 16);
2315 if (*p == ',') {
2316 p++;
2317 err = strtoull(p, (char **)&p, 16);
2318 } else {
2319 err = 0;
2320 }
2321 if (*p == ',')
2322 p++;
2323 type = *p;
2324 if (s->current_syscall_cb) {
2325 s->current_syscall_cb(s->c_cpu, ret, err);
2326 s->current_syscall_cb = NULL;
2327 }
2328 if (type == 'C') {
2329 put_packet(s, "T02");
2330 } else {
2331 gdb_continue(s);
2332 }
2333 }
2334 break;
2335 case 'g':
2336 cpu_synchronize_state(s->g_cpu);
2337 #ifdef TARGET_XTENSA
2338 env = s->g_cpu->env_ptr;
2339 #endif
2340 len = 0;
2341 for (addr = 0; addr < num_g_regs; addr++) {
2342 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2343 len += reg_size;
2344 }
2345 memtohex(buf, mem_buf, len);
2346 put_packet(s, buf);
2347 break;
2348 case 'G':
2349 cpu_synchronize_state(s->g_cpu);
2350 #ifdef TARGET_XTENSA
2351 env = s->g_cpu->env_ptr;
2352 #endif
2353 registers = mem_buf;
2354 len = strlen(p) / 2;
2355 hextomem((uint8_t *)registers, p, len);
2356 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2357 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2358 len -= reg_size;
2359 registers += reg_size;
2360 }
2361 put_packet(s, "OK");
2362 break;
2363 case 'm':
2364 addr = strtoull(p, (char **)&p, 16);
2365 if (*p == ',')
2366 p++;
2367 len = strtoull(p, NULL, 16);
2368 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, false) != 0) {
2369 put_packet (s, "E14");
2370 } else {
2371 memtohex(buf, mem_buf, len);
2372 put_packet(s, buf);
2373 }
2374 break;
2375 case 'M':
2376 addr = strtoull(p, (char **)&p, 16);
2377 if (*p == ',')
2378 p++;
2379 len = strtoull(p, (char **)&p, 16);
2380 if (*p == ':')
2381 p++;
2382 hextomem(mem_buf, p, len);
2383 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len,
2384 true) != 0) {
2385 put_packet(s, "E14");
2386 } else {
2387 put_packet(s, "OK");
2388 }
2389 break;
2390 case 'p':
2391 /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
2392 This works, but can be very slow. Anything new enough to
2393 understand XML also knows how to use this properly. */
2394 if (!gdb_has_xml)
2395 goto unknown_command;
2396 addr = strtoull(p, (char **)&p, 16);
2397 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2398 if (reg_size) {
2399 memtohex(buf, mem_buf, reg_size);
2400 put_packet(s, buf);
2401 } else {
2402 put_packet(s, "E14");
2403 }
2404 break;
2405 case 'P':
2406 if (!gdb_has_xml)
2407 goto unknown_command;
2408 addr = strtoull(p, (char **)&p, 16);
2409 if (*p == '=')
2410 p++;
2411 reg_size = strlen(p) / 2;
2412 hextomem(mem_buf, p, reg_size);
2413 gdb_write_register(s->g_cpu, mem_buf, addr);
2414 put_packet(s, "OK");
2415 break;
2416 case 'Z':
2417 case 'z':
2418 type = strtoul(p, (char **)&p, 16);
2419 if (*p == ',')
2420 p++;
2421 addr = strtoull(p, (char **)&p, 16);
2422 if (*p == ',')
2423 p++;
2424 len = strtoull(p, (char **)&p, 16);
2425 if (ch == 'Z')
2426 res = gdb_breakpoint_insert(addr, len, type);
2427 else
2428 res = gdb_breakpoint_remove(addr, len, type);
2429 if (res >= 0)
2430 put_packet(s, "OK");
2431 else if (res == -ENOSYS)
2432 put_packet(s, "");
2433 else
2434 put_packet(s, "E22");
2435 break;
2436 case 'H':
2437 type = *p++;
2438 thread = strtoull(p, (char **)&p, 16);
2439 if (thread == -1 || thread == 0) {
2440 put_packet(s, "OK");
2441 break;
2442 }
2443 cpu = find_cpu(thread);
2444 if (cpu == NULL) {
2445 put_packet(s, "E22");
2446 break;
2447 }
2448 switch (type) {
2449 case 'c':
2450 s->c_cpu = cpu;
2451 put_packet(s, "OK");
2452 break;
2453 case 'g':
2454 s->g_cpu = cpu;
2455 put_packet(s, "OK");
2456 break;
2457 default:
2458 put_packet(s, "E22");
2459 break;
2460 }
2461 break;
2462 case 'T':
2463 thread = strtoull(p, (char **)&p, 16);
2464 cpu = find_cpu(thread);
2465
2466 if (cpu != NULL) {
2467 put_packet(s, "OK");
2468 } else {
2469 put_packet(s, "E22");
2470 }
2471 break;
2472 case 'q':
2473 case 'Q':
2474 /* parse any 'q' packets here */
2475 if (!strcmp(p,"qemu.sstepbits")) {
2476 /* Query the single-step flag bit definitions */
2477 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2478 SSTEP_ENABLE,
2479 SSTEP_NOIRQ,
2480 SSTEP_NOTIMER);
2481 put_packet(s, buf);
2482 break;
2483 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2484 /* Display or change the sstep_flags */
2485 p += 10;
2486 if (*p != '=') {
2487 /* Display current setting */
2488 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2489 put_packet(s, buf);
2490 break;
2491 }
2492 p++;
2493 type = strtoul(p, (char **)&p, 16);
2494 sstep_flags = type;
2495 put_packet(s, "OK");
2496 break;
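/* Illustrative sketch, not part of the original source: with the usual
 * SSTEP_* definitions (an assumption here; the exact values depend on the
 * build), the two QEMU-specific query packets handled above exchange data
 * roughly as follows:
 *
 *     -> qqemu.sstepbits        <- ENABLE=1,NOIRQ=2,NOTIMER=4
 *     -> qqemu.sstep            <- 0x7
 *     -> qqemu.sstep=0x5        <- OK
 *
 * From a gdb client these are typically driven with
 * "maintenance packet qqemu.sstep...". */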
2497 } else if (strcmp(p,"C") == 0) {
2498 /* "Current thread" remains vague in the spec, so always return
2499 * the first CPU (gdb returns the first thread). */
2500 put_packet(s, "QC1");
2501 break;
2502 } else if (strcmp(p,"fThreadInfo") == 0) {
2503 s->query_cpu = first_cpu;
2504 goto report_cpuinfo;
2505 } else if (strcmp(p,"sThreadInfo") == 0) {
2506 report_cpuinfo:
2507 if (s->query_cpu) {
2508 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2509 put_packet(s, buf);
2510 s->query_cpu = s->query_cpu->next_cpu;
2511 } else
2512 put_packet(s, "l");
2513 break;
2514 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2515 thread = strtoull(p+16, (char **)&p, 16);
2516 cpu = find_cpu(thread);
2517 if (cpu != NULL) {
2518 cpu_synchronize_state(cpu);
2519 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2520 "CPU#%d [%s]", cpu->cpu_index,
2521 cpu->halted ? "halted " : "running");
2522 memtohex(buf, mem_buf, len);
2523 put_packet(s, buf);
2524 }
2525 break;
2526 }
2527 #ifdef CONFIG_USER_ONLY
2528 else if (strncmp(p, "Offsets", 7) == 0) {
2529 CPUArchState *env = s->c_cpu->env_ptr;
2530 TaskState *ts = env->opaque;
2531
2532 snprintf(buf, sizeof(buf),
2533 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2534 ";Bss=" TARGET_ABI_FMT_lx,
2535 ts->info->code_offset,
2536 ts->info->data_offset,
2537 ts->info->data_offset);
2538 put_packet(s, buf);
2539 break;
2540 }
2541 #else /* !CONFIG_USER_ONLY */
2542 else if (strncmp(p, "Rcmd,", 5) == 0) {
2543 int len = strlen(p + 5);
2544
2545 if ((len % 2) != 0) {
2546 put_packet(s, "E01");
2547 break;
2548 }
2549 hextomem(mem_buf, p + 5, len);
2550 len = len / 2;
2551 mem_buf[len++] = 0;
2552 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2553 put_packet(s, "OK");
2554 break;
2555 }
2556 #endif /* !CONFIG_USER_ONLY */
2557 if (strncmp(p, "Supported", 9) == 0) {
2558 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2559 #ifdef GDB_CORE_XML
2560 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2561 #endif
2562 put_packet(s, buf);
2563 break;
2564 }
2565 #ifdef GDB_CORE_XML
2566 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2567 const char *xml;
2568 target_ulong total_len;
2569
2570 gdb_has_xml = 1;
2571 p += 19;
2572 xml = get_feature_xml(p, &p);
2573 if (!xml) {
2574 snprintf(buf, sizeof(buf), "E00");
2575 put_packet(s, buf);
2576 break;
2577 }
2578
2579 if (*p == ':')
2580 p++;
2581 addr = strtoul(p, (char **)&p, 16);
2582 if (*p == ',')
2583 p++;
2584 len = strtoul(p, (char **)&p, 16);
2585
2586 total_len = strlen(xml);
2587 if (addr > total_len) {
2588 snprintf(buf, sizeof(buf), "E00");
2589 put_packet(s, buf);
2590 break;
2591 }
2592 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2593 len = (MAX_PACKET_LENGTH - 5) / 2;
2594 if (len < total_len - addr) {
2595 buf[0] = 'm';
2596 len = memtox(buf + 1, xml + addr, len);
2597 } else {
2598 buf[0] = 'l';
2599 len = memtox(buf + 1, xml + addr, total_len - addr);
2600 }
2601 put_packet_binary(s, buf, len + 1);
2602 break;
2603 }
2604 #endif
2605 /* Unrecognised 'q' command. */
2606 goto unknown_command;
2607
2608 default:
2609 unknown_command:
2610 /* put empty packet */
2611 buf[0] = '\0';
2612 put_packet(s, buf);
2613 break;
2614 }
2615 return RS_IDLE;
2616 }
2617
2618 void gdb_set_stop_cpu(CPUState *cpu)
2619 {
2620 gdbserver_state->c_cpu = cpu;
2621 gdbserver_state->g_cpu = cpu;
2622 }
2623
2624 #ifndef CONFIG_USER_ONLY
2625 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2626 {
2627 GDBState *s = gdbserver_state;
2628 CPUArchState *env = s->c_cpu->env_ptr;
2629 CPUState *cpu = s->c_cpu;
2630 char buf[256];
2631 const char *type;
2632 int ret;
2633
2634 if (running || s->state == RS_INACTIVE) {
2635 return;
2636 }
2637 /* Is there a GDB syscall waiting to be sent? */
2638 if (s->current_syscall_cb) {
2639 put_packet(s, s->syscall_buf);
2640 return;
2641 }
2642 switch (state) {
2643 case RUN_STATE_DEBUG:
2644 if (env->watchpoint_hit) {
2645 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2646 case BP_MEM_READ:
2647 type = "r";
2648 break;
2649 case BP_MEM_ACCESS:
2650 type = "a";
2651 break;
2652 default:
2653 type = "";
2654 break;
2655 }
2656 snprintf(buf, sizeof(buf),
2657 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2658 GDB_SIGNAL_TRAP, cpu_index(cpu), type,
2659 env->watchpoint_hit->vaddr);
2660 env->watchpoint_hit = NULL;
2661 goto send_packet;
2662 }
2663 tb_flush(env);
2664 ret = GDB_SIGNAL_TRAP;
2665 break;
2666 case RUN_STATE_PAUSED:
2667 ret = GDB_SIGNAL_INT;
2668 break;
2669 case RUN_STATE_SHUTDOWN:
2670 ret = GDB_SIGNAL_QUIT;
2671 break;
2672 case RUN_STATE_IO_ERROR:
2673 ret = GDB_SIGNAL_IO;
2674 break;
2675 case RUN_STATE_WATCHDOG:
2676 ret = GDB_SIGNAL_ALRM;
2677 break;
2678 case RUN_STATE_INTERNAL_ERROR:
2679 ret = GDB_SIGNAL_ABRT;
2680 break;
2681 case RUN_STATE_SAVE_VM:
2682 case RUN_STATE_RESTORE_VM:
2683 return;
2684 case RUN_STATE_FINISH_MIGRATE:
2685 ret = GDB_SIGNAL_XCPU;
2686 break;
2687 default:
2688 ret = GDB_SIGNAL_UNKNOWN;
2689 break;
2690 }
2691 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
2692
2693 send_packet:
2694 put_packet(s, buf);
2695
2696 /* disable single step if it was enabled */
2697 cpu_single_step(cpu, 0);
2698 }
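/* For illustration, derived from the snprintf formats above (not part of the
 * original file): a plain debug stop is reported to gdb as something like
 * "T05thread:01;", where 05 is GDB_SIGNAL_TRAP and 01 is typically the first
 * CPU; a hit read-watchpoint additionally carries the address, e.g.
 * "T05thread:01;rwatch:<vaddr>;", with <vaddr> printed using TARGET_FMT_lx. */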
2699 #endif
2700
2701 /* Send a gdb syscall request.
2702 This accepts limited printf-style format specifiers, specifically:
2703 %x - target_ulong argument printed in hex.
2704 %lx - 64-bit argument printed in hex.
2705 %s - string pointer (target_ulong) and length (int) pair. */
2706 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2707 {
2708 va_list va;
2709 char *p;
2710 char *p_end;
2711 target_ulong addr;
2712 uint64_t i64;
2713 GDBState *s;
2714
2715 s = gdbserver_state;
2716 if (!s)
2717 return;
2718 s->current_syscall_cb = cb;
2719 #ifndef CONFIG_USER_ONLY
2720 vm_stop(RUN_STATE_DEBUG);
2721 #endif
2722 va_start(va, fmt);
2723 p = s->syscall_buf;
2724 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2725 *(p++) = 'F';
2726 while (*fmt) {
2727 if (*fmt == '%') {
2728 fmt++;
2729 switch (*fmt++) {
2730 case 'x':
2731 addr = va_arg(va, target_ulong);
2732 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2733 break;
2734 case 'l':
2735 if (*(fmt++) != 'x')
2736 goto bad_format;
2737 i64 = va_arg(va, uint64_t);
2738 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2739 break;
2740 case 's':
2741 addr = va_arg(va, target_ulong);
2742 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2743 addr, va_arg(va, int));
2744 break;
2745 default:
2746 bad_format:
2747 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2748 fmt - 1);
2749 break;
2750 }
2751 } else {
2752 *(p++) = *(fmt++);
2753 }
2754 }
2755 *p = 0;
2756 va_end(va);
2757 #ifdef CONFIG_USER_ONLY
2758 put_packet(s, s->syscall_buf);
2759 gdb_handlesig(s->c_cpu, 0);
2760 #else
2761 /* In this case wait to send the syscall packet until notification that
2762 the CPU has stopped. This must be done because if the packet is sent
2763 now the reply from the syscall request could be received while the CPU
2764 is still in the running state, which can cause packets to be dropped
2765 and state transition 'T' packets to be sent while the syscall is still
2766 being processed. */
2767 cpu_exit(s->c_cpu);
2768 #endif
2769 }
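/* A minimal usage sketch (illustrative; the callback name and argument
 * variables are hypothetical, not taken from this file): a semihosting front
 * end could ask the gdb client to open a host file with
 *
 *     gdb_do_syscall(my_open_cb, "open,%s,%x,%x",
 *                    fname_addr, (int)fname_len,
 *                    (target_ulong)flags, (target_ulong)mode);
 *
 * Given the format handling above, this builds a packet of the form
 * "Fopen,<fname_addr>/<fname_len>,<flags>,<mode>" (all fields in hex). The
 * debugger performs the call and answers with an 'F' reply packet, whose
 * return value and errno are parsed earlier in gdb_handle_packet() and handed
 * to my_open_cb through s->current_syscall_cb. */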
2770
2771 static void gdb_read_byte(GDBState *s, int ch)
2772 {
2773 int i, csum;
2774 uint8_t reply;
2775
2776 #ifndef CONFIG_USER_ONLY
2777 if (s->last_packet_len) {
2778 /* Waiting for a response to the last packet. If we see the start
2779 of a new command then abandon the previous response. */
2780 if (ch == '-') {
2781 #ifdef DEBUG_GDB
2782 printf("Got NACK, retransmitting\n");
2783 #endif
2784 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2785 }
2786 #ifdef DEBUG_GDB
2787 else if (ch == '+')
2788 printf("Got ACK\n");
2789 else
2790 printf("Got '%c' when expecting ACK/NACK\n", ch);
2791 #endif
2792 if (ch == '+' || ch == '$')
2793 s->last_packet_len = 0;
2794 if (ch != '$')
2795 return;
2796 }
2797 if (runstate_is_running()) {
2798 /* when the CPU is running, we cannot do anything except stop
2799 it when receiving a char */
2800 vm_stop(RUN_STATE_PAUSED);
2801 } else
2802 #endif
2803 {
2804 switch(s->state) {
2805 case RS_IDLE:
2806 if (ch == '$') {
2807 s->line_buf_index = 0;
2808 s->state = RS_GETLINE;
2809 }
2810 break;
2811 case RS_GETLINE:
2812 if (ch == '#') {
2813 s->state = RS_CHKSUM1;
2814 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2815 s->state = RS_IDLE;
2816 } else {
2817 s->line_buf[s->line_buf_index++] = ch;
2818 }
2819 break;
2820 case RS_CHKSUM1:
2821 s->line_buf[s->line_buf_index] = '\0';
2822 s->line_csum = fromhex(ch) << 4;
2823 s->state = RS_CHKSUM2;
2824 break;
2825 case RS_CHKSUM2:
2826 s->line_csum |= fromhex(ch);
2827 csum = 0;
2828 for(i = 0; i < s->line_buf_index; i++) {
2829 csum += s->line_buf[i];
2830 }
2831 if (s->line_csum != (csum & 0xff)) {
2832 reply = '-';
2833 put_buffer(s, &reply, 1);
2834 s->state = RS_IDLE;
2835 } else {
2836 reply = '+';
2837 put_buffer(s, &reply, 1);
2838 s->state = gdb_handle_packet(s, s->line_buf);
2839 }
2840 break;
2841 default:
2842 abort();
2843 }
2844 }
2845 }
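/* Packet framing recap (an illustrative sketch, not part of the original
 * file): the state machine above accepts packets of the form
 * "$<payload>#<two hex digits>", where the trailing byte is the modulo-256
 * sum of the payload characters. The running sum computed in the RS_CHKSUM2
 * state could be written on its own as:
 *
 *     static uint8_t example_checksum(const char *payload)
 *     {
 *         uint8_t sum = 0;
 *         while (*payload) {
 *             sum += (uint8_t)*payload++;   // add each payload byte
 *         }
 *         return sum;                       // low 8 bits, sent as 2 hex digits
 *     }
 *
 * so the 'g' (read all registers) request frames as "$g#67". */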
2846
2847 /* Tell the remote gdb that the process has exited. */
2848 void gdb_exit(CPUArchState *env, int code)
2849 {
2850 GDBState *s;
2851 char buf[4];
2852
2853 s = gdbserver_state;
2854 if (!s) {
2855 return;
2856 }
2857 #ifdef CONFIG_USER_ONLY
2858 if (gdbserver_fd < 0 || s->fd < 0) {
2859 return;
2860 }
2861 #endif
2862
2863 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2864 put_packet(s, buf);
2865
2866 #ifndef CONFIG_USER_ONLY
2867 if (s->chr) {
2868 qemu_chr_delete(s->chr);
2869 }
2870 #endif
2871 }
2872
2873 #ifdef CONFIG_USER_ONLY
2874 int
2875 gdb_queuesig (void)
2876 {
2877 GDBState *s;
2878
2879 s = gdbserver_state;
2880
2881 if (gdbserver_fd < 0 || s->fd < 0)
2882 return 0;
2883 else
2884 return 1;
2885 }
2886
2887 int
2888 gdb_handlesig(CPUState *cpu, int sig)
2889 {
2890 CPUArchState *env = cpu->env_ptr;
2891 GDBState *s;
2892 char buf[256];
2893 int n;
2894
2895 s = gdbserver_state;
2896 if (gdbserver_fd < 0 || s->fd < 0) {
2897 return sig;
2898 }
2899
2900 /* disable single step if it was enabled */
2901 cpu_single_step(cpu, 0);
2902 tb_flush(env);
2903
2904 if (sig != 0) {
2905 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig));
2906 put_packet(s, buf);
2907 }
2908 /* put_packet() might have detected that the peer terminated the
2909 connection. */
2910 if (s->fd < 0) {
2911 return sig;
2912 }
2913
2914 sig = 0;
2915 s->state = RS_IDLE;
2916 s->running_state = 0;
2917 while (s->running_state == 0) {
2918 n = read(s->fd, buf, 256);
2919 if (n > 0) {
2920 int i;
2921
2922 for (i = 0; i < n; i++) {
2923 gdb_read_byte(s, buf[i]);
2924 }
2925 } else if (n == 0 || errno != EAGAIN) {
2926 /* XXX: Connection closed. Should probably wait for another
2927 connection before continuing. */
2928 return sig;
2929 }
2930 }
2931 sig = s->signal;
2932 s->signal = 0;
2933 return sig;
2934 }
2935
2936 /* Tell the remote gdb that the process has exited due to SIG. */
2937 void gdb_signalled(CPUArchState *env, int sig)
2938 {
2939 GDBState *s;
2940 char buf[4];
2941
2942 s = gdbserver_state;
2943 if (gdbserver_fd < 0 || s->fd < 0) {
2944 return;
2945 }
2946
2947 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb(sig));
2948 put_packet(s, buf);
2949 }
2950
2951 static void gdb_accept(void)
2952 {
2953 GDBState *s;
2954 struct sockaddr_in sockaddr;
2955 socklen_t len;
2956 int fd;
2957
2958 for(;;) {
2959 len = sizeof(sockaddr);
2960 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2961 if (fd < 0 && errno != EINTR) {
2962 perror("accept");
2963 return;
2964 } else if (fd >= 0) {
2965 #ifndef _WIN32
2966 fcntl(fd, F_SETFD, FD_CLOEXEC);
2967 #endif
2968 break;
2969 }
2970 }
2971
2972 /* set short latency */
2973 socket_set_nodelay(fd);
2974
2975 s = g_malloc0(sizeof(GDBState));
2976 s->c_cpu = first_cpu;
2977 s->g_cpu = first_cpu;
2978 s->fd = fd;
2979 gdb_has_xml = 0;
2980
2981 gdbserver_state = s;
2982
2983 fcntl(fd, F_SETFL, O_NONBLOCK);
2984 }
2985
2986 static int gdbserver_open(int port)
2987 {
2988 struct sockaddr_in sockaddr;
2989 int fd, val, ret;
2990
2991 fd = socket(PF_INET, SOCK_STREAM, 0);
2992 if (fd < 0) {
2993 perror("socket");
2994 return -1;
2995 }
2996 #ifndef _WIN32
2997 fcntl(fd, F_SETFD, FD_CLOEXEC);
2998 #endif
2999
3000 /* allow fast reuse */
3001 val = 1;
3002 qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
3003
3004 sockaddr.sin_family = AF_INET;
3005 sockaddr.sin_port = htons(port);
3006 sockaddr.sin_addr.s_addr = 0;
3007 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
3008 if (ret < 0) {
3009 perror("bind");
3010 close(fd);
3011 return -1;
3012 }
3013 ret = listen(fd, 0);
3014 if (ret < 0) {
3015 perror("listen");
3016 close(fd);
3017 return -1;
3018 }
3019 return fd;
3020 }
3021
3022 int gdbserver_start(int port)
3023 {
3024 gdbserver_fd = gdbserver_open(port);
3025 if (gdbserver_fd < 0)
3026 return -1;
3027 /* accept connections */
3028 gdb_accept();
3029 return 0;
3030 }
3031
3032 /* Disable gdb stub for child processes. */
3033 void gdbserver_fork(CPUArchState *env)
3034 {
3035 GDBState *s = gdbserver_state;
3036 if (gdbserver_fd < 0 || s->fd < 0)
3037 return;
3038 close(s->fd);
3039 s->fd = -1;
3040 cpu_breakpoint_remove_all(env, BP_GDB);
3041 cpu_watchpoint_remove_all(env, BP_GDB);
3042 }
3043 #else
3044 static int gdb_chr_can_receive(void *opaque)
3045 {
3046 /* We can handle an arbitrarily large amount of data.
3047 Pick the maximum packet size, which is as good as anything. */
3048 return MAX_PACKET_LENGTH;
3049 }
3050
3051 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
3052 {
3053 int i;
3054
3055 for (i = 0; i < size; i++) {
3056 gdb_read_byte(gdbserver_state, buf[i]);
3057 }
3058 }
3059
3060 static void gdb_chr_event(void *opaque, int event)
3061 {
3062 switch (event) {
3063 case CHR_EVENT_OPENED:
3064 vm_stop(RUN_STATE_PAUSED);
3065 gdb_has_xml = 0;
3066 break;
3067 default:
3068 break;
3069 }
3070 }
3071
3072 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
3073 {
3074 char buf[MAX_PACKET_LENGTH];
3075
3076 buf[0] = 'O';
3077 if (len > (MAX_PACKET_LENGTH/2) - 1)
3078 len = (MAX_PACKET_LENGTH/2) - 1;
3079 memtohex(buf + 1, (uint8_t *)msg, len);
3080 put_packet(s, buf);
3081 }
3082
3083 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
3084 {
3085 const char *p = (const char *)buf;
3086 int max_sz;
3087
3088 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
3089 for (;;) {
3090 if (len <= max_sz) {
3091 gdb_monitor_output(gdbserver_state, p, len);
3092 break;
3093 }
3094 gdb_monitor_output(gdbserver_state, p, max_sz);
3095 p += max_sz;
3096 len -= max_sz;
3097 }
3098 return len;
3099 }
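/* Round-trip example (illustrative, relying on standard gdb behaviour rather
 * than anything defined in this file): typing "monitor info registers" in gdb
 * sends a "qRcmd,<hex-encoded command>" packet, which the qRcmd branch of
 * gdb_handle_packet() decodes and feeds to the monitor via
 * qemu_chr_be_write(); the monitor's output then flows back through
 * gdb_monitor_write()/gdb_monitor_output() above as one or more
 * "O<hex-encoded text>" packets, followed by the final "OK". */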
3100
3101 #ifndef _WIN32
3102 static void gdb_sigterm_handler(int signal)
3103 {
3104 if (runstate_is_running()) {
3105 vm_stop(RUN_STATE_PAUSED);
3106 }
3107 }
3108 #endif
3109
3110 int gdbserver_start(const char *device)
3111 {
3112 GDBState *s;
3113 char gdbstub_device_name[128];
3114 CharDriverState *chr = NULL;
3115 CharDriverState *mon_chr;
3116
3117 if (!device)
3118 return -1;
3119 if (strcmp(device, "none") != 0) {
3120 if (strstart(device, "tcp:", NULL)) {
3121 /* enforce required TCP attributes */
3122 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3123 "%s,nowait,nodelay,server", device);
3124 device = gdbstub_device_name;
3125 }
3126 #ifndef _WIN32
3127 else if (strcmp(device, "stdio") == 0) {
3128 struct sigaction act;
3129
3130 memset(&act, 0, sizeof(act));
3131 act.sa_handler = gdb_sigterm_handler;
3132 sigaction(SIGINT, &act, NULL);
3133 }
3134 #endif
3135 chr = qemu_chr_new("gdb", device, NULL);
3136 if (!chr)
3137 return -1;
3138
3139 qemu_chr_fe_claim_no_fail(chr);
3140 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3141 gdb_chr_event, NULL);
3142 }
3143
3144 s = gdbserver_state;
3145 if (!s) {
3146 s = g_malloc0(sizeof(GDBState));
3147 gdbserver_state = s;
3148
3149 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3150
3151 /* Initialize a monitor terminal for gdb */
3152 mon_chr = g_malloc0(sizeof(*mon_chr));
3153 mon_chr->chr_write = gdb_monitor_write;
3154 monitor_init(mon_chr, 0);
3155 } else {
3156 if (s->chr)
3157 qemu_chr_delete(s->chr);
3158 mon_chr = s->mon_chr;
3159 memset(s, 0, sizeof(GDBState));
3160 }
3161 s->c_cpu = first_cpu;
3162 s->g_cpu = first_cpu;
3163 s->chr = chr;
3164 s->state = chr ? RS_IDLE : RS_INACTIVE;
3165 s->mon_chr = mon_chr;
3166 s->current_syscall_cb = NULL;
3167
3168 return 0;
3169 }
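/* Usage sketch (illustrative): in system emulation this entry point is
 * reached from the command line, e.g. "-gdb tcp::1234" (or the "-s"
 * shorthand for it), which amounts to calling
 *
 *     gdbserver_start("tcp::1234");
 *
 * The tcp: branch above then rewrites the device string to
 * "tcp::1234,nowait,nodelay,server" before handing it to qemu_chr_new(), so
 * QEMU listens for the debugger without blocking startup and with Nagle
 * disabled on the connection. */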
3170 #endif