linux-user/main.c
1 /*
2 * qemu user main
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu-version.h"
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
23
24 #include "qapi/error.h"
25 #include "qemu.h"
26 #include "qemu/path.h"
27 #include "qemu/config-file.h"
28 #include "qemu/cutils.h"
29 #include "qemu/help_option.h"
30 #include "cpu.h"
31 #include "exec/exec-all.h"
32 #include "tcg.h"
33 #include "qemu/timer.h"
34 #include "qemu/envlist.h"
35 #include "elf.h"
36 #include "exec/log.h"
37 #include "trace/control.h"
38 #include "glib-compat.h"
39
40 char *exec_path;
41
42 int singlestep;
43 static const char *filename;
44 static const char *argv0;
45 static int gdbstub_port;
46 static envlist_t *envlist;
47 static const char *cpu_model;
48 unsigned long mmap_min_addr;
49 unsigned long guest_base;
50 int have_guest_base;
51
52 #define EXCP_DUMP(env, fmt, ...) \
53 do { \
54 CPUState *cs = ENV_GET_CPU(env); \
55 fprintf(stderr, fmt , ## __VA_ARGS__); \
56 cpu_dump_state(cs, stderr, fprintf, 0); \
57 if (qemu_log_separate()) { \
58 qemu_log(fmt, ## __VA_ARGS__); \
59 log_cpu_state(cs, 0); \
60 } \
61 } while (0)
62
63 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
64 /*
65 * When running 32-on-64 we should make sure we can fit all of the possible
66 * guest address space into a contiguous chunk of virtual host memory.
67 *
68 * This way we will never overlap with our own libraries or binaries or stack
69 * or anything else that QEMU maps.
70 */
71 # ifdef TARGET_MIPS
72 /* MIPS only supports 31 bits of virtual address space for user space */
73 unsigned long reserved_va = 0x77000000;
74 # else
75 unsigned long reserved_va = 0xf7000000;
76 # endif
77 #else
78 unsigned long reserved_va;
79 #endif
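/* When reserved_va is non-zero, the user-mode startup/mmap code uses it
 * as the size of a single up-front reservation of host address space for
 * the guest, which is what keeps guest mappings inside one contiguous
 * region as described above. */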
80
81 static void usage(int exitcode);
82
83 static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
84 const char *qemu_uname_release;
85
86 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
87 we allocate a bigger stack. Need a better solution, for example
88 by remapping the process stack directly at the right place */
89 unsigned long guest_stack_size = 8 * 1024 * 1024UL;
90
91 void gemu_log(const char *fmt, ...)
92 {
93 va_list ap;
94
95 va_start(ap, fmt);
96 vfprintf(stderr, fmt, ap);
97 va_end(ap);
98 }
99
100 #if defined(TARGET_I386)
101 int cpu_get_pic_interrupt(CPUX86State *env)
102 {
103 return -1;
104 }
105 #endif
106
107 /***********************************************************/
108 /* Helper routines for implementing atomic operations. */
109
110 /* To implement exclusive operations we force all CPUs to synchronise.
111 We don't require a full sync, only that no CPUs are executing guest code.
112 The alternative is to map target atomic ops onto host equivalents,
113 which requires quite a lot of per host/target work. */
114 static QemuMutex exclusive_lock;
115 static QemuCond exclusive_cond;
116 static QemuCond exclusive_resume;
117 static int pending_cpus;
118
119 void qemu_init_cpu_loop(void)
120 {
121 qemu_mutex_init(&exclusive_lock);
122 qemu_cond_init(&exclusive_cond);
123 qemu_cond_init(&exclusive_resume);
124 }
125
126 /* Make sure everything is in a consistent state for calling fork(). */
127 void fork_start(void)
128 {
129 cpu_list_lock();
130 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
131 qemu_mutex_lock(&exclusive_lock);
132 mmap_fork_start();
133 }
134
135 void fork_end(int child)
136 {
137 mmap_fork_end(child);
138 if (child) {
139 CPUState *cpu, *next_cpu;
140 /* Child processes created by fork() only have a single thread.
141 Discard information about the parent threads. */
142 CPU_FOREACH_SAFE(cpu, next_cpu) {
143 if (cpu != thread_cpu) {
144 QTAILQ_REMOVE(&cpus, cpu, node);
145 }
146 }
147 pending_cpus = 0;
148 qemu_mutex_init(&exclusive_lock);
149 qemu_cond_init(&exclusive_cond);
150 qemu_cond_init(&exclusive_resume);
151 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
152 qemu_init_cpu_list();
153 gdbserver_fork(thread_cpu);
154 } else {
155 qemu_mutex_unlock(&exclusive_lock);
156 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
157 cpu_list_unlock();
158 }
159 }
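/* Note the asymmetry above: in the child, the parent's other threads no
 * longer exist but may have held these locks at the time of fork(), so
 * the locks and condition variables are re-initialised instead of being
 * unlocked; the parent, whose threads are intact, simply releases the
 * locks taken in fork_start(). */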
160
161 /* Wait for pending exclusive operations to complete. The exclusive lock
162 must be held. */
163 static inline void exclusive_idle(void)
164 {
165 while (pending_cpus) {
166 qemu_cond_wait(&exclusive_resume, &exclusive_lock);
167 }
168 }
169
170 /* Start an exclusive operation.
171 Must only be called from outside cpu_exec. */
172 static inline void start_exclusive(void)
173 {
174 CPUState *other_cpu;
175
176 qemu_mutex_lock(&exclusive_lock);
177 exclusive_idle();
178
179 pending_cpus = 1;
180 /* Make all other cpus stop executing. */
181 CPU_FOREACH(other_cpu) {
182 if (other_cpu->running) {
183 pending_cpus++;
184 cpu_exit(other_cpu);
185 }
186 }
187 while (pending_cpus > 1) {
188 qemu_cond_wait(&exclusive_cond, &exclusive_lock);
189 }
190 }
191
192 /* Finish an exclusive operation. */
193 static inline void __attribute__((unused)) end_exclusive(void)
194 {
195 pending_cpus = 0;
196 qemu_cond_broadcast(&exclusive_resume);
197 qemu_mutex_unlock(&exclusive_lock);
198 }
199
200 /* Wait for exclusive ops to finish, and begin cpu execution. */
201 static inline void cpu_exec_start(CPUState *cpu)
202 {
203 qemu_mutex_lock(&exclusive_lock);
204 exclusive_idle();
205 cpu->running = true;
206 qemu_mutex_unlock(&exclusive_lock);
207 }
208
209 /* Mark cpu as not executing, and release pending exclusive ops. */
210 static inline void cpu_exec_end(CPUState *cpu)
211 {
212 qemu_mutex_lock(&exclusive_lock);
213 cpu->running = false;
214 if (pending_cpus > 1) {
215 pending_cpus--;
216 if (pending_cpus == 1) {
217 qemu_cond_signal(&exclusive_cond);
218 }
219 }
220 exclusive_idle();
221 qemu_mutex_unlock(&exclusive_lock);
222 }
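/* Typical usage of the helpers above, as seen in the cpu_loop()
 * implementations and the atomic helpers later in this file: each guest
 * CPU thread wraps cpu_exec() in cpu_exec_start()/cpu_exec_end(), while
 * code that must run with no CPU executing guest code (e.g. the ARM
 * cmpxchg kernel helpers and the store-exclusive emulation) brackets its
 * guest memory accesses with start_exclusive()/end_exclusive(). */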
223
224
225 #ifdef TARGET_I386
226 /***********************************************************/
227 /* CPUX86 core interface */
228
229 uint64_t cpu_get_tsc(CPUX86State *env)
230 {
231 return cpu_get_host_ticks();
232 }
233
234 static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
235 int flags)
236 {
237 unsigned int e1, e2;
238 uint32_t *p;
239 e1 = (addr << 16) | (limit & 0xffff);
240 e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
241 e2 |= flags;
242 p = ptr;
243 p[0] = tswap32(e1);
244 p[1] = tswap32(e2);
245 }
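/* write_dt() above packs a standard x86 segment descriptor: e1 carries
 * the low 16 bits of the base in its upper half and the low 16 bits of
 * the limit in its lower half, while e2 carries the remaining base and
 * limit bits together with the caller-supplied flags (type, DPL,
 * present, granularity). */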
246
247 static uint64_t *idt_table;
248 #ifdef TARGET_X86_64
249 static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
250 uint64_t addr, unsigned int sel)
251 {
252 uint32_t *p, e1, e2;
253 e1 = (addr & 0xffff) | (sel << 16);
254 e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
255 p = ptr;
256 p[0] = tswap32(e1);
257 p[1] = tswap32(e2);
258 p[2] = tswap32(addr >> 32);
259 p[3] = 0;
260 }
261 /* only dpl matters as we do only user space emulation */
262 static void set_idt(int n, unsigned int dpl)
263 {
264 set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
265 }
266 #else
267 static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
268 uint32_t addr, unsigned int sel)
269 {
270 uint32_t *p, e1, e2;
271 e1 = (addr & 0xffff) | (sel << 16);
272 e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
273 p = ptr;
274 p[0] = tswap32(e1);
275 p[1] = tswap32(e2);
276 }
277
278 /* only dpl matters as we do only user space emulation */
279 static void set_idt(int n, unsigned int dpl)
280 {
281 set_gate(idt_table + n, 0, dpl, 0, 0);
282 }
283 #endif
284
285 void cpu_loop(CPUX86State *env)
286 {
287 CPUState *cs = CPU(x86_env_get_cpu(env));
288 int trapnr;
289 abi_ulong pc;
290 abi_ulong ret;
291 target_siginfo_t info;
292
293 for(;;) {
294 cpu_exec_start(cs);
295 trapnr = cpu_exec(cs);
296 cpu_exec_end(cs);
297 process_queued_cpu_work(cs);
298
299 switch(trapnr) {
300 case 0x80:
301 /* linux syscall from int $0x80 */
302 ret = do_syscall(env,
303 env->regs[R_EAX],
304 env->regs[R_EBX],
305 env->regs[R_ECX],
306 env->regs[R_EDX],
307 env->regs[R_ESI],
308 env->regs[R_EDI],
309 env->regs[R_EBP],
310 0, 0);
311 if (ret == -TARGET_ERESTARTSYS) {
312 env->eip -= 2;
313 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
314 env->regs[R_EAX] = ret;
315 }
316 break;
317 #ifndef TARGET_ABI32
318 case EXCP_SYSCALL:
319 /* linux syscall from syscall instruction */
320 ret = do_syscall(env,
321 env->regs[R_EAX],
322 env->regs[R_EDI],
323 env->regs[R_ESI],
324 env->regs[R_EDX],
325 env->regs[10],
326 env->regs[8],
327 env->regs[9],
328 0, 0);
329 if (ret == -TARGET_ERESTARTSYS) {
330 env->eip -= 2;
331 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
332 env->regs[R_EAX] = ret;
333 }
334 break;
335 #endif
336 case EXCP0B_NOSEG:
337 case EXCP0C_STACK:
338 info.si_signo = TARGET_SIGBUS;
339 info.si_errno = 0;
340 info.si_code = TARGET_SI_KERNEL;
341 info._sifields._sigfault._addr = 0;
342 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
343 break;
344 case EXCP0D_GPF:
345 /* XXX: potential problem if ABI32 */
346 #ifndef TARGET_X86_64
347 if (env->eflags & VM_MASK) {
348 handle_vm86_fault(env);
349 } else
350 #endif
351 {
352 info.si_signo = TARGET_SIGSEGV;
353 info.si_errno = 0;
354 info.si_code = TARGET_SI_KERNEL;
355 info._sifields._sigfault._addr = 0;
356 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
357 }
358 break;
359 case EXCP0E_PAGE:
360 info.si_signo = TARGET_SIGSEGV;
361 info.si_errno = 0;
362 if (!(env->error_code & 1))
363 info.si_code = TARGET_SEGV_MAPERR;
364 else
365 info.si_code = TARGET_SEGV_ACCERR;
366 info._sifields._sigfault._addr = env->cr[2];
367 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
368 break;
369 case EXCP00_DIVZ:
370 #ifndef TARGET_X86_64
371 if (env->eflags & VM_MASK) {
372 handle_vm86_trap(env, trapnr);
373 } else
374 #endif
375 {
376 /* division by zero */
377 info.si_signo = TARGET_SIGFPE;
378 info.si_errno = 0;
379 info.si_code = TARGET_FPE_INTDIV;
380 info._sifields._sigfault._addr = env->eip;
381 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
382 }
383 break;
384 case EXCP01_DB:
385 case EXCP03_INT3:
386 #ifndef TARGET_X86_64
387 if (env->eflags & VM_MASK) {
388 handle_vm86_trap(env, trapnr);
389 } else
390 #endif
391 {
392 info.si_signo = TARGET_SIGTRAP;
393 info.si_errno = 0;
394 if (trapnr == EXCP01_DB) {
395 info.si_code = TARGET_TRAP_BRKPT;
396 info._sifields._sigfault._addr = env->eip;
397 } else {
398 info.si_code = TARGET_SI_KERNEL;
399 info._sifields._sigfault._addr = 0;
400 }
401 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
402 }
403 break;
404 case EXCP04_INTO:
405 case EXCP05_BOUND:
406 #ifndef TARGET_X86_64
407 if (env->eflags & VM_MASK) {
408 handle_vm86_trap(env, trapnr);
409 } else
410 #endif
411 {
412 info.si_signo = TARGET_SIGSEGV;
413 info.si_errno = 0;
414 info.si_code = TARGET_SI_KERNEL;
415 info._sifields._sigfault._addr = 0;
416 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
417 }
418 break;
419 case EXCP06_ILLOP:
420 info.si_signo = TARGET_SIGILL;
421 info.si_errno = 0;
422 info.si_code = TARGET_ILL_ILLOPN;
423 info._sifields._sigfault._addr = env->eip;
424 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
425 break;
426 case EXCP_INTERRUPT:
427 /* just indicate that signals should be handled asap */
428 break;
429 case EXCP_DEBUG:
430 {
431 int sig;
432
433 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
434 if (sig)
435 {
436 info.si_signo = sig;
437 info.si_errno = 0;
438 info.si_code = TARGET_TRAP_BRKPT;
439 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
440 }
441 }
442 break;
443 default:
444 pc = env->segs[R_CS].base + env->eip;
445 EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
446 (long)pc, trapnr);
447 abort();
448 }
449 process_pending_signals(env);
450 }
451 }
452 #endif
453
454 #ifdef TARGET_ARM
455
456 #define get_user_code_u32(x, gaddr, env) \
457 ({ abi_long __r = get_user_u32((x), (gaddr)); \
458 if (!__r && bswap_code(arm_sctlr_b(env))) { \
459 (x) = bswap32(x); \
460 } \
461 __r; \
462 })
463
464 #define get_user_code_u16(x, gaddr, env) \
465 ({ abi_long __r = get_user_u16((x), (gaddr)); \
466 if (!__r && bswap_code(arm_sctlr_b(env))) { \
467 (x) = bswap16(x); \
468 } \
469 __r; \
470 })
471
472 #define get_user_data_u32(x, gaddr, env) \
473 ({ abi_long __r = get_user_u32((x), (gaddr)); \
474 if (!__r && arm_cpu_bswap_data(env)) { \
475 (x) = bswap32(x); \
476 } \
477 __r; \
478 })
479
480 #define get_user_data_u16(x, gaddr, env) \
481 ({ abi_long __r = get_user_u16((x), (gaddr)); \
482 if (!__r && arm_cpu_bswap_data(env)) { \
483 (x) = bswap16(x); \
484 } \
485 __r; \
486 })
487
488 #define put_user_data_u32(x, gaddr, env) \
489 ({ typeof(x) __x = (x); \
490 if (arm_cpu_bswap_data(env)) { \
491 __x = bswap32(__x); \
492 } \
493 put_user_u32(__x, (gaddr)); \
494 })
495
496 #define put_user_data_u16(x, gaddr, env) \
497 ({ typeof(x) __x = (x); \
498 if (arm_cpu_bswap_data(env)) { \
499 __x = bswap16(__x); \
500 } \
501 put_user_u16(__x, (gaddr)); \
502 })
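/* The *_code_* macros above byte-swap the value when
 * bswap_code(arm_sctlr_b(env)) says the instruction stream is stored in
 * the opposite byte order, and the *_data_* macros swap when
 * arm_cpu_bswap_data(env) is true; all of them return the underlying
 * get_user/put_user result so callers can check for faults, e.g.
 *
 *     if (get_user_code_u32(insn, env->regs[15], env)) {
 *         handle the fault;
 *     }
 */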
503
504 #ifdef TARGET_ABI32
505 /* Commpage handling -- there is no commpage for AArch64 */
506
507 /*
508 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
509 * Input:
510 * r0 = pointer to oldval
511 * r1 = pointer to newval
512 * r2 = pointer to target value
513 *
514 * Output:
515 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
516 * C set if *ptr was changed, clear if no exchange happened
517 *
518 * Note that SEGVs in kernel helpers are a bit tricky: we can set the
519 * data address sensibly, but the PC address is just the entry point.
520 */
521 static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
522 {
523 uint64_t oldval, newval, val;
524 uint32_t addr, cpsr;
525 target_siginfo_t info;
526
527 /* Based on the 32 bit code in do_kernel_trap */
528
529 /* XXX: This only works between threads, not between processes.
530 It's probably possible to implement this with native host
531 operations. However things like ldrex/strex are much harder so
532 there's not much point trying. */
533 start_exclusive();
534 cpsr = cpsr_read(env);
535 addr = env->regs[2];
536
537 if (get_user_u64(oldval, env->regs[0])) {
538 env->exception.vaddress = env->regs[0];
539 goto segv;
540 };
541
542 if (get_user_u64(newval, env->regs[1])) {
543 env->exception.vaddress = env->regs[1];
544 goto segv;
545 };
546
547 if (get_user_u64(val, addr)) {
548 env->exception.vaddress = addr;
549 goto segv;
550 }
551
552 if (val == oldval) {
553 val = newval;
554
555 if (put_user_u64(val, addr)) {
556 env->exception.vaddress = addr;
557 goto segv;
558 };
559
560 env->regs[0] = 0;
561 cpsr |= CPSR_C;
562 } else {
563 env->regs[0] = -1;
564 cpsr &= ~CPSR_C;
565 }
566 cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
567 end_exclusive();
568 return;
569
570 segv:
571 end_exclusive();
572 /* We get the PC of the entry address - which is as good as anything;
573 on a real kernel what you get depends on which mode it uses. */
574 info.si_signo = TARGET_SIGSEGV;
575 info.si_errno = 0;
576 /* XXX: check env->error_code */
577 info.si_code = TARGET_SEGV_MAPERR;
578 info._sifields._sigfault._addr = env->exception.vaddress;
579 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
580 }
581
582 /* Handle a jump to the kernel code page. */
583 static int
584 do_kernel_trap(CPUARMState *env)
585 {
586 uint32_t addr;
587 uint32_t cpsr;
588 uint32_t val;
589
590 switch (env->regs[15]) {
591 case 0xffff0fa0: /* __kernel_memory_barrier */
592 /* ??? No-op. Will need to do better for SMP. */
593 break;
594 case 0xffff0fc0: /* __kernel_cmpxchg */
595 /* XXX: This only works between threads, not between processes.
596 It's probably possible to implement this with native host
597 operations. However things like ldrex/strex are much harder so
598 there's not much point trying. */
599 start_exclusive();
600 cpsr = cpsr_read(env);
601 addr = env->regs[2];
602 /* FIXME: This should SEGV if the access fails. */
603 if (get_user_u32(val, addr))
604 val = ~env->regs[0];
605 if (val == env->regs[0]) {
606 val = env->regs[1];
607 /* FIXME: Check for segfaults. */
608 put_user_u32(val, addr);
609 env->regs[0] = 0;
610 cpsr |= CPSR_C;
611 } else {
612 env->regs[0] = -1;
613 cpsr &= ~CPSR_C;
614 }
615 cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
616 end_exclusive();
617 break;
618 case 0xffff0fe0: /* __kernel_get_tls */
619 env->regs[0] = cpu_get_tls(env);
620 break;
621 case 0xffff0f60: /* __kernel_cmpxchg64 */
622 arm_kernel_cmpxchg64_helper(env);
623 break;
624
625 default:
626 return 1;
627 }
628 /* Jump back to the caller. */
629 addr = env->regs[14];
630 if (addr & 1) {
631 env->thumb = 1;
632 addr &= ~1;
633 }
634 env->regs[15] = addr;
635
636 return 0;
637 }
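/* The absolute addresses tested above (0xffff0fa0, 0xffff0fc0,
 * 0xffff0fe0, 0xffff0f60) are the fixed entry points of the kernel user
 * helpers in the commpage, per the Documentation/arm/kernel_user_helpers.txt
 * reference cited earlier in this file. */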
638
639 /* Store exclusive handling for AArch32 */
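/* As decoded below, env->exclusive_info packs the operand size in bits
 * [3:0], the status (result) register number in bits [7:4], the register
 * holding the value to store in bits [11:8] and, for 64-bit stores, the
 * high-word register in bits [15:12]. */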
640 static int do_strex(CPUARMState *env)
641 {
642 uint64_t val;
643 int size;
644 int rc = 1;
645 int segv = 0;
646 uint32_t addr;
647 start_exclusive();
648 if (env->exclusive_addr != env->exclusive_test) {
649 goto fail;
650 }
651 /* We know we're always AArch32 so the address is in uint32_t range
652 * unless it was the -1 exclusive-monitor-lost value (which won't
653 * match exclusive_test above).
654 */
655 assert(extract64(env->exclusive_addr, 32, 32) == 0);
656 addr = env->exclusive_addr;
657 size = env->exclusive_info & 0xf;
658 switch (size) {
659 case 0:
660 segv = get_user_u8(val, addr);
661 break;
662 case 1:
663 segv = get_user_data_u16(val, addr, env);
664 break;
665 case 2:
666 case 3:
667 segv = get_user_data_u32(val, addr, env);
668 break;
669 default:
670 abort();
671 }
672 if (segv) {
673 env->exception.vaddress = addr;
674 goto done;
675 }
676 if (size == 3) {
677 uint32_t valhi;
678 segv = get_user_data_u32(valhi, addr + 4, env);
679 if (segv) {
680 env->exception.vaddress = addr + 4;
681 goto done;
682 }
683 if (arm_cpu_bswap_data(env)) {
684 val = deposit64((uint64_t)valhi, 32, 32, val);
685 } else {
686 val = deposit64(val, 32, 32, valhi);
687 }
688 }
689 if (val != env->exclusive_val) {
690 goto fail;
691 }
692
693 val = env->regs[(env->exclusive_info >> 8) & 0xf];
694 switch (size) {
695 case 0:
696 segv = put_user_u8(val, addr);
697 break;
698 case 1:
699 segv = put_user_data_u16(val, addr, env);
700 break;
701 case 2:
702 case 3:
703 segv = put_user_data_u32(val, addr, env);
704 break;
705 }
706 if (segv) {
707 env->exception.vaddress = addr;
708 goto done;
709 }
710 if (size == 3) {
711 val = env->regs[(env->exclusive_info >> 12) & 0xf];
712 segv = put_user_data_u32(val, addr + 4, env);
713 if (segv) {
714 env->exception.vaddress = addr + 4;
715 goto done;
716 }
717 }
718 rc = 0;
719 fail:
720 env->regs[15] += 4;
721 env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
722 done:
723 end_exclusive();
724 return segv;
725 }
726
727 void cpu_loop(CPUARMState *env)
728 {
729 CPUState *cs = CPU(arm_env_get_cpu(env));
730 int trapnr;
731 unsigned int n, insn;
732 target_siginfo_t info;
733 uint32_t addr;
734 abi_ulong ret;
735
736 for(;;) {
737 cpu_exec_start(cs);
738 trapnr = cpu_exec(cs);
739 cpu_exec_end(cs);
740 process_queued_cpu_work(cs);
741
742 switch(trapnr) {
743 case EXCP_UDEF:
744 {
745 TaskState *ts = cs->opaque;
746 uint32_t opcode;
747 int rc;
748
749 /* we handle the FPU emulation here, as the Linux kernel does */
750 /* we get the opcode */
751 /* FIXME - what to do if get_user() fails? */
752 get_user_code_u32(opcode, env->regs[15], env);
753
754 rc = EmulateAll(opcode, &ts->fpa, env);
755 if (rc == 0) { /* illegal instruction */
756 info.si_signo = TARGET_SIGILL;
757 info.si_errno = 0;
758 info.si_code = TARGET_ILL_ILLOPN;
759 info._sifields._sigfault._addr = env->regs[15];
760 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
761 } else if (rc < 0) { /* FP exception */
762 int arm_fpe=0;
763
764 /* translate softfloat flags to FPSR flags */
765 if (-rc & float_flag_invalid)
766 arm_fpe |= BIT_IOC;
767 if (-rc & float_flag_divbyzero)
768 arm_fpe |= BIT_DZC;
769 if (-rc & float_flag_overflow)
770 arm_fpe |= BIT_OFC;
771 if (-rc & float_flag_underflow)
772 arm_fpe |= BIT_UFC;
773 if (-rc & float_flag_inexact)
774 arm_fpe |= BIT_IXC;
775
776 FPSR fpsr = ts->fpa.fpsr;
777 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
778
779 if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
780 info.si_signo = TARGET_SIGFPE;
781 info.si_errno = 0;
782
783 /* ordered by priority, least first */
784 if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
785 if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
786 if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
787 if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
788 if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;
789
790 info._sifields._sigfault._addr = env->regs[15];
791 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
792 } else {
793 env->regs[15] += 4;
794 }
795
796 /* accumulate unenabled exceptions */
797 if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
798 fpsr |= BIT_IXC;
799 if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
800 fpsr |= BIT_UFC;
801 if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
802 fpsr |= BIT_OFC;
803 if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
804 fpsr |= BIT_DZC;
805 if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
806 fpsr |= BIT_IOC;
807 ts->fpa.fpsr=fpsr;
808 } else { /* everything OK */
809 /* increment PC */
810 env->regs[15] += 4;
811 }
812 }
813 break;
814 case EXCP_SWI:
815 case EXCP_BKPT:
816 {
817 env->eabi = 1;
818 /* system call */
819 if (trapnr == EXCP_BKPT) {
820 if (env->thumb) {
821 /* FIXME - what to do if get_user() fails? */
822 get_user_code_u16(insn, env->regs[15], env);
823 n = insn & 0xff;
824 env->regs[15] += 2;
825 } else {
826 /* FIXME - what to do if get_user() fails? */
827 get_user_code_u32(insn, env->regs[15], env);
828 n = (insn & 0xf) | ((insn >> 4) & 0xff0);
829 env->regs[15] += 4;
830 }
831 } else {
832 if (env->thumb) {
833 /* FIXME - what to do if get_user() fails? */
834 get_user_code_u16(insn, env->regs[15] - 2, env);
835 n = insn & 0xff;
836 } else {
837 /* FIXME - what to do if get_user() fails? */
838 get_user_code_u32(insn, env->regs[15] - 4, env);
839 n = insn & 0xffffff;
840 }
841 }
842
843 if (n == ARM_NR_cacheflush) {
844 /* nop */
845 } else if (n == ARM_NR_semihosting
846 || n == ARM_NR_thumb_semihosting) {
847 env->regs[0] = do_arm_semihosting (env);
848 } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
849 /* linux syscall */
850 if (env->thumb || n == 0) {
851 n = env->regs[7];
852 } else {
853 n -= ARM_SYSCALL_BASE;
854 env->eabi = 0;
855 }
856 if ( n > ARM_NR_BASE) {
857 switch (n) {
858 case ARM_NR_cacheflush:
859 /* nop */
860 break;
861 case ARM_NR_set_tls:
862 cpu_set_tls(env, env->regs[0]);
863 env->regs[0] = 0;
864 break;
865 case ARM_NR_breakpoint:
866 env->regs[15] -= env->thumb ? 2 : 4;
867 goto excp_debug;
868 default:
869 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
870 n);
871 env->regs[0] = -TARGET_ENOSYS;
872 break;
873 }
874 } else {
875 ret = do_syscall(env,
876 n,
877 env->regs[0],
878 env->regs[1],
879 env->regs[2],
880 env->regs[3],
881 env->regs[4],
882 env->regs[5],
883 0, 0);
884 if (ret == -TARGET_ERESTARTSYS) {
885 env->regs[15] -= env->thumb ? 2 : 4;
886 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
887 env->regs[0] = ret;
888 }
889 }
890 } else {
891 goto error;
892 }
893 }
894 break;
895 case EXCP_INTERRUPT:
896 /* just indicate that signals should be handled asap */
897 break;
898 case EXCP_STREX:
899 if (!do_strex(env)) {
900 break;
901 }
902 /* fall through for segv */
903 case EXCP_PREFETCH_ABORT:
904 case EXCP_DATA_ABORT:
905 addr = env->exception.vaddress;
906 {
907 info.si_signo = TARGET_SIGSEGV;
908 info.si_errno = 0;
909 /* XXX: check env->error_code */
910 info.si_code = TARGET_SEGV_MAPERR;
911 info._sifields._sigfault._addr = addr;
912 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
913 }
914 break;
915 case EXCP_DEBUG:
916 excp_debug:
917 {
918 int sig;
919
920 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
921 if (sig)
922 {
923 info.si_signo = sig;
924 info.si_errno = 0;
925 info.si_code = TARGET_TRAP_BRKPT;
926 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
927 }
928 }
929 break;
930 case EXCP_KERNEL_TRAP:
931 if (do_kernel_trap(env))
932 goto error;
933 break;
934 case EXCP_YIELD:
935 /* nothing to do here for user-mode, just resume guest code */
936 break;
937 default:
938 error:
939 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
940 abort();
941 }
942 process_pending_signals(env);
943 }
944 }
945
946 #else
947
948 /*
949 * Handle AArch64 store-release exclusive
950 *
951 * rs = register that receives the status result of the store exclusive
952 * rt = register that is stored
953 * rt2 = second register stored (for STP)
954 *
955 */
956 static int do_strex_a64(CPUARMState *env)
957 {
958 uint64_t val;
959 int size;
960 bool is_pair;
961 int rc = 1;
962 int segv = 0;
963 uint64_t addr;
964 int rs, rt, rt2;
965
966 start_exclusive();
967 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
968 size = extract32(env->exclusive_info, 0, 2);
969 is_pair = extract32(env->exclusive_info, 2, 1);
970 rs = extract32(env->exclusive_info, 4, 5);
971 rt = extract32(env->exclusive_info, 9, 5);
972 rt2 = extract32(env->exclusive_info, 14, 5);
973
974 addr = env->exclusive_addr;
975
976 if (addr != env->exclusive_test) {
977 goto finish;
978 }
979
980 switch (size) {
981 case 0:
982 segv = get_user_u8(val, addr);
983 break;
984 case 1:
985 segv = get_user_u16(val, addr);
986 break;
987 case 2:
988 segv = get_user_u32(val, addr);
989 break;
990 case 3:
991 segv = get_user_u64(val, addr);
992 break;
993 default:
994 abort();
995 }
996 if (segv) {
997 env->exception.vaddress = addr;
998 goto error;
999 }
1000 if (val != env->exclusive_val) {
1001 goto finish;
1002 }
1003 if (is_pair) {
1004 if (size == 2) {
1005 segv = get_user_u32(val, addr + 4);
1006 } else {
1007 segv = get_user_u64(val, addr + 8);
1008 }
1009 if (segv) {
1010 env->exception.vaddress = addr + (size == 2 ? 4 : 8);
1011 goto error;
1012 }
1013 if (val != env->exclusive_high) {
1014 goto finish;
1015 }
1016 }
1017 /* handle the zero register */
1018 val = rt == 31 ? 0 : env->xregs[rt];
1019 switch (size) {
1020 case 0:
1021 segv = put_user_u8(val, addr);
1022 break;
1023 case 1:
1024 segv = put_user_u16(val, addr);
1025 break;
1026 case 2:
1027 segv = put_user_u32(val, addr);
1028 break;
1029 case 3:
1030 segv = put_user_u64(val, addr);
1031 break;
1032 }
1033 if (segv) {
1034 goto error;
1035 }
1036 if (is_pair) {
1037 /* handle the zero register */
1038 val = rt2 == 31 ? 0 : env->xregs[rt2];
1039 if (size == 2) {
1040 segv = put_user_u32(val, addr + 4);
1041 } else {
1042 segv = put_user_u64(val, addr + 8);
1043 }
1044 if (segv) {
1045 env->exception.vaddress = addr + (size == 2 ? 4 : 8);
1046 goto error;
1047 }
1048 }
1049 rc = 0;
1050 finish:
1051 env->pc += 4;
1052 /* rs == 31 encodes a write to the ZR, thus throwing away
1053 * the status return. This is rather silly but valid.
1054 */
1055 if (rs < 31) {
1056 env->xregs[rs] = rc;
1057 }
1058 error:
1059 /* instruction faulted, PC does not advance */
1060 /* either way a strex releases any exclusive lock we have */
1061 env->exclusive_addr = -1;
1062 end_exclusive();
1063 return segv;
1064 }
1065
1066 /* AArch64 main loop */
1067 void cpu_loop(CPUARMState *env)
1068 {
1069 CPUState *cs = CPU(arm_env_get_cpu(env));
1070 int trapnr, sig;
1071 abi_long ret;
1072 target_siginfo_t info;
1073
1074 for (;;) {
1075 cpu_exec_start(cs);
1076 trapnr = cpu_exec(cs);
1077 cpu_exec_end(cs);
1078 process_queued_cpu_work(cs);
1079
1080 switch (trapnr) {
1081 case EXCP_SWI:
1082 ret = do_syscall(env,
1083 env->xregs[8],
1084 env->xregs[0],
1085 env->xregs[1],
1086 env->xregs[2],
1087 env->xregs[3],
1088 env->xregs[4],
1089 env->xregs[5],
1090 0, 0);
1091 if (ret == -TARGET_ERESTARTSYS) {
1092 env->pc -= 4;
1093 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
1094 env->xregs[0] = ret;
1095 }
1096 break;
1097 case EXCP_INTERRUPT:
1098 /* just indicate that signals should be handled asap */
1099 break;
1100 case EXCP_UDEF:
1101 info.si_signo = TARGET_SIGILL;
1102 info.si_errno = 0;
1103 info.si_code = TARGET_ILL_ILLOPN;
1104 info._sifields._sigfault._addr = env->pc;
1105 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1106 break;
1107 case EXCP_STREX:
1108 if (!do_strex_a64(env)) {
1109 break;
1110 }
1111 /* fall through for segv */
1112 case EXCP_PREFETCH_ABORT:
1113 case EXCP_DATA_ABORT:
1114 info.si_signo = TARGET_SIGSEGV;
1115 info.si_errno = 0;
1116 /* XXX: check env->error_code */
1117 info.si_code = TARGET_SEGV_MAPERR;
1118 info._sifields._sigfault._addr = env->exception.vaddress;
1119 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1120 break;
1121 case EXCP_DEBUG:
1122 case EXCP_BKPT:
1123 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1124 if (sig) {
1125 info.si_signo = sig;
1126 info.si_errno = 0;
1127 info.si_code = TARGET_TRAP_BRKPT;
1128 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1129 }
1130 break;
1131 case EXCP_SEMIHOST:
1132 env->xregs[0] = do_arm_semihosting(env);
1133 break;
1134 case EXCP_YIELD:
1135 /* nothing to do here for user-mode, just resume guest code */
1136 break;
1137 default:
1138 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
1139 abort();
1140 }
1141 process_pending_signals(env);
1142 /* Exception return on AArch64 always clears the exclusive monitor,
1143 * so any return to running guest code implies this.
1144 * A strex (successful or otherwise) also clears the monitor, so
1145 * we don't need to specialcase EXCP_STREX.
1146 */
1147 env->exclusive_addr = -1;
1148 }
1149 }
1150 #endif /* ndef TARGET_ABI32 */
1151
1152 #endif
1153
1154 #ifdef TARGET_UNICORE32
1155
1156 void cpu_loop(CPUUniCore32State *env)
1157 {
1158 CPUState *cs = CPU(uc32_env_get_cpu(env));
1159 int trapnr;
1160 unsigned int n, insn;
1161 target_siginfo_t info;
1162
1163 for (;;) {
1164 cpu_exec_start(cs);
1165 trapnr = cpu_exec(cs);
1166 cpu_exec_end(cs);
1167 process_queued_cpu_work(cs);
1168
1169 switch (trapnr) {
1170 case UC32_EXCP_PRIV:
1171 {
1172 /* system call */
1173 get_user_u32(insn, env->regs[31] - 4);
1174 n = insn & 0xffffff;
1175
1176 if (n >= UC32_SYSCALL_BASE) {
1177 /* linux syscall */
1178 n -= UC32_SYSCALL_BASE;
1179 if (n == UC32_SYSCALL_NR_set_tls) {
1180 cpu_set_tls(env, env->regs[0]);
1181 env->regs[0] = 0;
1182 } else {
1183 abi_long ret = do_syscall(env,
1184 n,
1185 env->regs[0],
1186 env->regs[1],
1187 env->regs[2],
1188 env->regs[3],
1189 env->regs[4],
1190 env->regs[5],
1191 0, 0);
1192 if (ret == -TARGET_ERESTARTSYS) {
1193 env->regs[31] -= 4;
1194 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
1195 env->regs[0] = ret;
1196 }
1197 }
1198 } else {
1199 goto error;
1200 }
1201 }
1202 break;
1203 case UC32_EXCP_DTRAP:
1204 case UC32_EXCP_ITRAP:
1205 info.si_signo = TARGET_SIGSEGV;
1206 info.si_errno = 0;
1207 /* XXX: check env->error_code */
1208 info.si_code = TARGET_SEGV_MAPERR;
1209 info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
1210 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1211 break;
1212 case EXCP_INTERRUPT:
1213 /* just indicate that signals should be handled asap */
1214 break;
1215 case EXCP_DEBUG:
1216 {
1217 int sig;
1218
1219 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1220 if (sig) {
1221 info.si_signo = sig;
1222 info.si_errno = 0;
1223 info.si_code = TARGET_TRAP_BRKPT;
1224 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1225 }
1226 }
1227 break;
1228 default:
1229 goto error;
1230 }
1231 process_pending_signals(env);
1232 }
1233
1234 error:
1235 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
1236 abort();
1237 }
1238 #endif
1239
1240 #ifdef TARGET_SPARC
1241 #define SPARC64_STACK_BIAS 2047
1242
1243 //#define DEBUG_WIN
1244
1245 /* WARNING: dealing with register windows _is_ complicated. More info
1246 can be found at http://www.sics.se/~psm/sparcstack.html */
1247 static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
1248 {
1249 index = (index + cwp * 16) % (16 * env->nwindows);
1250 /* wrap handling: if cwp is on the last window, then we use the
1251 registers 'after' the end */
1252 if (index < 8 && env->cwp == env->nwindows - 1)
1253 index += 16 * env->nwindows;
1254 return index;
1255 }
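/* For example, with env->nwindows == 8 the flat register file holds
 * 8 * 16 = 128 entries, so the stack-pointer slot (index 6) of window 2
 * maps to (6 + 2 * 16) % 128 = 38; the "index < 8" fixup above covers
 * the wrap case described in the comment. */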
1256
1257 /* save the register window 'cwp1' */
1258 static inline void save_window_offset(CPUSPARCState *env, int cwp1)
1259 {
1260 unsigned int i;
1261 abi_ulong sp_ptr;
1262
1263 sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
1264 #ifdef TARGET_SPARC64
1265 if (sp_ptr & 3)
1266 sp_ptr += SPARC64_STACK_BIAS;
1267 #endif
1268 #if defined(DEBUG_WIN)
1269 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
1270 sp_ptr, cwp1);
1271 #endif
1272 for(i = 0; i < 16; i++) {
1273 /* FIXME - what to do if put_user() fails? */
1274 put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
1275 sp_ptr += sizeof(abi_ulong);
1276 }
1277 }
1278
1279 static void save_window(CPUSPARCState *env)
1280 {
1281 #ifndef TARGET_SPARC64
1282 unsigned int new_wim;
1283 new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
1284 ((1LL << env->nwindows) - 1);
1285 save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
1286 env->wim = new_wim;
1287 #else
1288 save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
1289 env->cansave++;
1290 env->canrestore--;
1291 #endif
1292 }
1293
1294 static void restore_window(CPUSPARCState *env)
1295 {
1296 #ifndef TARGET_SPARC64
1297 unsigned int new_wim;
1298 #endif
1299 unsigned int i, cwp1;
1300 abi_ulong sp_ptr;
1301
1302 #ifndef TARGET_SPARC64
1303 new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
1304 ((1LL << env->nwindows) - 1);
1305 #endif
1306
1307 /* restore the invalid window */
1308 cwp1 = cpu_cwp_inc(env, env->cwp + 1);
1309 sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
1310 #ifdef TARGET_SPARC64
1311 if (sp_ptr & 3)
1312 sp_ptr += SPARC64_STACK_BIAS;
1313 #endif
1314 #if defined(DEBUG_WIN)
1315 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
1316 sp_ptr, cwp1);
1317 #endif
1318 for(i = 0; i < 16; i++) {
1319 /* FIXME - what to do if get_user() fails? */
1320 get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
1321 sp_ptr += sizeof(abi_ulong);
1322 }
1323 #ifdef TARGET_SPARC64
1324 env->canrestore++;
1325 if (env->cleanwin < env->nwindows - 1)
1326 env->cleanwin++;
1327 env->cansave--;
1328 #else
1329 env->wim = new_wim;
1330 #endif
1331 }
1332
1333 static void flush_windows(CPUSPARCState *env)
1334 {
1335 int offset, cwp1;
1336
1337 offset = 1;
1338 for(;;) {
1339 /* if restore would invoke restore_window(), then we can stop */
1340 cwp1 = cpu_cwp_inc(env, env->cwp + offset);
1341 #ifndef TARGET_SPARC64
1342 if (env->wim & (1 << cwp1))
1343 break;
1344 #else
1345 if (env->canrestore == 0)
1346 break;
1347 env->cansave++;
1348 env->canrestore--;
1349 #endif
1350 save_window_offset(env, cwp1);
1351 offset++;
1352 }
1353 cwp1 = cpu_cwp_inc(env, env->cwp + 1);
1354 #ifndef TARGET_SPARC64
1355 /* set wim so that restore will reload the registers */
1356 env->wim = 1 << cwp1;
1357 #endif
1358 #if defined(DEBUG_WIN)
1359 printf("flush_windows: nb=%d\n", offset - 1);
1360 #endif
1361 }
1362
1363 void cpu_loop (CPUSPARCState *env)
1364 {
1365 CPUState *cs = CPU(sparc_env_get_cpu(env));
1366 int trapnr;
1367 abi_long ret;
1368 target_siginfo_t info;
1369
1370 while (1) {
1371 cpu_exec_start(cs);
1372 trapnr = cpu_exec(cs);
1373 cpu_exec_end(cs);
1374 process_queued_cpu_work(cs);
1375
1376 /* Compute PSR before exposing state. */
1377 if (env->cc_op != CC_OP_FLAGS) {
1378 cpu_get_psr(env);
1379 }
1380
1381 switch (trapnr) {
1382 #ifndef TARGET_SPARC64
1383 case 0x88:
1384 case 0x90:
1385 #else
1386 case 0x110:
1387 case 0x16d:
1388 #endif
1389 ret = do_syscall (env, env->gregs[1],
1390 env->regwptr[0], env->regwptr[1],
1391 env->regwptr[2], env->regwptr[3],
1392 env->regwptr[4], env->regwptr[5],
1393 0, 0);
1394 if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) {
1395 break;
1396 }
1397 if ((abi_ulong)ret >= (abi_ulong)(-515)) {
1398 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1399 env->xcc |= PSR_CARRY;
1400 #else
1401 env->psr |= PSR_CARRY;
1402 #endif
1403 ret = -ret;
1404 } else {
1405 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1406 env->xcc &= ~PSR_CARRY;
1407 #else
1408 env->psr &= ~PSR_CARRY;
1409 #endif
1410 }
1411 env->regwptr[0] = ret;
1412 /* next instruction */
1413 env->pc = env->npc;
1414 env->npc = env->npc + 4;
1415 break;
1416 case 0x83: /* flush windows */
1417 #ifdef TARGET_ABI32
1418 case 0x103:
1419 #endif
1420 flush_windows(env);
1421 /* next instruction */
1422 env->pc = env->npc;
1423 env->npc = env->npc + 4;
1424 break;
1425 #ifndef TARGET_SPARC64
1426 case TT_WIN_OVF: /* window overflow */
1427 save_window(env);
1428 break;
1429 case TT_WIN_UNF: /* window underflow */
1430 restore_window(env);
1431 break;
1432 case TT_TFAULT:
1433 case TT_DFAULT:
1434 {
1435 info.si_signo = TARGET_SIGSEGV;
1436 info.si_errno = 0;
1437 /* XXX: check env->error_code */
1438 info.si_code = TARGET_SEGV_MAPERR;
1439 info._sifields._sigfault._addr = env->mmuregs[4];
1440 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1441 }
1442 break;
1443 #else
1444 case TT_SPILL: /* window overflow */
1445 save_window(env);
1446 break;
1447 case TT_FILL: /* window underflow */
1448 restore_window(env);
1449 break;
1450 case TT_TFAULT:
1451 case TT_DFAULT:
1452 {
1453 info.si_signo = TARGET_SIGSEGV;
1454 info.si_errno = 0;
1455 /* XXX: check env->error_code */
1456 info.si_code = TARGET_SEGV_MAPERR;
1457 if (trapnr == TT_DFAULT)
1458 info._sifields._sigfault._addr = env->dmmuregs[4];
1459 else
1460 info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
1461 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1462 }
1463 break;
1464 #ifndef TARGET_ABI32
1465 case 0x16e:
1466 flush_windows(env);
1467 sparc64_get_context(env);
1468 break;
1469 case 0x16f:
1470 flush_windows(env);
1471 sparc64_set_context(env);
1472 break;
1473 #endif
1474 #endif
1475 case EXCP_INTERRUPT:
1476 /* just indicate that signals should be handled asap */
1477 break;
1478 case TT_ILL_INSN:
1479 {
1480 info.si_signo = TARGET_SIGILL;
1481 info.si_errno = 0;
1482 info.si_code = TARGET_ILL_ILLOPC;
1483 info._sifields._sigfault._addr = env->pc;
1484 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1485 }
1486 break;
1487 case EXCP_DEBUG:
1488 {
1489 int sig;
1490
1491 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1492 if (sig)
1493 {
1494 info.si_signo = sig;
1495 info.si_errno = 0;
1496 info.si_code = TARGET_TRAP_BRKPT;
1497 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1498 }
1499 }
1500 break;
1501 default:
1502 printf ("Unhandled trap: 0x%x\n", trapnr);
1503 cpu_dump_state(cs, stderr, fprintf, 0);
1504 exit(EXIT_FAILURE);
1505 }
1506 process_pending_signals (env);
1507 }
1508 }
1509
1510 #endif
1511
1512 #ifdef TARGET_PPC
1513 static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
1514 {
1515 return cpu_get_host_ticks();
1516 }
1517
1518 uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
1519 {
1520 return cpu_ppc_get_tb(env);
1521 }
1522
1523 uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
1524 {
1525 return cpu_ppc_get_tb(env) >> 32;
1526 }
1527
1528 uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
1529 {
1530 return cpu_ppc_get_tb(env);
1531 }
1532
1533 uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
1534 {
1535 return cpu_ppc_get_tb(env) >> 32;
1536 }
1537
1538 uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
1539 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1540
1541 uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
1542 {
1543 return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
1544 }
1545
1546 /* XXX: to be fixed */
1547 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1548 {
1549 return -1;
1550 }
1551
1552 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1553 {
1554 return -1;
1555 }
1556
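/* As the function below reads it, env->reserve_info encodes the GPR
 * whose value is to be stored in its low 5 bits and the access size in
 * bytes in the bits above them. */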
1557 static int do_store_exclusive(CPUPPCState *env)
1558 {
1559 target_ulong addr;
1560 target_ulong page_addr;
1561 target_ulong val, val2 __attribute__((unused)) = 0;
1562 int flags;
1563 int segv = 0;
1564
1565 addr = env->reserve_ea;
1566 page_addr = addr & TARGET_PAGE_MASK;
1567 start_exclusive();
1568 mmap_lock();
1569 flags = page_get_flags(page_addr);
1570 if ((flags & PAGE_READ) == 0) {
1571 segv = 1;
1572 } else {
1573 int reg = env->reserve_info & 0x1f;
1574 int size = env->reserve_info >> 5;
1575 int stored = 0;
1576
1577 if (addr == env->reserve_addr) {
1578 switch (size) {
1579 case 1: segv = get_user_u8(val, addr); break;
1580 case 2: segv = get_user_u16(val, addr); break;
1581 case 4: segv = get_user_u32(val, addr); break;
1582 #if defined(TARGET_PPC64)
1583 case 8: segv = get_user_u64(val, addr); break;
1584 case 16: {
1585 segv = get_user_u64(val, addr);
1586 if (!segv) {
1587 segv = get_user_u64(val2, addr + 8);
1588 }
1589 break;
1590 }
1591 #endif
1592 default: abort();
1593 }
1594 if (!segv && val == env->reserve_val) {
1595 val = env->gpr[reg];
1596 switch (size) {
1597 case 1: segv = put_user_u8(val, addr); break;
1598 case 2: segv = put_user_u16(val, addr); break;
1599 case 4: segv = put_user_u32(val, addr); break;
1600 #if defined(TARGET_PPC64)
1601 case 8: segv = put_user_u64(val, addr); break;
1602 case 16: {
1603 if (val2 == env->reserve_val2) {
1604 if (msr_le) {
1605 val2 = val;
1606 val = env->gpr[reg+1];
1607 } else {
1608 val2 = env->gpr[reg+1];
1609 }
1610 segv = put_user_u64(val, addr);
1611 if (!segv) {
1612 segv = put_user_u64(val2, addr + 8);
1613 }
1614 }
1615 break;
1616 }
1617 #endif
1618 default: abort();
1619 }
1620 if (!segv) {
1621 stored = 1;
1622 }
1623 }
1624 }
1625 env->crf[0] = (stored << 1) | xer_so;
1626 env->reserve_addr = (target_ulong)-1;
1627 }
1628 if (!segv) {
1629 env->nip += 4;
1630 }
1631 mmap_unlock();
1632 end_exclusive();
1633 return segv;
1634 }
1635
1636 void cpu_loop(CPUPPCState *env)
1637 {
1638 CPUState *cs = CPU(ppc_env_get_cpu(env));
1639 target_siginfo_t info;
1640 int trapnr;
1641 target_ulong ret;
1642
1643 for(;;) {
1644 cpu_exec_start(cs);
1645 trapnr = cpu_exec(cs);
1646 cpu_exec_end(cs);
1647 process_queued_cpu_work(cs);
1648
1649 switch(trapnr) {
1650 case POWERPC_EXCP_NONE:
1651 /* Just go on */
1652 break;
1653 case POWERPC_EXCP_CRITICAL: /* Critical input */
1654 cpu_abort(cs, "Critical interrupt while in user mode. "
1655 "Aborting\n");
1656 break;
1657 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1658 cpu_abort(cs, "Machine check exception while in user mode. "
1659 "Aborting\n");
1660 break;
1661 case POWERPC_EXCP_DSI: /* Data storage exception */
1662 /* XXX: check this. Seems bugged */
1663 switch (env->error_code & 0xFF000000) {
1664 case 0x40000000:
1665 case 0x42000000:
1666 info.si_signo = TARGET_SIGSEGV;
1667 info.si_errno = 0;
1668 info.si_code = TARGET_SEGV_MAPERR;
1669 break;
1670 case 0x04000000:
1671 info.si_signo = TARGET_SIGILL;
1672 info.si_errno = 0;
1673 info.si_code = TARGET_ILL_ILLADR;
1674 break;
1675 case 0x08000000:
1676 info.si_signo = TARGET_SIGSEGV;
1677 info.si_errno = 0;
1678 info.si_code = TARGET_SEGV_ACCERR;
1679 break;
1680 default:
1681 /* Let's send a regular segfault... */
1682 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
1683 env->error_code);
1684 info.si_signo = TARGET_SIGSEGV;
1685 info.si_errno = 0;
1686 info.si_code = TARGET_SEGV_MAPERR;
1687 break;
1688 }
1689 info._sifields._sigfault._addr = env->nip;
1690 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1691 break;
1692 case POWERPC_EXCP_ISI: /* Instruction storage exception */
1693 /* XXX: check this */
1694 switch (env->error_code & 0xFF000000) {
1695 case 0x40000000:
1696 info.si_signo = TARGET_SIGSEGV;
1697 info.si_errno = 0;
1698 info.si_code = TARGET_SEGV_MAPERR;
1699 break;
1700 case 0x10000000:
1701 case 0x08000000:
1702 info.si_signo = TARGET_SIGSEGV;
1703 info.si_errno = 0;
1704 info.si_code = TARGET_SEGV_ACCERR;
1705 break;
1706 default:
1707 /* Let's send a regular segfault... */
1708 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
1709 env->error_code);
1710 info.si_signo = TARGET_SIGSEGV;
1711 info.si_errno = 0;
1712 info.si_code = TARGET_SEGV_MAPERR;
1713 break;
1714 }
1715 info._sifields._sigfault._addr = env->nip - 4;
1716 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1717 break;
1718 case POWERPC_EXCP_EXTERNAL: /* External input */
1719 cpu_abort(cs, "External interrupt while in user mode. "
1720 "Aborting\n");
1721 break;
1722 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1723 /* XXX: check this */
1724 info.si_signo = TARGET_SIGBUS;
1725 info.si_errno = 0;
1726 info.si_code = TARGET_BUS_ADRALN;
1727 info._sifields._sigfault._addr = env->nip;
1728 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1729 break;
1730 case POWERPC_EXCP_PROGRAM: /* Program exception */
1731 case POWERPC_EXCP_HV_EMU: /* HV emulation */
1732 /* XXX: check this */
1733 switch (env->error_code & ~0xF) {
1734 case POWERPC_EXCP_FP:
1735 info.si_signo = TARGET_SIGFPE;
1736 info.si_errno = 0;
1737 switch (env->error_code & 0xF) {
1738 case POWERPC_EXCP_FP_OX:
1739 info.si_code = TARGET_FPE_FLTOVF;
1740 break;
1741 case POWERPC_EXCP_FP_UX:
1742 info.si_code = TARGET_FPE_FLTUND;
1743 break;
1744 case POWERPC_EXCP_FP_ZX:
1745 case POWERPC_EXCP_FP_VXZDZ:
1746 info.si_code = TARGET_FPE_FLTDIV;
1747 break;
1748 case POWERPC_EXCP_FP_XX:
1749 info.si_code = TARGET_FPE_FLTRES;
1750 break;
1751 case POWERPC_EXCP_FP_VXSOFT:
1752 info.si_code = TARGET_FPE_FLTINV;
1753 break;
1754 case POWERPC_EXCP_FP_VXSNAN:
1755 case POWERPC_EXCP_FP_VXISI:
1756 case POWERPC_EXCP_FP_VXIDI:
1757 case POWERPC_EXCP_FP_VXIMZ:
1758 case POWERPC_EXCP_FP_VXVC:
1759 case POWERPC_EXCP_FP_VXSQRT:
1760 case POWERPC_EXCP_FP_VXCVI:
1761 info.si_code = TARGET_FPE_FLTSUB;
1762 break;
1763 default:
1764 EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
1765 env->error_code);
1766 break;
1767 }
1768 break;
1769 case POWERPC_EXCP_INVAL:
1770 info.si_signo = TARGET_SIGILL;
1771 info.si_errno = 0;
1772 switch (env->error_code & 0xF) {
1773 case POWERPC_EXCP_INVAL_INVAL:
1774 info.si_code = TARGET_ILL_ILLOPC;
1775 break;
1776 case POWERPC_EXCP_INVAL_LSWX:
1777 info.si_code = TARGET_ILL_ILLOPN;
1778 break;
1779 case POWERPC_EXCP_INVAL_SPR:
1780 info.si_code = TARGET_ILL_PRVREG;
1781 break;
1782 case POWERPC_EXCP_INVAL_FP:
1783 info.si_code = TARGET_ILL_COPROC;
1784 break;
1785 default:
1786 EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
1787 env->error_code & 0xF);
1788 info.si_code = TARGET_ILL_ILLADR;
1789 break;
1790 }
1791 break;
1792 case POWERPC_EXCP_PRIV:
1793 info.si_signo = TARGET_SIGILL;
1794 info.si_errno = 0;
1795 switch (env->error_code & 0xF) {
1796 case POWERPC_EXCP_PRIV_OPC:
1797 info.si_code = TARGET_ILL_PRVOPC;
1798 break;
1799 case POWERPC_EXCP_PRIV_REG:
1800 info.si_code = TARGET_ILL_PRVREG;
1801 break;
1802 default:
1803 EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
1804 env->error_code & 0xF);
1805 info.si_code = TARGET_ILL_PRVOPC;
1806 break;
1807 }
1808 break;
1809 case POWERPC_EXCP_TRAP:
1810 cpu_abort(cs, "Tried to call a TRAP\n");
1811 break;
1812 default:
1813 /* Should not happen ! */
1814 cpu_abort(cs, "Unknown program exception (%02x)\n",
1815 env->error_code);
1816 break;
1817 }
1818 info._sifields._sigfault._addr = env->nip;
1819 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1820 break;
1821 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1822 info.si_signo = TARGET_SIGILL;
1823 info.si_errno = 0;
1824 info.si_code = TARGET_ILL_COPROC;
1825 info._sifields._sigfault._addr = env->nip;
1826 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1827 break;
1828 case POWERPC_EXCP_SYSCALL: /* System call exception */
1829 cpu_abort(cs, "Syscall exception while in user mode. "
1830 "Aborting\n");
1831 break;
1832 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
1833 info.si_signo = TARGET_SIGILL;
1834 info.si_errno = 0;
1835 info.si_code = TARGET_ILL_COPROC;
1836 info._sifields._sigfault._addr = env->nip;
1837 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1838 break;
1839 case POWERPC_EXCP_DECR: /* Decrementer exception */
1840 cpu_abort(cs, "Decrementer interrupt while in user mode. "
1841 "Aborting\n");
1842 break;
1843 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
1844 cpu_abort(cs, "Fixed-interval timer interrupt while in user mode. "
1845 "Aborting\n");
1846 break;
1847 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
1848 cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
1849 "Aborting\n");
1850 break;
1851 case POWERPC_EXCP_DTLB: /* Data TLB error */
1852 cpu_abort(cs, "Data TLB exception while in user mode. "
1853 "Aborting\n");
1854 break;
1855 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
1856 cpu_abort(cs, "Instruction TLB exception while in user mode. "
1857 "Aborting\n");
1858 break;
1859 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
1860 info.si_signo = TARGET_SIGILL;
1861 info.si_errno = 0;
1862 info.si_code = TARGET_ILL_COPROC;
1863 info._sifields._sigfault._addr = env->nip;
1864 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1865 break;
1866 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
1867 cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
1868 break;
1869 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
1870 cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
1871 break;
1872 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
1873 cpu_abort(cs, "Performance monitor exception not handled\n");
1874 break;
1875 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
1876 cpu_abort(cs, "Doorbell interrupt while in user mode. "
1877 "Aborting\n");
1878 break;
1879 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
1880 cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
1881 "Aborting\n");
1882 break;
1883 case POWERPC_EXCP_RESET: /* System reset exception */
1884 cpu_abort(cs, "Reset interrupt while in user mode. "
1885 "Aborting\n");
1886 break;
1887 case POWERPC_EXCP_DSEG: /* Data segment exception */
1888 cpu_abort(cs, "Data segment exception while in user mode. "
1889 "Aborting\n");
1890 break;
1891 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
1892 cpu_abort(cs, "Instruction segment exception "
1893 "while in user mode. Aborting\n");
1894 break;
1895 /* PowerPC 64 with hypervisor mode support */
1896 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
1897 cpu_abort(cs, "Hypervisor decrementer interrupt "
1898 "while in user mode. Aborting\n");
1899 break;
1900 case POWERPC_EXCP_TRACE: /* Trace exception */
1901 /* Nothing to do:
1902 * we use this exception to emulate step-by-step execution mode.
1903 */
1904 break;
1905 /* PowerPC 64 with hypervisor mode support */
1906 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
1907 cpu_abort(cs, "Hypervisor data storage exception "
1908 "while in user mode. Aborting\n");
1909 break;
1910 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
1911 cpu_abort(cs, "Hypervisor instruction storage exception "
1912 "while in user mode. Aborting\n");
1913 break;
1914 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
1915 cpu_abort(cs, "Hypervisor data segment exception "
1916 "while in user mode. Aborting\n");
1917 break;
1918 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
1919 cpu_abort(cs, "Hypervisor instruction segment exception "
1920 "while in user mode. Aborting\n");
1921 break;
1922 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
1923 info.si_signo = TARGET_SIGILL;
1924 info.si_errno = 0;
1925 info.si_code = TARGET_ILL_COPROC;
1926 info._sifields._sigfault._addr = env->nip;
1927 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
1928 break;
1929 case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
1930 cpu_abort(cs, "Programmable interval timer interrupt "
1931 "while in user mode. Aborting\n");
1932 break;
1933 case POWERPC_EXCP_IO: /* IO error exception */
1934 cpu_abort(cs, "IO error exception while in user mode. "
1935 "Aborting\n");
1936 break;
1937 case POWERPC_EXCP_RUNM: /* Run mode exception */
1938 cpu_abort(cs, "Run mode exception while in user mode. "
1939 "Aborting\n");
1940 break;
1941 case POWERPC_EXCP_EMUL: /* Emulation trap exception */
1942 cpu_abort(cs, "Emulation trap exception not handled\n");
1943 break;
1944 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
1945 cpu_abort(cs, "Instruction fetch TLB exception "
1946 "while in user-mode. Aborting");
1947 break;
1948 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
1949 cpu_abort(cs, "Data load TLB exception while in user-mode. "
1950 "Aborting");
1951 break;
1952 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
1953 cpu_abort(cs, "Data store TLB exception while in user-mode. "
1954 "Aborting");
1955 break;
1956 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
1957 cpu_abort(cs, "Floating-point assist exception not handled\n");
1958 break;
1959 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
1960 cpu_abort(cs, "Instruction address breakpoint exception "
1961 "not handled\n");
1962 break;
1963 case POWERPC_EXCP_SMI: /* System management interrupt */
1964 cpu_abort(cs, "System management interrupt while in user mode. "
1965 "Aborting\n");
1966 break;
1967 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1968 cpu_abort(cs, "Thermal interrupt while in user mode. "
1969 "Aborting\n");
1970 break;
1971 case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
1972 cpu_abort(cs, "Performance monitor exception not handled\n");
1973 break;
1974 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1975 cpu_abort(cs, "Vector assist exception not handled\n");
1976 break;
1977 case POWERPC_EXCP_SOFTP: /* Soft patch exception */
1978 cpu_abort(cs, "Soft patch exception not handled\n");
1979 break;
1980 case POWERPC_EXCP_MAINT: /* Maintenance exception */
1981 cpu_abort(cs, "Maintenance exception while in user mode. "
1982 "Aborting\n");
1983 break;
1984 case POWERPC_EXCP_STOP: /* stop translation */
1985 /* We did invalidate the instruction cache. Go on */
1986 break;
1987 case POWERPC_EXCP_BRANCH: /* branch instruction: */
1988 /* We just stopped because of a branch. Go on */
1989 break;
1990 case POWERPC_EXCP_SYSCALL_USER:
1991 /* system call in user-mode emulation */
1992 /* WARNING:
1993 * PPC ABI uses overflow flag in cr0 to signal an error
1994 * in syscalls.
1995 */
1996 env->crf[0] &= ~0x1;
1997 ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
1998 env->gpr[5], env->gpr[6], env->gpr[7],
1999 env->gpr[8], 0, 0);
2000 if (ret == -TARGET_ERESTARTSYS) {
2001 break;
2002 }
2003 if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
2004 /* Returning from a successful sigreturn syscall.
2005 Avoid corrupting register state. */
2006 break;
2007 }
2008 env->nip += 4;
2009 if (ret > (target_ulong)(-515)) {
2010 env->crf[0] |= 0x1;
2011 ret = -ret;
2012 }
2013 env->gpr[3] = ret;
2014 break;
2015 case POWERPC_EXCP_STCX:
2016 if (do_store_exclusive(env)) {
2017 info.si_signo = TARGET_SIGSEGV;
2018 info.si_errno = 0;
2019 info.si_code = TARGET_SEGV_MAPERR;
2020 info._sifields._sigfault._addr = env->nip;
2021 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2022 }
2023 break;
2024 case EXCP_DEBUG:
2025 {
2026 int sig;
2027
2028 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2029 if (sig) {
2030 info.si_signo = sig;
2031 info.si_errno = 0;
2032 info.si_code = TARGET_TRAP_BRKPT;
2033 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2034 }
2035 }
2036 break;
2037 case EXCP_INTERRUPT:
2038 /* just indicate that signals should be handled asap */
2039 break;
2040 default:
2041 cpu_abort(cs, "Unknown exception 0x%x. Aborting\n", trapnr);
2042 break;
2043 }
2044 process_pending_signals(env);
2045 }
2046 }
2047 #endif
2048
2049 #ifdef TARGET_MIPS
2050
2051 # ifdef TARGET_ABI_MIPSO32
2052 # define MIPS_SYS(name, args) args,
2053 static const uint8_t mips_syscall_args[] = {
2054 MIPS_SYS(sys_syscall , 8) /* 4000 */
2055 MIPS_SYS(sys_exit , 1)
2056 MIPS_SYS(sys_fork , 0)
2057 MIPS_SYS(sys_read , 3)
2058 MIPS_SYS(sys_write , 3)
2059 MIPS_SYS(sys_open , 3) /* 4005 */
2060 MIPS_SYS(sys_close , 1)
2061 MIPS_SYS(sys_waitpid , 3)
2062 MIPS_SYS(sys_creat , 2)
2063 MIPS_SYS(sys_link , 2)
2064 MIPS_SYS(sys_unlink , 1) /* 4010 */
2065 MIPS_SYS(sys_execve , 0)
2066 MIPS_SYS(sys_chdir , 1)
2067 MIPS_SYS(sys_time , 1)
2068 MIPS_SYS(sys_mknod , 3)
2069 MIPS_SYS(sys_chmod , 2) /* 4015 */
2070 MIPS_SYS(sys_lchown , 3)
2071 MIPS_SYS(sys_ni_syscall , 0)
2072 MIPS_SYS(sys_ni_syscall , 0) /* was sys_stat */
2073 MIPS_SYS(sys_lseek , 3)
2074 MIPS_SYS(sys_getpid , 0) /* 4020 */
2075 MIPS_SYS(sys_mount , 5)
2076 MIPS_SYS(sys_umount , 1)
2077 MIPS_SYS(sys_setuid , 1)
2078 MIPS_SYS(sys_getuid , 0)
2079 MIPS_SYS(sys_stime , 1) /* 4025 */
2080 MIPS_SYS(sys_ptrace , 4)
2081 MIPS_SYS(sys_alarm , 1)
2082 MIPS_SYS(sys_ni_syscall , 0) /* was sys_fstat */
2083 MIPS_SYS(sys_pause , 0)
2084 MIPS_SYS(sys_utime , 2) /* 4030 */
2085 MIPS_SYS(sys_ni_syscall , 0)
2086 MIPS_SYS(sys_ni_syscall , 0)
2087 MIPS_SYS(sys_access , 2)
2088 MIPS_SYS(sys_nice , 1)
2089 MIPS_SYS(sys_ni_syscall , 0) /* 4035 */
2090 MIPS_SYS(sys_sync , 0)
2091 MIPS_SYS(sys_kill , 2)
2092 MIPS_SYS(sys_rename , 2)
2093 MIPS_SYS(sys_mkdir , 2)
2094 MIPS_SYS(sys_rmdir , 1) /* 4040 */
2095 MIPS_SYS(sys_dup , 1)
2096 MIPS_SYS(sys_pipe , 0)
2097 MIPS_SYS(sys_times , 1)
2098 MIPS_SYS(sys_ni_syscall , 0)
2099 MIPS_SYS(sys_brk , 1) /* 4045 */
2100 MIPS_SYS(sys_setgid , 1)
2101 MIPS_SYS(sys_getgid , 0)
2102 MIPS_SYS(sys_ni_syscall , 0) /* was signal(2) */
2103 MIPS_SYS(sys_geteuid , 0)
2104 MIPS_SYS(sys_getegid , 0) /* 4050 */
2105 MIPS_SYS(sys_acct , 0)
2106 MIPS_SYS(sys_umount2 , 2)
2107 MIPS_SYS(sys_ni_syscall , 0)
2108 MIPS_SYS(sys_ioctl , 3)
2109 MIPS_SYS(sys_fcntl , 3) /* 4055 */
2110 MIPS_SYS(sys_ni_syscall , 2)
2111 MIPS_SYS(sys_setpgid , 2)
2112 MIPS_SYS(sys_ni_syscall , 0)
2113 MIPS_SYS(sys_olduname , 1)
2114 MIPS_SYS(sys_umask , 1) /* 4060 */
2115 MIPS_SYS(sys_chroot , 1)
2116 MIPS_SYS(sys_ustat , 2)
2117 MIPS_SYS(sys_dup2 , 2)
2118 MIPS_SYS(sys_getppid , 0)
2119 MIPS_SYS(sys_getpgrp , 0) /* 4065 */
2120 MIPS_SYS(sys_setsid , 0)
2121 MIPS_SYS(sys_sigaction , 3)
2122 MIPS_SYS(sys_sgetmask , 0)
2123 MIPS_SYS(sys_ssetmask , 1)
2124 MIPS_SYS(sys_setreuid , 2) /* 4070 */
2125 MIPS_SYS(sys_setregid , 2)
2126 MIPS_SYS(sys_sigsuspend , 0)
2127 MIPS_SYS(sys_sigpending , 1)
2128 MIPS_SYS(sys_sethostname , 2)
2129 MIPS_SYS(sys_setrlimit , 2) /* 4075 */
2130 MIPS_SYS(sys_getrlimit , 2)
2131 MIPS_SYS(sys_getrusage , 2)
2132 MIPS_SYS(sys_gettimeofday, 2)
2133 MIPS_SYS(sys_settimeofday, 2)
2134 MIPS_SYS(sys_getgroups , 2) /* 4080 */
2135 MIPS_SYS(sys_setgroups , 2)
2136 MIPS_SYS(sys_ni_syscall , 0) /* old_select */
2137 MIPS_SYS(sys_symlink , 2)
2138 MIPS_SYS(sys_ni_syscall , 0) /* was sys_lstat */
2139 MIPS_SYS(sys_readlink , 3) /* 4085 */
2140 MIPS_SYS(sys_uselib , 1)
2141 MIPS_SYS(sys_swapon , 2)
2142 MIPS_SYS(sys_reboot , 3)
2143 MIPS_SYS(old_readdir , 3)
2144 MIPS_SYS(old_mmap , 6) /* 4090 */
2145 MIPS_SYS(sys_munmap , 2)
2146 MIPS_SYS(sys_truncate , 2)
2147 MIPS_SYS(sys_ftruncate , 2)
2148 MIPS_SYS(sys_fchmod , 2)
2149 MIPS_SYS(sys_fchown , 3) /* 4095 */
2150 MIPS_SYS(sys_getpriority , 2)
2151 MIPS_SYS(sys_setpriority , 3)
2152 MIPS_SYS(sys_ni_syscall , 0)
2153 MIPS_SYS(sys_statfs , 2)
2154 MIPS_SYS(sys_fstatfs , 2) /* 4100 */
2155 MIPS_SYS(sys_ni_syscall , 0) /* was ioperm(2) */
2156 MIPS_SYS(sys_socketcall , 2)
2157 MIPS_SYS(sys_syslog , 3)
2158 MIPS_SYS(sys_setitimer , 3)
2159 MIPS_SYS(sys_getitimer , 2) /* 4105 */
2160 MIPS_SYS(sys_newstat , 2)
2161 MIPS_SYS(sys_newlstat , 2)
2162 MIPS_SYS(sys_newfstat , 2)
2163 MIPS_SYS(sys_uname , 1)
2164 MIPS_SYS(sys_ni_syscall , 0) /* 4110 was iopl(2) */
2165 MIPS_SYS(sys_vhangup , 0)
2166 MIPS_SYS(sys_ni_syscall , 0) /* was sys_idle() */
2167 MIPS_SYS(sys_ni_syscall , 0) /* was sys_vm86 */
2168 MIPS_SYS(sys_wait4 , 4)
2169 MIPS_SYS(sys_swapoff , 1) /* 4115 */
2170 MIPS_SYS(sys_sysinfo , 1)
2171 MIPS_SYS(sys_ipc , 6)
2172 MIPS_SYS(sys_fsync , 1)
2173 MIPS_SYS(sys_sigreturn , 0)
2174 MIPS_SYS(sys_clone , 6) /* 4120 */
2175 MIPS_SYS(sys_setdomainname, 2)
2176 MIPS_SYS(sys_newuname , 1)
2177 MIPS_SYS(sys_ni_syscall , 0) /* sys_modify_ldt */
2178 MIPS_SYS(sys_adjtimex , 1)
2179 MIPS_SYS(sys_mprotect , 3) /* 4125 */
2180 MIPS_SYS(sys_sigprocmask , 3)
2181 MIPS_SYS(sys_ni_syscall , 0) /* was create_module */
2182 MIPS_SYS(sys_init_module , 5)
2183 MIPS_SYS(sys_delete_module, 1)
2184 MIPS_SYS(sys_ni_syscall , 0) /* 4130 was get_kernel_syms */
2185 MIPS_SYS(sys_quotactl , 0)
2186 MIPS_SYS(sys_getpgid , 1)
2187 MIPS_SYS(sys_fchdir , 1)
2188 MIPS_SYS(sys_bdflush , 2)
2189 MIPS_SYS(sys_sysfs , 3) /* 4135 */
2190 MIPS_SYS(sys_personality , 1)
2191 MIPS_SYS(sys_ni_syscall , 0) /* for afs_syscall */
2192 MIPS_SYS(sys_setfsuid , 1)
2193 MIPS_SYS(sys_setfsgid , 1)
2194 MIPS_SYS(sys_llseek , 5) /* 4140 */
2195 MIPS_SYS(sys_getdents , 3)
2196 MIPS_SYS(sys_select , 5)
2197 MIPS_SYS(sys_flock , 2)
2198 MIPS_SYS(sys_msync , 3)
2199 MIPS_SYS(sys_readv , 3) /* 4145 */
2200 MIPS_SYS(sys_writev , 3)
2201 MIPS_SYS(sys_cacheflush , 3)
2202 MIPS_SYS(sys_cachectl , 3)
2203 MIPS_SYS(sys_sysmips , 4)
2204 MIPS_SYS(sys_ni_syscall , 0) /* 4150 */
2205 MIPS_SYS(sys_getsid , 1)
2206 MIPS_SYS(sys_fdatasync , 0)
2207 MIPS_SYS(sys_sysctl , 1)
2208 MIPS_SYS(sys_mlock , 2)
2209 MIPS_SYS(sys_munlock , 2) /* 4155 */
2210 MIPS_SYS(sys_mlockall , 1)
2211 MIPS_SYS(sys_munlockall , 0)
2212 MIPS_SYS(sys_sched_setparam, 2)
2213 MIPS_SYS(sys_sched_getparam, 2)
2214 MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */
2215 MIPS_SYS(sys_sched_getscheduler, 1)
2216 MIPS_SYS(sys_sched_yield , 0)
2217 MIPS_SYS(sys_sched_get_priority_max, 1)
2218 MIPS_SYS(sys_sched_get_priority_min, 1)
2219 MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */
2220 MIPS_SYS(sys_nanosleep, 2)
2221 MIPS_SYS(sys_mremap , 5)
2222 MIPS_SYS(sys_accept , 3)
2223 MIPS_SYS(sys_bind , 3)
2224 MIPS_SYS(sys_connect , 3) /* 4170 */
2225 MIPS_SYS(sys_getpeername , 3)
2226 MIPS_SYS(sys_getsockname , 3)
2227 MIPS_SYS(sys_getsockopt , 5)
2228 MIPS_SYS(sys_listen , 2)
2229 MIPS_SYS(sys_recv , 4) /* 4175 */
2230 MIPS_SYS(sys_recvfrom , 6)
2231 MIPS_SYS(sys_recvmsg , 3)
2232 MIPS_SYS(sys_send , 4)
2233 MIPS_SYS(sys_sendmsg , 3)
2234 MIPS_SYS(sys_sendto , 6) /* 4180 */
2235 MIPS_SYS(sys_setsockopt , 5)
2236 MIPS_SYS(sys_shutdown , 2)
2237 MIPS_SYS(sys_socket , 3)
2238 MIPS_SYS(sys_socketpair , 4)
2239 MIPS_SYS(sys_setresuid , 3) /* 4185 */
2240 MIPS_SYS(sys_getresuid , 3)
2241 MIPS_SYS(sys_ni_syscall , 0) /* was sys_query_module */
2242 MIPS_SYS(sys_poll , 3)
2243 MIPS_SYS(sys_nfsservctl , 3)
2244 MIPS_SYS(sys_setresgid , 3) /* 4190 */
2245 MIPS_SYS(sys_getresgid , 3)
2246 MIPS_SYS(sys_prctl , 5)
2247 MIPS_SYS(sys_rt_sigreturn, 0)
2248 MIPS_SYS(sys_rt_sigaction, 4)
2249 MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
2250 MIPS_SYS(sys_rt_sigpending, 2)
2251 MIPS_SYS(sys_rt_sigtimedwait, 4)
2252 MIPS_SYS(sys_rt_sigqueueinfo, 3)
2253 MIPS_SYS(sys_rt_sigsuspend, 0)
2254 MIPS_SYS(sys_pread64 , 6) /* 4200 */
2255 MIPS_SYS(sys_pwrite64 , 6)
2256 MIPS_SYS(sys_chown , 3)
2257 MIPS_SYS(sys_getcwd , 2)
2258 MIPS_SYS(sys_capget , 2)
2259 MIPS_SYS(sys_capset , 2) /* 4205 */
2260 MIPS_SYS(sys_sigaltstack , 2)
2261 MIPS_SYS(sys_sendfile , 4)
2262 MIPS_SYS(sys_ni_syscall , 0)
2263 MIPS_SYS(sys_ni_syscall , 0)
2264 MIPS_SYS(sys_mmap2 , 6) /* 4210 */
2265 MIPS_SYS(sys_truncate64 , 4)
2266 MIPS_SYS(sys_ftruncate64 , 4)
2267 MIPS_SYS(sys_stat64 , 2)
2268 MIPS_SYS(sys_lstat64 , 2)
2269 MIPS_SYS(sys_fstat64 , 2) /* 4215 */
2270 MIPS_SYS(sys_pivot_root , 2)
2271 MIPS_SYS(sys_mincore , 3)
2272 MIPS_SYS(sys_madvise , 3)
2273 MIPS_SYS(sys_getdents64 , 3)
2274 MIPS_SYS(sys_fcntl64 , 3) /* 4220 */
2275 MIPS_SYS(sys_ni_syscall , 0)
2276 MIPS_SYS(sys_gettid , 0)
2277 MIPS_SYS(sys_readahead , 5)
2278 MIPS_SYS(sys_setxattr , 5)
2279 MIPS_SYS(sys_lsetxattr , 5) /* 4225 */
2280 MIPS_SYS(sys_fsetxattr , 5)
2281 MIPS_SYS(sys_getxattr , 4)
2282 MIPS_SYS(sys_lgetxattr , 4)
2283 MIPS_SYS(sys_fgetxattr , 4)
2284 MIPS_SYS(sys_listxattr , 3) /* 4230 */
2285 MIPS_SYS(sys_llistxattr , 3)
2286 MIPS_SYS(sys_flistxattr , 3)
2287 MIPS_SYS(sys_removexattr , 2)
2288 MIPS_SYS(sys_lremovexattr, 2)
2289 MIPS_SYS(sys_fremovexattr, 2) /* 4235 */
2290 MIPS_SYS(sys_tkill , 2)
2291 MIPS_SYS(sys_sendfile64 , 5)
2292 MIPS_SYS(sys_futex , 6)
2293 MIPS_SYS(sys_sched_setaffinity, 3)
2294 MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */
2295 MIPS_SYS(sys_io_setup , 2)
2296 MIPS_SYS(sys_io_destroy , 1)
2297 MIPS_SYS(sys_io_getevents, 5)
2298 MIPS_SYS(sys_io_submit , 3)
2299 MIPS_SYS(sys_io_cancel , 3) /* 4245 */
2300 MIPS_SYS(sys_exit_group , 1)
2301 MIPS_SYS(sys_lookup_dcookie, 3)
2302 MIPS_SYS(sys_epoll_create, 1)
2303 MIPS_SYS(sys_epoll_ctl , 4)
2304 MIPS_SYS(sys_epoll_wait , 3) /* 4250 */
2305 MIPS_SYS(sys_remap_file_pages, 5)
2306 MIPS_SYS(sys_set_tid_address, 1)
2307 MIPS_SYS(sys_restart_syscall, 0)
2308 MIPS_SYS(sys_fadvise64_64, 7)
2309 MIPS_SYS(sys_statfs64 , 3) /* 4255 */
2310 MIPS_SYS(sys_fstatfs64 , 2)
2311 MIPS_SYS(sys_timer_create, 3)
2312 MIPS_SYS(sys_timer_settime, 4)
2313 MIPS_SYS(sys_timer_gettime, 2)
2314 MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */
2315 MIPS_SYS(sys_timer_delete, 1)
2316 MIPS_SYS(sys_clock_settime, 2)
2317 MIPS_SYS(sys_clock_gettime, 2)
2318 MIPS_SYS(sys_clock_getres, 2)
2319 MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */
2320 MIPS_SYS(sys_tgkill , 3)
2321 MIPS_SYS(sys_utimes , 2)
2322 MIPS_SYS(sys_mbind , 4)
2323 MIPS_SYS(sys_ni_syscall , 0) /* sys_get_mempolicy */
2324 MIPS_SYS(sys_ni_syscall , 0) /* 4270 sys_set_mempolicy */
2325 MIPS_SYS(sys_mq_open , 4)
2326 MIPS_SYS(sys_mq_unlink , 1)
2327 MIPS_SYS(sys_mq_timedsend, 5)
2328 MIPS_SYS(sys_mq_timedreceive, 5)
2329 MIPS_SYS(sys_mq_notify , 2) /* 4275 */
2330 MIPS_SYS(sys_mq_getsetattr, 3)
2331 MIPS_SYS(sys_ni_syscall , 0) /* sys_vserver */
2332 MIPS_SYS(sys_waitid , 4)
2333 MIPS_SYS(sys_ni_syscall , 0) /* available, was setaltroot */
2334 MIPS_SYS(sys_add_key , 5) /* 4280 */
2335 MIPS_SYS(sys_request_key, 4)
2336 MIPS_SYS(sys_keyctl , 5)
2337 MIPS_SYS(sys_set_thread_area, 1)
2338 MIPS_SYS(sys_inotify_init, 0)
2339 MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */
2340 MIPS_SYS(sys_inotify_rm_watch, 2)
2341 MIPS_SYS(sys_migrate_pages, 4)
2342 MIPS_SYS(sys_openat, 4)
2343 MIPS_SYS(sys_mkdirat, 3)
2344 MIPS_SYS(sys_mknodat, 4) /* 4290 */
2345 MIPS_SYS(sys_fchownat, 5)
2346 MIPS_SYS(sys_futimesat, 3)
2347 MIPS_SYS(sys_fstatat64, 4)
2348 MIPS_SYS(sys_unlinkat, 3)
2349 MIPS_SYS(sys_renameat, 4) /* 4295 */
2350 MIPS_SYS(sys_linkat, 5)
2351 MIPS_SYS(sys_symlinkat, 3)
2352 MIPS_SYS(sys_readlinkat, 4)
2353 MIPS_SYS(sys_fchmodat, 3)
2354 MIPS_SYS(sys_faccessat, 3) /* 4300 */
2355 MIPS_SYS(sys_pselect6, 6)
2356 MIPS_SYS(sys_ppoll, 5)
2357 MIPS_SYS(sys_unshare, 1)
2358 MIPS_SYS(sys_splice, 6)
2359 MIPS_SYS(sys_sync_file_range, 7) /* 4305 */
2360 MIPS_SYS(sys_tee, 4)
2361 MIPS_SYS(sys_vmsplice, 4)
2362 MIPS_SYS(sys_move_pages, 6)
2363 MIPS_SYS(sys_set_robust_list, 2)
2364 MIPS_SYS(sys_get_robust_list, 3) /* 4310 */
2365 MIPS_SYS(sys_kexec_load, 4)
2366 MIPS_SYS(sys_getcpu, 3)
2367 MIPS_SYS(sys_epoll_pwait, 6)
2368 MIPS_SYS(sys_ioprio_set, 3)
2369 MIPS_SYS(sys_ioprio_get, 2) /* 4315 */
2370 MIPS_SYS(sys_utimensat, 4)
2371 MIPS_SYS(sys_signalfd, 3)
2372 MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */
2373 MIPS_SYS(sys_eventfd, 1)
2374 MIPS_SYS(sys_fallocate, 6) /* 4320 */
2375 MIPS_SYS(sys_timerfd_create, 2)
2376 MIPS_SYS(sys_timerfd_gettime, 2)
2377 MIPS_SYS(sys_timerfd_settime, 4)
2378 MIPS_SYS(sys_signalfd4, 4)
2379 MIPS_SYS(sys_eventfd2, 2) /* 4325 */
2380 MIPS_SYS(sys_epoll_create1, 1)
2381 MIPS_SYS(sys_dup3, 3)
2382 MIPS_SYS(sys_pipe2, 2)
2383 MIPS_SYS(sys_inotify_init1, 1)
2384 MIPS_SYS(sys_preadv, 6) /* 4330 */
2385 MIPS_SYS(sys_pwritev, 6)
2386 MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
2387 MIPS_SYS(sys_perf_event_open, 5)
2388 MIPS_SYS(sys_accept4, 4)
2389 MIPS_SYS(sys_recvmmsg, 5) /* 4335 */
2390 MIPS_SYS(sys_fanotify_init, 2)
2391 MIPS_SYS(sys_fanotify_mark, 6)
2392 MIPS_SYS(sys_prlimit64, 4)
2393 MIPS_SYS(sys_name_to_handle_at, 5)
2394 MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */
2395 MIPS_SYS(sys_clock_adjtime, 2)
2396 MIPS_SYS(sys_syncfs, 1)
2397 };
2398 # undef MIPS_SYS
2399 # endif /* O32 */
2400
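/* Emulate a MIPS SC (store conditional): within the exclusive section,
 * re-read the word at lladdr and store the new value only if memory still
 * holds the value observed by the matching LL; the target register is set
 * to 1 on success and 0 on failure. */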
2401 static int do_store_exclusive(CPUMIPSState *env)
2402 {
2403 target_ulong addr;
2404 target_ulong page_addr;
2405 target_ulong val;
2406 int flags;
2407 int segv = 0;
2408 int reg;
2409 int d;
2410
2411 addr = env->lladdr;
2412 page_addr = addr & TARGET_PAGE_MASK;
2413 start_exclusive();
2414 mmap_lock();
2415 flags = page_get_flags(page_addr);
2416 if ((flags & PAGE_READ) == 0) {
2417 segv = 1;
2418 } else {
2419 reg = env->llreg & 0x1f;
2420 d = (env->llreg & 0x20) != 0;
2421 if (d) {
2422 segv = get_user_s64(val, addr);
2423 } else {
2424 segv = get_user_s32(val, addr);
2425 }
2426 if (!segv) {
2427 if (val != env->llval) {
2428 env->active_tc.gpr[reg] = 0;
2429 } else {
2430 if (d) {
2431 segv = put_user_u64(env->llnewval, addr);
2432 } else {
2433 segv = put_user_u32(env->llnewval, addr);
2434 }
2435 if (!segv) {
2436 env->active_tc.gpr[reg] = 1;
2437 }
2438 }
2439 }
2440 }
2441 env->lladdr = -1;
2442 if (!segv) {
2443 env->active_tc.PC += 4;
2444 }
2445 mmap_unlock();
2446 end_exclusive();
2447 return segv;
2448 }
2449
2450 /* Break codes */
2451 enum {
2452 BRK_OVERFLOW = 6,
2453 BRK_DIVZERO = 7
2454 };
2455
2456 static int do_break(CPUMIPSState *env, target_siginfo_t *info,
2457 unsigned int code)
2458 {
2459 int ret = -1;
2460
2461 switch (code) {
2462 case BRK_OVERFLOW:
2463 case BRK_DIVZERO:
2464 info->si_signo = TARGET_SIGFPE;
2465 info->si_errno = 0;
2466 info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
2467 queue_signal(env, info->si_signo, QEMU_SI_FAULT, info);
2468 ret = 0;
2469 break;
2470 default:
2471 info->si_signo = TARGET_SIGTRAP;
2472 info->si_errno = 0;
2473 queue_signal(env, info->si_signo, QEMU_SI_FAULT, info);
2474 ret = 0;
2475 break;
2476 }
2477
2478 return ret;
2479 }
2480
2481 void cpu_loop(CPUMIPSState *env)
2482 {
2483 CPUState *cs = CPU(mips_env_get_cpu(env));
2484 target_siginfo_t info;
2485 int trapnr;
2486 abi_long ret;
2487 # ifdef TARGET_ABI_MIPSO32
2488 unsigned int syscall_num;
2489 # endif
2490
2491 for(;;) {
2492 cpu_exec_start(cs);
2493 trapnr = cpu_exec(cs);
2494 cpu_exec_end(cs);
2495 process_queued_cpu_work(cs);
2496
2497 switch(trapnr) {
2498 case EXCP_SYSCALL:
2499 env->active_tc.PC += 4;
2500 # ifdef TARGET_ABI_MIPSO32
2501 syscall_num = env->active_tc.gpr[2] - 4000;
2502 if (syscall_num >= sizeof(mips_syscall_args)) {
2503 ret = -TARGET_ENOSYS;
2504 } else {
2505 int nb_args;
2506 abi_ulong sp_reg;
2507 abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
2508
2509 nb_args = mips_syscall_args[syscall_num];
2510 sp_reg = env->active_tc.gpr[29];
2511 switch (nb_args) {
2512 /* these arguments are taken from the stack */
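/* Each case below falls through, so a syscall with N arguments
 * picks up every stack slot it needs. */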
2513 case 8:
2514 if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
2515 goto done_syscall;
2516 }
2517 case 7:
2518 if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
2519 goto done_syscall;
2520 }
2521 case 6:
2522 if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
2523 goto done_syscall;
2524 }
2525 case 5:
2526 if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
2527 goto done_syscall;
2528 }
2529 default:
2530 break;
2531 }
2532 ret = do_syscall(env, env->active_tc.gpr[2],
2533 env->active_tc.gpr[4],
2534 env->active_tc.gpr[5],
2535 env->active_tc.gpr[6],
2536 env->active_tc.gpr[7],
2537 arg5, arg6, arg7, arg8);
2538 }
2539 done_syscall:
2540 # else
2541 ret = do_syscall(env, env->active_tc.gpr[2],
2542 env->active_tc.gpr[4], env->active_tc.gpr[5],
2543 env->active_tc.gpr[6], env->active_tc.gpr[7],
2544 env->active_tc.gpr[8], env->active_tc.gpr[9],
2545 env->active_tc.gpr[10], env->active_tc.gpr[11]);
2546 # endif /* O32 */
2547 if (ret == -TARGET_ERESTARTSYS) {
2548 env->active_tc.PC -= 4;
2549 break;
2550 }
2551 if (ret == -TARGET_QEMU_ESIGRETURN) {
2552 /* Returning from a successful sigreturn syscall.
2553 Avoid clobbering register state. */
2554 break;
2555 }
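/* MIPS errno values run up to 1133 (EDQUOT), so results in
 * [-1133, -1] are errors: flag the error in $a3 and return the
 * positive errno in $v0, as the kernel ABI expects. */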
2556 if ((abi_ulong)ret >= (abi_ulong)-1133) {
2557 env->active_tc.gpr[7] = 1; /* error flag */
2558 ret = -ret;
2559 } else {
2560 env->active_tc.gpr[7] = 0; /* error flag */
2561 }
2562 env->active_tc.gpr[2] = ret;
2563 break;
2564 case EXCP_TLBL:
2565 case EXCP_TLBS:
2566 case EXCP_AdEL:
2567 case EXCP_AdES:
2568 info.si_signo = TARGET_SIGSEGV;
2569 info.si_errno = 0;
2570 /* XXX: check env->error_code */
2571 info.si_code = TARGET_SEGV_MAPERR;
2572 info._sifields._sigfault._addr = env->CP0_BadVAddr;
2573 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2574 break;
2575 case EXCP_CpU:
2576 case EXCP_RI:
2577 info.si_signo = TARGET_SIGILL;
2578 info.si_errno = 0;
2579 info.si_code = 0;
2580 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2581 break;
2582 case EXCP_INTERRUPT:
2583 /* just indicate that signals should be handled asap */
2584 break;
2585 case EXCP_DEBUG:
2586 {
2587 int sig;
2588
2589 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2590 if (sig)
2591 {
2592 info.si_signo = sig;
2593 info.si_errno = 0;
2594 info.si_code = TARGET_TRAP_BRKPT;
2595 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2596 }
2597 }
2598 break;
2599 case EXCP_SC:
2600 if (do_store_exclusive(env)) {
2601 info.si_signo = TARGET_SIGSEGV;
2602 info.si_errno = 0;
2603 info.si_code = TARGET_SEGV_MAPERR;
2604 info._sifields._sigfault._addr = env->active_tc.PC;
2605 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2606 }
2607 break;
2608 case EXCP_DSPDIS:
2609 info.si_signo = TARGET_SIGILL;
2610 info.si_errno = 0;
2611 info.si_code = TARGET_ILL_ILLOPC;
2612 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2613 break;
2614 /* The code below was inspired by the MIPS Linux kernel trap
2615 * handling code in arch/mips/kernel/traps.c.
2616 */
2617 case EXCP_BREAK:
2618 {
2619 abi_ulong trap_instr;
2620 unsigned int code;
2621
2622 if (env->hflags & MIPS_HFLAG_M16) {
2623 if (env->insn_flags & ASE_MICROMIPS) {
2624 /* microMIPS mode */
2625 ret = get_user_u16(trap_instr, env->active_tc.PC);
2626 if (ret != 0) {
2627 goto error;
2628 }
2629
2630 if ((trap_instr >> 10) == 0x11) {
2631 /* 16-bit instruction */
2632 code = trap_instr & 0xf;
2633 } else {
2634 /* 32-bit instruction */
2635 abi_ulong instr_lo;
2636
2637 ret = get_user_u16(instr_lo,
2638 env->active_tc.PC + 2);
2639 if (ret != 0) {
2640 goto error;
2641 }
2642 trap_instr = (trap_instr << 16) | instr_lo;
2643 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2644 /* Unfortunately, microMIPS also suffers from
2645 the old assembler bug... */
2646 if (code >= (1 << 10)) {
2647 code >>= 10;
2648 }
2649 }
2650 } else {
2651 /* MIPS16e mode */
2652 ret = get_user_u16(trap_instr, env->active_tc.PC);
2653 if (ret != 0) {
2654 goto error;
2655 }
2656 code = (trap_instr >> 6) & 0x3f;
2657 }
2658 } else {
2659 ret = get_user_u32(trap_instr, env->active_tc.PC);
2660 if (ret != 0) {
2661 goto error;
2662 }
2663
2664 /* As described in the original Linux kernel code, the
2665 * below checks on 'code' are to work around an old
2666 * assembly bug.
2667 */
2668 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2669 if (code >= (1 << 10)) {
2670 code >>= 10;
2671 }
2672 }
2673
2674 if (do_break(env, &info, code) != 0) {
2675 goto error;
2676 }
2677 }
2678 break;
2679 case EXCP_TRAP:
2680 {
2681 abi_ulong trap_instr;
2682 unsigned int code = 0;
2683
2684 if (env->hflags & MIPS_HFLAG_M16) {
2685 /* microMIPS mode */
2686 abi_ulong instr[2];
2687
2688 ret = get_user_u16(instr[0], env->active_tc.PC) ||
2689 get_user_u16(instr[1], env->active_tc.PC + 2);
2690
2691 trap_instr = (instr[0] << 16) | instr[1];
2692 } else {
2693 ret = get_user_u32(trap_instr, env->active_tc.PC);
2694 }
2695
2696 if (ret != 0) {
2697 goto error;
2698 }
2699
2700 /* The immediate versions don't provide a code. */
2701 if (!(trap_instr & 0xFC000000)) {
2702 if (env->hflags & MIPS_HFLAG_M16) {
2703 /* microMIPS mode */
2704 code = ((trap_instr >> 12) & ((1 << 4) - 1));
2705 } else {
2706 code = ((trap_instr >> 6) & ((1 << 10) - 1));
2707 }
2708 }
2709
2710 if (do_break(env, &info, code) != 0) {
2711 goto error;
2712 }
2713 }
2714 break;
2715 default:
2716 error:
2717 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
2718 abort();
2719 }
2720 process_pending_signals(env);
2721 }
2722 }
2723 #endif
2724
2725 #ifdef TARGET_OPENRISC
2726
2727 void cpu_loop(CPUOpenRISCState *env)
2728 {
2729 CPUState *cs = CPU(openrisc_env_get_cpu(env));
2730 int trapnr, gdbsig;
2731 abi_long ret;
2732
2733 for (;;) {
2734 cpu_exec_start(cs);
2735 trapnr = cpu_exec(cs);
2736 cpu_exec_end(cs);
2737 process_queued_cpu_work(cs);
2738 gdbsig = 0;
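/* A non-zero gdbsig below means the trap is reported to an attached
 * gdb; anything other than SIGTRAP then terminates the process (see
 * the check after the switch). */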
2739
2740 switch (trapnr) {
2741 case EXCP_RESET:
2742 qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
2743 exit(EXIT_FAILURE);
2744 break;
2745 case EXCP_BUSERR:
2746 qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
2747 gdbsig = TARGET_SIGBUS;
2748 break;
2749 case EXCP_DPF:
2750 case EXCP_IPF:
2751 cpu_dump_state(cs, stderr, fprintf, 0);
2752 gdbsig = TARGET_SIGSEGV;
2753 break;
2754 case EXCP_TICK:
2755 qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
2756 break;
2757 case EXCP_ALIGN:
2758 qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
2759 gdbsig = TARGET_SIGBUS;
2760 break;
2761 case EXCP_ILLEGAL:
2762 qemu_log_mask(CPU_LOG_INT, "\nIllegal instructionpc is %#x\n", env->pc);
2763 gdbsig = TARGET_SIGILL;
2764 break;
2765 case EXCP_INT:
2766 qemu_log_mask(CPU_LOG_INT, "\nExternal interruptpc is %#x\n", env->pc);
2767 break;
2768 case EXCP_DTLBMISS:
2769 case EXCP_ITLBMISS:
2770 qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
2771 break;
2772 case EXCP_RANGE:
2773 qemu_log_mask(CPU_LOG_INT, "\nRange\n");
2774 gdbsig = TARGET_SIGSEGV;
2775 break;
2776 case EXCP_SYSCALL:
2777 env->pc += 4; /* 0xc00; */
2778 ret = do_syscall(env,
2779 env->gpr[11], /* syscall number (r11 also receives the result) */
2780 env->gpr[3], /* r3 - r7 are params */
2781 env->gpr[4],
2782 env->gpr[5],
2783 env->gpr[6],
2784 env->gpr[7],
2785 env->gpr[8], 0, 0);
2786 if (ret == -TARGET_ERESTARTSYS) {
2787 env->pc -= 4;
2788 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2789 env->gpr[11] = ret;
2790 }
2791 break;
2792 case EXCP_FPE:
2793 qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
2794 break;
2795 case EXCP_TRAP:
2796 qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
2797 gdbsig = TARGET_SIGTRAP;
2798 break;
2799 case EXCP_NR:
2800 qemu_log_mask(CPU_LOG_INT, "\nNR\n");
2801 break;
2802 default:
2803 EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
2804 trapnr);
2805 gdbsig = TARGET_SIGILL;
2806 break;
2807 }
2808 if (gdbsig) {
2809 gdb_handlesig(cs, gdbsig);
2810 if (gdbsig != TARGET_SIGTRAP) {
2811 exit(EXIT_FAILURE);
2812 }
2813 }
2814
2815 process_pending_signals(env);
2816 }
2817 }
2818
2819 #endif /* TARGET_OPENRISC */
2820
2821 #ifdef TARGET_SH4
2822 void cpu_loop(CPUSH4State *env)
2823 {
2824 CPUState *cs = CPU(sh_env_get_cpu(env));
2825 int trapnr, ret;
2826 target_siginfo_t info;
2827
2828 while (1) {
2829 cpu_exec_start(cs);
2830 trapnr = cpu_exec(cs);
2831 cpu_exec_end(cs);
2832 process_queued_cpu_work(cs);
2833
2834 switch (trapnr) {
2835 case 0x160:
2836 env->pc += 2;
2837 ret = do_syscall(env,
2838 env->gregs[3],
2839 env->gregs[4],
2840 env->gregs[5],
2841 env->gregs[6],
2842 env->gregs[7],
2843 env->gregs[0],
2844 env->gregs[1],
2845 0, 0);
2846 if (ret == -TARGET_ERESTARTSYS) {
2847 env->pc -= 2;
2848 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2849 env->gregs[0] = ret;
2850 }
2851 break;
2852 case EXCP_INTERRUPT:
2853 /* just indicate that signals should be handled asap */
2854 break;
2855 case EXCP_DEBUG:
2856 {
2857 int sig;
2858
2859 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2860 if (sig)
2861 {
2862 info.si_signo = sig;
2863 info.si_errno = 0;
2864 info.si_code = TARGET_TRAP_BRKPT;
2865 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2866 }
2867 }
2868 break;
2869 case 0xa0:
2870 case 0xc0:
2871 info.si_signo = TARGET_SIGSEGV;
2872 info.si_errno = 0;
2873 info.si_code = TARGET_SEGV_MAPERR;
2874 info._sifields._sigfault._addr = env->tea;
2875 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2876 break;
2877
2878 default:
2879 printf("Unhandled trap: 0x%x\n", trapnr);
2880 cpu_dump_state(cs, stderr, fprintf, 0);
2881 exit(EXIT_FAILURE);
2882 }
2883 process_pending_signals(env);
2884 }
2885 }
2886 #endif
2887
2888 #ifdef TARGET_CRIS
2889 void cpu_loop(CPUCRISState *env)
2890 {
2891 CPUState *cs = CPU(cris_env_get_cpu(env));
2892 int trapnr, ret;
2893 target_siginfo_t info;
2894
2895 while (1) {
2896 cpu_exec_start(cs);
2897 trapnr = cpu_exec(cs);
2898 cpu_exec_end(cs);
2899 process_queued_cpu_work(cs);
2900
2901 switch (trapnr) {
2902 case 0xaa:
2903 {
2904 info.si_signo = TARGET_SIGSEGV;
2905 info.si_errno = 0;
2906 /* XXX: check env->error_code */
2907 info.si_code = TARGET_SEGV_MAPERR;
2908 info._sifields._sigfault._addr = env->pregs[PR_EDA];
2909 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2910 }
2911 break;
2912 case EXCP_INTERRUPT:
2913 /* just indicate that signals should be handled asap */
2914 break;
2915 case EXCP_BREAK:
2916 ret = do_syscall(env,
2917 env->regs[9],
2918 env->regs[10],
2919 env->regs[11],
2920 env->regs[12],
2921 env->regs[13],
2922 env->pregs[7],
2923 env->pregs[11],
2924 0, 0);
2925 if (ret == -TARGET_ERESTARTSYS) {
2926 env->pc -= 2;
2927 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2928 env->regs[10] = ret;
2929 }
2930 break;
2931 case EXCP_DEBUG:
2932 {
2933 int sig;
2934
2935 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2936 if (sig)
2937 {
2938 info.si_signo = sig;
2939 info.si_errno = 0;
2940 info.si_code = TARGET_TRAP_BRKPT;
2941 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2942 }
2943 }
2944 break;
2945 default:
2946 printf("Unhandled trap: 0x%x\n", trapnr);
2947 cpu_dump_state(cs, stderr, fprintf, 0);
2948 exit(EXIT_FAILURE);
2949 }
2950 process_pending_signals(env);
2951 }
2952 }
2953 #endif
2954
2955 #ifdef TARGET_MICROBLAZE
2956 void cpu_loop(CPUMBState *env)
2957 {
2958 CPUState *cs = CPU(mb_env_get_cpu(env));
2959 int trapnr, ret;
2960 target_siginfo_t info;
2961
2962 while (1) {
2963 cpu_exec_start(cs);
2964 trapnr = cpu_exec(cs);
2965 cpu_exec_end(cs);
2966 process_queued_cpu_work(cs);
2967
2968 switch (trapnr) {
2969 case 0xaa:
2970 {
2971 info.si_signo = TARGET_SIGSEGV;
2972 info.si_errno = 0;
2973 /* XXX: check env->error_code */
2974 info.si_code = TARGET_SEGV_MAPERR;
2975 info._sifields._sigfault._addr = 0;
2976 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
2977 }
2978 break;
2979 case EXCP_INTERRUPT:
2980 /* just indicate that signals should be handled asap */
2981 break;
2982 case EXCP_BREAK:
2983 /* Return address is 4 bytes after the call. */
2984 env->regs[14] += 4;
2985 env->sregs[SR_PC] = env->regs[14];
2986 ret = do_syscall(env,
2987 env->regs[12],
2988 env->regs[5],
2989 env->regs[6],
2990 env->regs[7],
2991 env->regs[8],
2992 env->regs[9],
2993 env->regs[10],
2994 0, 0);
2995 if (ret == -TARGET_ERESTARTSYS) {
2996 /* Wind back to before the syscall. */
2997 env->sregs[SR_PC] -= 4;
2998 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2999 env->regs[3] = ret;
3000 }
3001 /* All syscall exits result in guest r14 being equal to the
3002 * PC we return to, because the kernel syscall exit "rtbd" does
3003 * this. (This is true even for sigreturn(); note that r14 is
3004 * not a userspace-usable register, as the kernel may clobber it
3005 * at any point.)
3006 */
3007 env->regs[14] = env->sregs[SR_PC];
3008 break;
3009 case EXCP_HW_EXCP:
3010 env->regs[17] = env->sregs[SR_PC] + 4;
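/* If the exception was raised in a branch delay slot (D_FLAG),
 * note that in ESR and rewind the PC to the branch itself. */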
3011 if (env->iflags & D_FLAG) {
3012 env->sregs[SR_ESR] |= 1 << 12;
3013 env->sregs[SR_PC] -= 4;
3014 /* FIXME: if branch was immed, replay the imm as well. */
3015 }
3016
3017 env->iflags &= ~(IMM_FLAG | D_FLAG);
3018
3019 switch (env->sregs[SR_ESR] & 31) {
3020 case ESR_EC_DIVZERO:
3021 info.si_signo = TARGET_SIGFPE;
3022 info.si_errno = 0;
3023 info.si_code = TARGET_FPE_FLTDIV;
3024 info._sifields._sigfault._addr = 0;
3025 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3026 break;
3027 case ESR_EC_FPU:
3028 info.si_signo = TARGET_SIGFPE;
3029 info.si_errno = 0;
3030 if (env->sregs[SR_FSR] & FSR_IO) {
3031 info.si_code = TARGET_FPE_FLTINV;
3032 }
3033 if (env->sregs[SR_FSR] & FSR_DZ) {
3034 info.si_code = TARGET_FPE_FLTDIV;
3035 }
3036 info._sifields._sigfault._addr = 0;
3037 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3038 break;
3039 default:
3040 printf("Unhandled hw-exception: 0x%x\n",
3041 env->sregs[SR_ESR] & ESR_EC_MASK);
3042 cpu_dump_state(cs, stderr, fprintf, 0);
3043 exit(EXIT_FAILURE);
3044 break;
3045 }
3046 break;
3047 case EXCP_DEBUG:
3048 {
3049 int sig;
3050
3051 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3052 if (sig)
3053 {
3054 info.si_signo = sig;
3055 info.si_errno = 0;
3056 info.si_code = TARGET_TRAP_BRKPT;
3057 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3058 }
3059 }
3060 break;
3061 default:
3062 printf("Unhandled trap: 0x%x\n", trapnr);
3063 cpu_dump_state(cs, stderr, fprintf, 0);
3064 exit(EXIT_FAILURE);
3065 }
3066 process_pending_signals(env);
3067 }
3068 }
3069 #endif
3070
3071 #ifdef TARGET_M68K
3072
3073 void cpu_loop(CPUM68KState *env)
3074 {
3075 CPUState *cs = CPU(m68k_env_get_cpu(env));
3076 int trapnr;
3077 unsigned int n;
3078 target_siginfo_t info;
3079 TaskState *ts = cs->opaque;
3080
3081 for(;;) {
3082 cpu_exec_start(cs);
3083 trapnr = cpu_exec(cs);
3084 cpu_exec_end(cs);
3085 process_queued_cpu_work(cs);
3086
3087 switch(trapnr) {
3088 case EXCP_ILLEGAL:
3089 {
3090 if (ts->sim_syscalls) {
3091 uint16_t nr;
3092 get_user_u16(nr, env->pc + 2);
3093 env->pc += 4;
3094 do_m68k_simcall(env, nr);
3095 } else {
3096 goto do_sigill;
3097 }
3098 }
3099 break;
3100 case EXCP_HALT_INSN:
3101 /* Semihosting syscall. */
3102 env->pc += 4;
3103 do_m68k_semihosting(env, env->dregs[0]);
3104 break;
3105 case EXCP_LINEA:
3106 case EXCP_LINEF:
3107 case EXCP_UNSUPPORTED:
3108 do_sigill:
3109 info.si_signo = TARGET_SIGILL;
3110 info.si_errno = 0;
3111 info.si_code = TARGET_ILL_ILLOPN;
3112 info._sifields._sigfault._addr = env->pc;
3113 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3114 break;
3115 case EXCP_TRAP0:
3116 {
3117 abi_long ret;
3118 ts->sim_syscalls = 0;
3119 n = env->dregs[0];
3120 env->pc += 2;
3121 ret = do_syscall(env,
3122 n,
3123 env->dregs[1],
3124 env->dregs[2],
3125 env->dregs[3],
3126 env->dregs[4],
3127 env->dregs[5],
3128 env->aregs[0],
3129 0, 0);
3130 if (ret == -TARGET_ERESTARTSYS) {
3131 env->pc -= 2;
3132 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
3133 env->dregs[0] = ret;
3134 }
3135 }
3136 break;
3137 case EXCP_INTERRUPT:
3138 /* just indicate that signals should be handled asap */
3139 break;
3140 case EXCP_ACCESS:
3141 {
3142 info.si_signo = TARGET_SIGSEGV;
3143 info.si_errno = 0;
3144 /* XXX: check env->error_code */
3145 info.si_code = TARGET_SEGV_MAPERR;
3146 info._sifields._sigfault._addr = env->mmu.ar;
3147 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3148 }
3149 break;
3150 case EXCP_DEBUG:
3151 {
3152 int sig;
3153
3154 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3155 if (sig)
3156 {
3157 info.si_signo = sig;
3158 info.si_errno = 0;
3159 info.si_code = TARGET_TRAP_BRKPT;
3160 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3161 }
3162 }
3163 break;
3164 default:
3165 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
3166 abort();
3167 }
3168 process_pending_signals(env);
3169 }
3170 }
3171 #endif /* TARGET_M68K */
3172
3173 #ifdef TARGET_ALPHA
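/* Emulate Alpha STL_C/STQ_C (store conditional): the store succeeds only if
 * the lock address recorded by the matching LDx_L is intact and memory still
 * holds lock_value; the result (1 on success, 0 on failure) is written to
 * the source register and the PC is advanced past the instruction. */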
3174 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
3175 {
3176 target_ulong addr, val, tmp;
3177 target_siginfo_t info;
3178 int ret = 0;
3179
3180 addr = env->lock_addr;
3181 tmp = env->lock_st_addr;
3182 env->lock_addr = -1;
3183 env->lock_st_addr = 0;
3184
3185 start_exclusive();
3186 mmap_lock();
3187
3188 if (addr == tmp) {
3189 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3190 goto do_sigsegv;
3191 }
3192
3193 if (val == env->lock_value) {
3194 tmp = env->ir[reg];
3195 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
3196 goto do_sigsegv;
3197 }
3198 ret = 1;
3199 }
3200 }
3201 env->ir[reg] = ret;
3202 env->pc += 4;
3203
3204 mmap_unlock();
3205 end_exclusive();
3206 return;
3207
3208 do_sigsegv:
3209 mmap_unlock();
3210 end_exclusive();
3211
3212 info.si_signo = TARGET_SIGSEGV;
3213 info.si_errno = 0;
3214 info.si_code = TARGET_SEGV_MAPERR;
3215 info._sifields._sigfault._addr = addr;
3216 queue_signal(env, TARGET_SIGSEGV, QEMU_SI_FAULT, &info);
3217 }
3218
3219 void cpu_loop(CPUAlphaState *env)
3220 {
3221 CPUState *cs = CPU(alpha_env_get_cpu(env));
3222 int trapnr;
3223 target_siginfo_t info;
3224 abi_long sysret;
3225
3226 while (1) {
3227 cpu_exec_start(cs);
3228 trapnr = cpu_exec(cs);
3229 cpu_exec_end(cs);
3230 process_queued_cpu_work(cs);
3231
3232 /* All of the traps imply a transition through PALcode, which
3233 implies an REI instruction has been executed, which in turn
3234 means that the intr_flag should be cleared. */
3235 env->intr_flag = 0;
3236
3237 switch (trapnr) {
3238 case EXCP_RESET:
3239 fprintf(stderr, "Reset requested. Exit\n");
3240 exit(EXIT_FAILURE);
3241 break;
3242 case EXCP_MCHK:
3243 fprintf(stderr, "Machine check exception. Exit\n");
3244 exit(EXIT_FAILURE);
3245 break;
3246 case EXCP_SMP_INTERRUPT:
3247 case EXCP_CLK_INTERRUPT:
3248 case EXCP_DEV_INTERRUPT:
3249 fprintf(stderr, "External interrupt. Exit\n");
3250 exit(EXIT_FAILURE);
3251 break;
3252 case EXCP_MMFAULT:
3253 env->lock_addr = -1;
3254 info.si_signo = TARGET_SIGSEGV;
3255 info.si_errno = 0;
3256 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
3257 ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
3258 info._sifields._sigfault._addr = env->trap_arg0;
3259 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3260 break;
3261 case EXCP_UNALIGN:
3262 env->lock_addr = -1;
3263 info.si_signo = TARGET_SIGBUS;
3264 info.si_errno = 0;
3265 info.si_code = TARGET_BUS_ADRALN;
3266 info._sifields._sigfault._addr = env->trap_arg0;
3267 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3268 break;
3269 case EXCP_OPCDEC:
3270 do_sigill:
3271 env->lock_addr = -1;
3272 info.si_signo = TARGET_SIGILL;
3273 info.si_errno = 0;
3274 info.si_code = TARGET_ILL_ILLOPC;
3275 info._sifields._sigfault._addr = env->pc;
3276 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3277 break;
3278 case EXCP_ARITH:
3279 env->lock_addr = -1;
3280 info.si_signo = TARGET_SIGFPE;
3281 info.si_errno = 0;
3282 info.si_code = TARGET_FPE_FLTINV;
3283 info._sifields._sigfault._addr = env->pc;
3284 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3285 break;
3286 case EXCP_FEN:
3287 /* No-op. Linux simply re-enables the FPU. */
3288 break;
3289 case EXCP_CALL_PAL:
3290 env->lock_addr = -1;
3291 switch (env->error_code) {
3292 case 0x80:
3293 /* BPT */
3294 info.si_signo = TARGET_SIGTRAP;
3295 info.si_errno = 0;
3296 info.si_code = TARGET_TRAP_BRKPT;
3297 info._sifields._sigfault._addr = env->pc;
3298 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3299 break;
3300 case 0x81:
3301 /* BUGCHK */
3302 info.si_signo = TARGET_SIGTRAP;
3303 info.si_errno = 0;
3304 info.si_code = 0;
3305 info._sifields._sigfault._addr = env->pc;
3306 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3307 break;
3308 case 0x83:
3309 /* CALLSYS */
3310 trapnr = env->ir[IR_V0];
3311 sysret = do_syscall(env, trapnr,
3312 env->ir[IR_A0], env->ir[IR_A1],
3313 env->ir[IR_A2], env->ir[IR_A3],
3314 env->ir[IR_A4], env->ir[IR_A5],
3315 0, 0);
3316 if (sysret == -TARGET_ERESTARTSYS) {
3317 env->pc -= 4;
3318 break;
3319 }
3320 if (sysret == -TARGET_QEMU_ESIGRETURN) {
3321 break;
3322 }
3323 /* A syscall can write 0 to V0 to bypass the error check, similar
3324 to how this is handled internally in the Linux kernel.
3325 (Ab)use trapnr temporarily as a boolean indicating an error. */
3326 trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
3327 env->ir[IR_V0] = (trapnr ? -sysret : sysret);
3328 env->ir[IR_A3] = trapnr;
3329 break;
3330 case 0x86:
3331 /* IMB */
3332 /* ??? We can probably elide the code using page_unprotect
3333 that is checking for self-modifying code. Instead we
3334 could simply call tb_flush here. Until we work out the
3335 changes required to turn off the extra write protection,
3336 this can be a no-op. */
3337 break;
3338 case 0x9E:
3339 /* RDUNIQUE */
3340 /* Handled in the translator for usermode. */
3341 abort();
3342 case 0x9F:
3343 /* WRUNIQUE */
3344 /* Handled in the translator for usermode. */
3345 abort();
3346 case 0xAA:
3347 /* GENTRAP */
3348 info.si_signo = TARGET_SIGFPE;
3349 switch (env->ir[IR_A0]) {
3350 case TARGET_GEN_INTOVF:
3351 info.si_code = TARGET_FPE_INTOVF;
3352 break;
3353 case TARGET_GEN_INTDIV:
3354 info.si_code = TARGET_FPE_INTDIV;
3355 break;
3356 case TARGET_GEN_FLTOVF:
3357 info.si_code = TARGET_FPE_FLTOVF;
3358 break;
3359 case TARGET_GEN_FLTUND:
3360 info.si_code = TARGET_FPE_FLTUND;
3361 break;
3362 case TARGET_GEN_FLTINV:
3363 info.si_code = TARGET_FPE_FLTINV;
3364 break;
3365 case TARGET_GEN_FLTINE:
3366 info.si_code = TARGET_FPE_FLTRES;
3367 break;
3368 case TARGET_GEN_ROPRAND:
3369 info.si_code = 0;
3370 break;
3371 default:
3372 info.si_signo = TARGET_SIGTRAP;
3373 info.si_code = 0;
3374 break;
3375 }
3376 info.si_errno = 0;
3377 info._sifields._sigfault._addr = env->pc;
3378 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3379 break;
3380 default:
3381 goto do_sigill;
3382 }
3383 break;
3384 case EXCP_DEBUG:
3385 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
3386 if (info.si_signo) {
3387 env->lock_addr = -1;
3388 info.si_errno = 0;
3389 info.si_code = TARGET_TRAP_BRKPT;
3390 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3391 }
3392 break;
3393 case EXCP_STL_C:
3394 case EXCP_STQ_C:
3395 do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
3396 break;
3397 case EXCP_INTERRUPT:
3398 /* Just indicate that signals should be handled asap. */
3399 break;
3400 default:
3401 printf("Unhandled trap: 0x%x\n", trapnr);
3402 cpu_dump_state(cs, stderr, fprintf, 0);
3403 exit(EXIT_FAILURE);
3404 }
3405 process_pending_signals(env);
3406 }
3407 }
3408 #endif /* TARGET_ALPHA */
3409
3410 #ifdef TARGET_S390X
3411 void cpu_loop(CPUS390XState *env)
3412 {
3413 CPUState *cs = CPU(s390_env_get_cpu(env));
3414 int trapnr, n, sig;
3415 target_siginfo_t info;
3416 target_ulong addr;
3417 abi_long ret;
3418
3419 while (1) {
3420 cpu_exec_start(cs);
3421 trapnr = cpu_exec(cs);
3422 cpu_exec_end(cs);
3423 process_queued_cpu_work(cs);
3424
3425 switch (trapnr) {
3426 case EXCP_INTERRUPT:
3427 /* Just indicate that signals should be handled asap. */
3428 break;
3429
3430 case EXCP_SVC:
3431 n = env->int_svc_code;
3432 if (!n) {
3433 /* syscalls > 255 */
3434 n = env->regs[1];
3435 }
3436 env->psw.addr += env->int_svc_ilen;
3437 ret = do_syscall(env, n, env->regs[2], env->regs[3],
3438 env->regs[4], env->regs[5],
3439 env->regs[6], env->regs[7], 0, 0);
3440 if (ret == -TARGET_ERESTARTSYS) {
3441 env->psw.addr -= env->int_svc_ilen;
3442 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
3443 env->regs[2] = ret;
3444 }
3445 break;
3446
3447 case EXCP_DEBUG:
3448 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3449 if (sig) {
3450 n = TARGET_TRAP_BRKPT;
3451 goto do_signal_pc;
3452 }
3453 break;
3454 case EXCP_PGM:
3455 n = env->int_pgm_code;
3456 switch (n) {
3457 case PGM_OPERATION:
3458 case PGM_PRIVILEGED:
3459 sig = TARGET_SIGILL;
3460 n = TARGET_ILL_ILLOPC;
3461 goto do_signal_pc;
3462 case PGM_PROTECTION:
3463 case PGM_ADDRESSING:
3464 sig = TARGET_SIGSEGV;
3465 /* XXX: check env->error_code */
3466 n = TARGET_SEGV_MAPERR;
3467 addr = env->__excp_addr;
3468 goto do_signal;
3469 case PGM_EXECUTE:
3470 case PGM_SPECIFICATION:
3471 case PGM_SPECIAL_OP:
3472 case PGM_OPERAND:
3473 do_sigill_opn:
3474 sig = TARGET_SIGILL;
3475 n = TARGET_ILL_ILLOPN;
3476 goto do_signal_pc;
3477
3478 case PGM_FIXPT_OVERFLOW:
3479 sig = TARGET_SIGFPE;
3480 n = TARGET_FPE_INTOVF;
3481 goto do_signal_pc;
3482 case PGM_FIXPT_DIVIDE:
3483 sig = TARGET_SIGFPE;
3484 n = TARGET_FPE_INTDIV;
3485 goto do_signal_pc;
3486
3487 case PGM_DATA:
3488 n = (env->fpc >> 8) & 0xff;
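/* Bits 8-15 of the FPC hold the data-exception code (DXC): 0xff means
 * compare-and-trap, otherwise individual bits select which IEEE
 * exception to raise, decoded below. */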
3489 if (n == 0xff) {
3490 /* compare-and-trap */
3491 goto do_sigill_opn;
3492 } else {
3493 /* An IEEE exception, simulated or otherwise. */
3494 if (n & 0x80) {
3495 n = TARGET_FPE_FLTINV;
3496 } else if (n & 0x40) {
3497 n = TARGET_FPE_FLTDIV;
3498 } else if (n & 0x20) {
3499 n = TARGET_FPE_FLTOVF;
3500 } else if (n & 0x10) {
3501 n = TARGET_FPE_FLTUND;
3502 } else if (n & 0x08) {
3503 n = TARGET_FPE_FLTRES;
3504 } else {
3505 /* ??? Quantum exception; BFP, DFP error. */
3506 goto do_sigill_opn;
3507 }
3508 sig = TARGET_SIGFPE;
3509 goto do_signal_pc;
3510 }
3511
3512 default:
3513 fprintf(stderr, "Unhandled program exception: %#x\n", n);
3514 cpu_dump_state(cs, stderr, fprintf, 0);
3515 exit(EXIT_FAILURE);
3516 }
3517 break;
3518
3519 do_signal_pc:
3520 addr = env->psw.addr;
3521 do_signal:
3522 info.si_signo = sig;
3523 info.si_errno = 0;
3524 info.si_code = n;
3525 info._sifields._sigfault._addr = addr;
3526 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3527 break;
3528
3529 default:
3530 fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
3531 cpu_dump_state(cs, stderr, fprintf, 0);
3532 exit(EXIT_FAILURE);
3533 }
3534 process_pending_signals(env);
3535 }
3536 }
3537
3538 #endif /* TARGET_S390X */
3539
3540 #ifdef TARGET_TILEGX
3541
3542 static void gen_sigill_reg(CPUTLGState *env)
3543 {
3544 target_siginfo_t info;
3545
3546 info.si_signo = TARGET_SIGILL;
3547 info.si_errno = 0;
3548 info.si_code = TARGET_ILL_PRVREG;
3549 info._sifields._sigfault._addr = env->pc;
3550 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3551 }
3552
3553 static void do_signal(CPUTLGState *env, int signo, int sigcode)
3554 {
3555 target_siginfo_t info;
3556
3557 info.si_signo = signo;
3558 info.si_errno = 0;
3559 info._sifields._sigfault._addr = env->pc;
3560
3561 if (signo == TARGET_SIGSEGV) {
3562 /* The passed in sigcode is a dummy; check for a page mapping
3563 and pass either MAPERR or ACCERR. */
3564 target_ulong addr = env->excaddr;
3565 info._sifields._sigfault._addr = addr;
3566 if (page_check_range(addr, 1, PAGE_VALID) < 0) {
3567 sigcode = TARGET_SEGV_MAPERR;
3568 } else {
3569 sigcode = TARGET_SEGV_ACCERR;
3570 }
3571 }
3572 info.si_code = sigcode;
3573
3574 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
3575 }
3576
3577 static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
3578 {
3579 env->excaddr = addr;
3580 do_signal(env, TARGET_SIGSEGV, 0);
3581 }
3582
3583 static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
3584 {
3585 if (unlikely(reg >= TILEGX_R_COUNT)) {
3586 switch (reg) {
3587 case TILEGX_R_SN:
3588 case TILEGX_R_ZERO:
3589 return;
3590 case TILEGX_R_IDN0:
3591 case TILEGX_R_IDN1:
3592 case TILEGX_R_UDN0:
3593 case TILEGX_R_UDN1:
3594 case TILEGX_R_UDN2:
3595 case TILEGX_R_UDN3:
3596 gen_sigill_reg(env);
3597 return;
3598 default:
3599 g_assert_not_reached();
3600 }
3601 }
3602 env->regs[reg] = val;
3603 }
3604
3605 /*
3606 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
3607 * memory at the address held in the first source register. If the values are
3608 * not equal, then no memory operation is performed. If the values are equal,
3609 * the 8-byte quantity from the second source register is written into memory
3610 * at the address held in the first source register. In either case, the result
3611 * of the instruction is the value read from memory. The compare and write to
3612 * memory are atomic and thus can be used for synchronization purposes. This
3613 * instruction only operates for addresses aligned to a 8-byte boundary.
3614 * Unaligned memory access causes an Unaligned Data Reference interrupt.
3615 *
3616 * Functional Description (64-bit)
3617 * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
3618 * rf[Dest] = memVal;
3619 * if (memVal == SPR[CmpValueSPR])
3620 * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
3621 *
3622 * Functional Description (32-bit)
3623 * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
3624 * rf[Dest] = memVal;
3625 * if (memVal == signExtend32 (SPR[CmpValueSPR]))
3626 * memoryWriteWord (rf[SrcA], rf[SrcB]);
3627 *
3628 *
3629 * This function also processes exch and exch4 which need not process SPR.
3630 */
3631 static void do_exch(CPUTLGState *env, bool quad, bool cmp)
3632 {
3633 target_ulong addr;
3634 target_long val, sprval;
3635
3636 start_exclusive();
3637
3638 addr = env->atomic_srca;
3639 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3640 goto sigsegv_maperr;
3641 }
3642
3643 if (cmp) {
3644 if (quad) {
3645 sprval = env->spregs[TILEGX_SPR_CMPEXCH];
3646 } else {
3647 sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
3648 }
3649 }
3650
3651 if (!cmp || val == sprval) {
3652 target_long valb = env->atomic_srcb;
3653 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3654 goto sigsegv_maperr;
3655 }
3656 }
3657
3658 set_regval(env, env->atomic_dstr, val);
3659 end_exclusive();
3660 return;
3661
3662 sigsegv_maperr:
3663 end_exclusive();
3664 gen_sigsegv_maperr(env, addr);
3665 }
3666
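/* Emulate the TILE-Gx fetch-and-op instructions (fetchadd, fetchand, fetchor
 * and their "gez" and 4-byte variants): read the old value, compute the new
 * one, store it back (the gez forms skip the store when the sum is negative)
 * and return the old value in the destination register. */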
3667 static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
3668 {
3669 int8_t write = 1;
3670 target_ulong addr;
3671 target_long val, valb;
3672
3673 start_exclusive();
3674
3675 addr = env->atomic_srca;
3676 valb = env->atomic_srcb;
3677 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3678 goto sigsegv_maperr;
3679 }
3680
3681 switch (trapnr) {
3682 case TILEGX_EXCP_OPCODE_FETCHADD:
3683 case TILEGX_EXCP_OPCODE_FETCHADD4:
3684 valb += val;
3685 break;
3686 case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
3687 valb += val;
3688 if (valb < 0) {
3689 write = 0;
3690 }
3691 break;
3692 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
3693 valb += val;
3694 if ((int32_t)valb < 0) {
3695 write = 0;
3696 }
3697 break;
3698 case TILEGX_EXCP_OPCODE_FETCHAND:
3699 case TILEGX_EXCP_OPCODE_FETCHAND4:
3700 valb &= val;
3701 break;
3702 case TILEGX_EXCP_OPCODE_FETCHOR:
3703 case TILEGX_EXCP_OPCODE_FETCHOR4:
3704 valb |= val;
3705 break;
3706 default:
3707 g_assert_not_reached();
3708 }
3709
3710 if (write) {
3711 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3712 goto sigsegv_maperr;
3713 }
3714 }
3715
3716 set_regval(env, env->atomic_dstr, val);
3717 end_exclusive();
3718 return;
3719
3720 sigsegv_maperr:
3721 end_exclusive();
3722 gen_sigsegv_maperr(env, addr);
3723 }
3724
3725 void cpu_loop(CPUTLGState *env)
3726 {
3727 CPUState *cs = CPU(tilegx_env_get_cpu(env));
3728 int trapnr;
3729
3730 while (1) {
3731 cpu_exec_start(cs);
3732 trapnr = cpu_exec(cs);
3733 cpu_exec_end(cs);
3734 process_queued_cpu_work(cs);
3735
3736 switch (trapnr) {
3737 case TILEGX_EXCP_SYSCALL:
3738 {
3739 abi_ulong ret = do_syscall(env, env->regs[TILEGX_R_NR],
3740 env->regs[0], env->regs[1],
3741 env->regs[2], env->regs[3],
3742 env->regs[4], env->regs[5],
3743 env->regs[6], env->regs[7]);
3744 if (ret == -TARGET_ERESTARTSYS) {
3745 env->pc -= 8;
3746 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
3747 env->regs[TILEGX_R_RE] = ret;
3748 env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(ret) ? -ret : 0;
3749 }
3750 break;
3751 }
3752 case TILEGX_EXCP_OPCODE_EXCH:
3753 do_exch(env, true, false);
3754 break;
3755 case TILEGX_EXCP_OPCODE_EXCH4:
3756 do_exch(env, false, false);
3757 break;
3758 case TILEGX_EXCP_OPCODE_CMPEXCH:
3759 do_exch(env, true, true);
3760 break;
3761 case TILEGX_EXCP_OPCODE_CMPEXCH4:
3762 do_exch(env, false, true);
3763 break;
3764 case TILEGX_EXCP_OPCODE_FETCHADD:
3765 case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
3766 case TILEGX_EXCP_OPCODE_FETCHAND:
3767 case TILEGX_EXCP_OPCODE_FETCHOR:
3768 do_fetch(env, trapnr, true);
3769 break;
3770 case TILEGX_EXCP_OPCODE_FETCHADD4:
3771 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
3772 case TILEGX_EXCP_OPCODE_FETCHAND4:
3773 case TILEGX_EXCP_OPCODE_FETCHOR4:
3774 do_fetch(env, trapnr, false);
3775 break;
3776 case TILEGX_EXCP_SIGNAL:
3777 do_signal(env, env->signo, env->sigcode);
3778 break;
3779 case TILEGX_EXCP_REG_IDN_ACCESS:
3780 case TILEGX_EXCP_REG_UDN_ACCESS:
3781 gen_sigill_reg(env);
3782 break;
3783 default:
3784 fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
3785 g_assert_not_reached();
3786 }
3787 process_pending_signals(env);
3788 }
3789 }
3790
3791 #endif
3792
3793 THREAD CPUState *thread_cpu;
3794
3795 bool qemu_cpu_is_self(CPUState *cpu)
3796 {
3797 return thread_cpu == cpu;
3798 }
3799
3800 void qemu_cpu_kick(CPUState *cpu)
3801 {
3802 cpu_exit(cpu);
3803 }
3804
3805 void task_settid(TaskState *ts)
3806 {
3807 if (ts->ts_tid == 0) {
3808 ts->ts_tid = (pid_t)syscall(SYS_gettid);
3809 }
3810 }
3811
3812 void stop_all_tasks(void)
3813 {
3814 /*
3815 * We trust that when using NPTL, start_exclusive()
3816 * handles thread stopping correctly.
3817 */
3818 start_exclusive();
3819 }
3820
3821 /* Assumes contents are already zeroed. */
3822 void init_task_state(TaskState *ts)
3823 {
3824 ts->used = 1;
3825 }
3826
3827 CPUArchState *cpu_copy(CPUArchState *env)
3828 {
3829 CPUState *cpu = ENV_GET_CPU(env);
3830 CPUState *new_cpu = cpu_init(cpu_model);
3831 CPUArchState *new_env = new_cpu->env_ptr;
3832 CPUBreakpoint *bp;
3833 CPUWatchpoint *wp;
3834
3835 /* Reset non arch specific state */
3836 cpu_reset(new_cpu);
3837
3838 memcpy(new_env, env, sizeof(CPUArchState));
3839
3840 /* Clone all break/watchpoints.
3841 Note: Once we support ptrace with hw-debug register access, make sure
3842 BP_CPU break/watchpoints are handled correctly on clone. */
3843 QTAILQ_INIT(&new_cpu->breakpoints);
3844 QTAILQ_INIT(&new_cpu->watchpoints);
3845 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
3846 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
3847 }
3848 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
3849 cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
3850 }
3851
3852 return new_env;
3853 }
3854
3855 static void handle_arg_help(const char *arg)
3856 {
3857 usage(EXIT_SUCCESS);
3858 }
3859
3860 static void handle_arg_log(const char *arg)
3861 {
3862 int mask;
3863
3864 mask = qemu_str_to_log_mask(arg);
3865 if (!mask) {
3866 qemu_print_log_usage(stdout);
3867 exit(EXIT_FAILURE);
3868 }
3869 qemu_log_needs_buffers();
3870 qemu_set_log(mask);
3871 }
3872
3873 static void handle_arg_log_filename(const char *arg)
3874 {
3875 qemu_set_log_filename(arg, &error_fatal);
3876 }
3877
3878 static void handle_arg_set_env(const char *arg)
3879 {
3880 char *r, *p, *token;
3881 r = p = strdup(arg);
3882 while ((token = strsep(&p, ",")) != NULL) {
3883 if (envlist_setenv(envlist, token) != 0) {
3884 usage(EXIT_FAILURE);
3885 }
3886 }
3887 free(r);
3888 }
3889
3890 static void handle_arg_unset_env(const char *arg)
3891 {
3892 char *r, *p, *token;
3893 r = p = strdup(arg);
3894 while ((token = strsep(&p, ",")) != NULL) {
3895 if (envlist_unsetenv(envlist, token) != 0) {
3896 usage(EXIT_FAILURE);
3897 }
3898 }
3899 free(r);
3900 }
3901
3902 static void handle_arg_argv0(const char *arg)
3903 {
3904 argv0 = strdup(arg);
3905 }
3906
3907 static void handle_arg_stack_size(const char *arg)
3908 {
3909 char *p;
3910 guest_stack_size = strtoul(arg, &p, 0);
3911 if (guest_stack_size == 0) {
3912 usage(EXIT_FAILURE);
3913 }
3914
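/* Accept an optional size suffix, e.g. "-s 16M" or "-s 512k". */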
3915 if (*p == 'M') {
3916 guest_stack_size *= 1024 * 1024;
3917 } else if (*p == 'k' || *p == 'K') {
3918 guest_stack_size *= 1024;
3919 }
3920 }
3921
3922 static void handle_arg_ld_prefix(const char *arg)
3923 {
3924 interp_prefix = strdup(arg);
3925 }
3926
3927 static void handle_arg_pagesize(const char *arg)
3928 {
3929 qemu_host_page_size = atoi(arg);
3930 if (qemu_host_page_size == 0 ||
3931 (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
3932 fprintf(stderr, "page size must be a power of two\n");
3933 exit(EXIT_FAILURE);
3934 }
3935 }
3936
3937 static void handle_arg_randseed(const char *arg)
3938 {
3939 unsigned long long seed;
3940
3941 if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
3942 fprintf(stderr, "Invalid seed number: %s\n", arg);
3943 exit(EXIT_FAILURE);
3944 }
3945 srand(seed);
3946 }
3947
3948 static void handle_arg_gdb(const char *arg)
3949 {
3950 gdbstub_port = atoi(arg);
3951 }
3952
3953 static void handle_arg_uname(const char *arg)
3954 {
3955 qemu_uname_release = strdup(arg);
3956 }
3957
3958 static void handle_arg_cpu(const char *arg)
3959 {
3960 cpu_model = strdup(arg);
3961 if (cpu_model == NULL || is_help_option(cpu_model)) {
3962 /* XXX: implement xxx_cpu_list for targets that still miss it */
3963 #if defined(cpu_list)
3964 cpu_list(stdout, &fprintf);
3965 #endif
3966 exit(EXIT_FAILURE);
3967 }
3968 }
3969
3970 static void handle_arg_guest_base(const char *arg)
3971 {
3972 guest_base = strtol(arg, NULL, 0);
3973 have_guest_base = 1;
3974 }
3975
3976 static void handle_arg_reserved_va(const char *arg)
3977 {
3978 char *p;
3979 int shift = 0;
3980 reserved_va = strtoul(arg, &p, 0);
3981 switch (*p) {
3982 case 'k':
3983 case 'K':
3984 shift = 10;
3985 break;
3986 case 'M':
3987 shift = 20;
3988 break;
3989 case 'G':
3990 shift = 30;
3991 break;
3992 }
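/* Apply the suffix and reject values whose scaling overflows, or that
 * exceed the guest virtual address space where the host is wider. */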
3993 if (shift) {
3994 unsigned long unshifted = reserved_va;
3995 p++;
3996 reserved_va <<= shift;
3997 if (((reserved_va >> shift) != unshifted)
3998 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3999 || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
4000 #endif
4001 ) {
4002 fprintf(stderr, "Reserved virtual address too big\n");
4003 exit(EXIT_FAILURE);
4004 }
4005 }
4006 if (*p) {
4007 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
4008 exit(EXIT_FAILURE);
4009 }
4010 }
4011
4012 static void handle_arg_singlestep(const char *arg)
4013 {
4014 singlestep = 1;
4015 }
4016
4017 static void handle_arg_strace(const char *arg)
4018 {
4019 do_strace = 1;
4020 }
4021
4022 static void handle_arg_version(const char *arg)
4023 {
4024 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
4025 ", " QEMU_COPYRIGHT "\n");
4026 exit(EXIT_SUCCESS);
4027 }
4028
4029 static char *trace_file;
4030 static void handle_arg_trace(const char *arg)
4031 {
4032 g_free(trace_file);
4033 trace_file = trace_opt_parse(arg);
4034 }
4035
4036 struct qemu_argument {
4037 const char *argv;
4038 const char *env;
4039 bool has_arg;
4040 void (*handle_opt)(const char *arg);
4041 const char *example;
4042 const char *help;
4043 };
4044
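/* Option table: each entry can also be supplied through the environment
 * variable named in its second field (see parse_args()); e.g. setting
 * QEMU_STRACE has the same effect as passing -strace. */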
4045 static const struct qemu_argument arg_table[] = {
4046 {"h", "", false, handle_arg_help,
4047 "", "print this help"},
4048 {"help", "", false, handle_arg_help,
4049 "", ""},
4050 {"g", "QEMU_GDB", true, handle_arg_gdb,
4051 "port", "wait gdb connection to 'port'"},
4052 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix,
4053 "path", "set the elf interpreter prefix to 'path'"},
4054 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
4055 "size", "set the stack size to 'size' bytes"},
4056 {"cpu", "QEMU_CPU", true, handle_arg_cpu,
4057 "model", "select CPU (-cpu help for list)"},
4058 {"E", "QEMU_SET_ENV", true, handle_arg_set_env,
4059 "var=value", "sets targets environment variable (see below)"},
4060 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
4061 "var", "unsets targets environment variable (see below)"},
4062 {"0", "QEMU_ARGV0", true, handle_arg_argv0,
4063 "argv0", "forces target process argv[0] to be 'argv0'"},
4064 {"r", "QEMU_UNAME", true, handle_arg_uname,
4065 "uname", "set qemu uname release string to 'uname'"},
4066 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
4067 "address", "set guest_base address to 'address'"},
4068 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
4069 "size", "reserve 'size' bytes for guest virtual address space"},
4070 {"d", "QEMU_LOG", true, handle_arg_log,
4071 "item[,...]", "enable logging of specified items "
4072 "(use '-d help' for a list of items)"},
4073 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
4074 "logfile", "write logs to 'logfile' (default stderr)"},
4075 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
4076 "pagesize", "set the host page size to 'pagesize'"},
4077 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
4078 "", "run in singlestep mode"},
4079 {"strace", "QEMU_STRACE", false, handle_arg_strace,
4080 "", "log system calls"},
4081 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed,
4082 "", "Seed for pseudo-random number generator"},
4083 {"trace", "QEMU_TRACE", true, handle_arg_trace,
4084 "", "[[enable=]<pattern>][,events=<file>][,file=<file>]"},
4085 {"version", "QEMU_VERSION", false, handle_arg_version,
4086 "", "display version information and exit"},
4087 {NULL, NULL, false, NULL, NULL, NULL}
4088 };
4089
4090 static void usage(int exitcode)
4091 {
4092 const struct qemu_argument *arginfo;
4093 int maxarglen;
4094 int maxenvlen;
4095
4096 printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
4097 "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
4098 "\n"
4099 "Options and associated environment variables:\n"
4100 "\n");
4101
4102 /* Calculate column widths. We must always have at least enough space
4103 * for the column header.
4104 */
4105 maxarglen = strlen("Argument");
4106 maxenvlen = strlen("Env-variable");
4107
4108 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4109 int arglen = strlen(arginfo->argv);
4110 if (arginfo->has_arg) {
4111 arglen += strlen(arginfo->example) + 1;
4112 }
4113 if (strlen(arginfo->env) > maxenvlen) {
4114 maxenvlen = strlen(arginfo->env);
4115 }
4116 if (arglen > maxarglen) {
4117 maxarglen = arglen;
4118 }
4119 }
4120
4121 printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
4122 maxenvlen, "Env-variable");
4123
4124 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4125 if (arginfo->has_arg) {
4126 printf("-%s %-*s %-*s %s\n", arginfo->argv,
4127 (int)(maxarglen - strlen(arginfo->argv) - 1),
4128 arginfo->example, maxenvlen, arginfo->env, arginfo->help);
4129 } else {
4130 printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
4131 maxenvlen, arginfo->env,
4132 arginfo->help);
4133 }
4134 }
4135
4136 printf("\n"
4137 "Defaults:\n"
4138 "QEMU_LD_PREFIX = %s\n"
4139 "QEMU_STACK_SIZE = %ld byte\n",
4140 interp_prefix,
4141 guest_stack_size);
4142
4143 printf("\n"
4144 "You can use -E and -U options or the QEMU_SET_ENV and\n"
4145 "QEMU_UNSET_ENV environment variables to set and unset\n"
4146 "environment variables for the target process.\n"
4147 "It is possible to provide several variables by separating them\n"
4148 "by commas in getsubopt(3) style. Additionally it is possible to\n"
4149 "provide the -E and -U options multiple times.\n"
4150 "The following lines are equivalent:\n"
4151 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
4152 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
4153 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4154 "Note that if you provide several changes to a single variable\n"
4155 "the last change will stay in effect.\n");
4156
4157 exit(exitcode);
4158 }
4159
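/*
 * Consume options from argv until the first non-option argument (the
 * guest program) and return its index.  An illustrative invocation
 * (paths and variables are examples only) might be:
 *   qemu-<target> -L /usr/<target>-sysroot -E LD_DEBUG=files ./prog arg1
 */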
4160 static int parse_args(int argc, char **argv)
4161 {
4162 const char *r;
4163 int optind;
4164 const struct qemu_argument *arginfo;
4165
4166 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4167 if (arginfo->env == NULL) {
4168 continue;
4169 }
4170
4171 r = getenv(arginfo->env);
4172 if (r != NULL) {
4173 arginfo->handle_opt(r);
4174 }
4175 }
4176
4177 optind = 1;
4178 for (;;) {
4179 if (optind >= argc) {
4180 break;
4181 }
4182 r = argv[optind];
4183 if (r[0] != '-') {
4184 break;
4185 }
4186 optind++;
4187 r++;
4188 if (!strcmp(r, "-")) {
4189 break;
4190 }
4191 /* Treat --foo the same as -foo. */
4192 if (r[0] == '-') {
4193 r++;
4194 }
4195
4196 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4197 if (!strcmp(r, arginfo->argv)) {
4198 if (arginfo->has_arg) {
4199 if (optind >= argc) {
4200 (void) fprintf(stderr,
4201 "qemu: missing argument for option '%s'\n", r);
4202 exit(EXIT_FAILURE);
4203 }
4204 arginfo->handle_opt(argv[optind]);
4205 optind++;
4206 } else {
4207 arginfo->handle_opt(NULL);
4208 }
4209 break;
4210 }
4211 }
4212
4213 /* no option matched the current argv */
4214 if (arginfo->handle_opt == NULL) {
4215 (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
4216 exit(EXIT_FAILURE);
4217 }
4218 }
4219
4220 if (optind >= argc) {
4221 (void) fprintf(stderr, "qemu: no user program specified\n");
4222 exit(EXIT_FAILURE);
4223 }
4224
4225 filename = argv[optind];
4226 exec_path = argv[optind];
4227
4228 return optind;
4229 }
4230
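/*
 * Start-up sequence: parse options, initialise TCG and the CPU model,
 * reserve the guest address space if requested, load the guest binary
 * with loader_exec(), copy the loader-provided register image into the
 * emulated CPU, then enter cpu_loop(), which never returns.
 */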
4231 int main(int argc, char **argv, char **envp)
4232 {
4233 struct target_pt_regs regs1, *regs = &regs1;
4234 struct image_info info1, *info = &info1;
4235 struct linux_binprm bprm;
4236 TaskState *ts;
4237 CPUArchState *env;
4238 CPUState *cpu;
4239 int optind;
4240 char **target_environ, **wrk;
4241 char **target_argv;
4242 int target_argc;
4243 int i;
4244 int ret;
4245 int execfd;
4246
4247 qemu_init_cpu_list();
4248 qemu_init_cpu_loop();
4249 module_call_init(MODULE_INIT_QOM);
4250
4251 if ((envlist = envlist_create()) == NULL) {
4252 (void) fprintf(stderr, "Unable to allocate envlist\n");
4253 exit(EXIT_FAILURE);
4254 }
4255
4256 /* add current environment into the list */
4257 for (wrk = environ; *wrk != NULL; wrk++) {
4258 (void) envlist_setenv(envlist, *wrk);
4259 }
4260
4261 /* Read the stack limit from the kernel. If it's "unlimited",
4262 then we can do little else besides use the default. */
4263 {
4264 struct rlimit lim;
4265 if (getrlimit(RLIMIT_STACK, &lim) == 0
4266 && lim.rlim_cur != RLIM_INFINITY
4267 && lim.rlim_cur == (target_long)lim.rlim_cur) {
4268 guest_stack_size = lim.rlim_cur;
4269 }
4270 }
4271
4272 cpu_model = NULL;
4273
4274 srand(time(NULL));
4275
4276 qemu_add_opts(&qemu_trace_opts);
4277
4278 optind = parse_args(argc, argv);
4279
4280 if (!trace_init_backends()) {
4281 exit(1);
4282 }
4283 trace_init_file(trace_file);
4284
4285 /* Zero out regs */
4286 memset(regs, 0, sizeof(struct target_pt_regs));
4287
4288 /* Zero out image_info */
4289 memset(info, 0, sizeof(struct image_info));
4290
4291 memset(&bprm, 0, sizeof (bprm));
4292
4293 /* Scan interp_prefix dir for replacement files. */
4294 init_paths(interp_prefix);
4295
4296 init_qemu_uname_release();
4297
4298 if (cpu_model == NULL) {
4299 #if defined(TARGET_I386)
4300 #ifdef TARGET_X86_64
4301 cpu_model = "qemu64";
4302 #else
4303 cpu_model = "qemu32";
4304 #endif
4305 #elif defined(TARGET_ARM)
4306 cpu_model = "any";
4307 #elif defined(TARGET_UNICORE32)
4308 cpu_model = "any";
4309 #elif defined(TARGET_M68K)
4310 cpu_model = "any";
4311 #elif defined(TARGET_SPARC)
4312 #ifdef TARGET_SPARC64
4313 cpu_model = "TI UltraSparc II";
4314 #else
4315 cpu_model = "Fujitsu MB86904";
4316 #endif
4317 #elif defined(TARGET_MIPS)
4318 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4319 cpu_model = "5KEf";
4320 #else
4321 cpu_model = "24Kf";
4322 #endif
4323 #elif defined TARGET_OPENRISC
4324 cpu_model = "or1200";
4325 #elif defined(TARGET_PPC)
4326 # ifdef TARGET_PPC64
4327 cpu_model = "POWER8";
4328 # else
4329 cpu_model = "750";
4330 # endif
4331 #elif defined TARGET_SH4
4332 cpu_model = TYPE_SH7785_CPU;
4333 #else
4334 cpu_model = "any";
4335 #endif
4336 }
4337 tcg_exec_init(0);
4338 /* NOTE: we need to init the CPU at this stage to get
4339 qemu_host_page_size */
4340 cpu = cpu_init(cpu_model);
4341 if (!cpu) {
4342 fprintf(stderr, "Unable to find CPU definition\n");
4343 exit(EXIT_FAILURE);
4344 }
4345 env = cpu->env_ptr;
4346 cpu_reset(cpu);
4347
4348 thread_cpu = cpu;
4349
4350 if (getenv("QEMU_STRACE")) {
4351 do_strace = 1;
4352 }
4353
4354 if (getenv("QEMU_RAND_SEED")) {
4355 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4356 }
4357
4358 target_environ = envlist_to_environ(envlist, NULL);
4359 envlist_free(envlist);
4360
4361 /*
4362 * Now that page sizes are configured in cpu_init() we can do
4363 * proper page alignment for guest_base.
4364 */
4365 guest_base = HOST_PAGE_ALIGN(guest_base);
4366
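/*
 * If the user requested a fixed guest_base (-B) or a reserved region
 * (-R), reserve the guest address space up front so that later guest
 * mappings fall inside it.
 */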
4367 if (reserved_va || have_guest_base) {
4368 guest_base = init_guest_space(guest_base, reserved_va, 0,
4369 have_guest_base);
4370 if (guest_base == (unsigned long)-1) {
4371 fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
4372 "space for use as guest address space (check your virtual "
4373 "memory ulimit setting or reserve less using -R option)\n",
4374 reserved_va);
4375 exit(EXIT_FAILURE);
4376 }
4377
4378 if (reserved_va) {
4379 mmap_next_start = reserved_va;
4380 }
4381 }
4382
4383 /*
4384 * Read in the mmap_min_addr kernel parameter.  This value is used
4385 * when loading the ELF image to determine whether guest_base
4386 * is needed. It is also used in mmap_find_vma.
4387 */
4388 {
4389 FILE *fp;
4390
4391 if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
4392 unsigned long tmp;
4393 if (fscanf(fp, "%lu", &tmp) == 1) {
4394 mmap_min_addr = tmp;
4395 qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
4396 }
4397 fclose(fp);
4398 }
4399 }
4400
4401 /*
4402 * Prepare copy of argv vector for target.
4403 */
4404 target_argc = argc - optind;
4405 target_argv = calloc(target_argc + 1, sizeof (char *));
4406 if (target_argv == NULL) {
4407 (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
4408 exit(EXIT_FAILURE);
4409 }
4410
4411 /*
4412 * If argv0 is specified (using '-0' switch) we replace
4413 * argv[0] pointer with the given one.
4414 */
4415 i = 0;
4416 if (argv0 != NULL) {
4417 target_argv[i++] = strdup(argv0);
4418 }
4419 for (; i < target_argc; i++) {
4420 target_argv[i] = strdup(argv[optind + i]);
4421 }
4422 target_argv[target_argc] = NULL;
4423
4424 ts = g_new0(TaskState, 1);
4425 init_task_state(ts);
4426 /* build Task State */
4427 ts->info = info;
4428 ts->bprm = &bprm;
4429 cpu->opaque = ts;
4430 task_settid(ts);
4431
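/* If the kernel handed us an already-open descriptor for the guest binary
 * via AT_EXECFD (e.g. binfmt_misc with the open-binary flag), use it;
 * otherwise open the file ourselves. */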
4432 execfd = qemu_getauxval(AT_EXECFD);
4433 if (execfd == 0) {
4434 execfd = open(filename, O_RDONLY);
4435 if (execfd < 0) {
4436 printf("Error while loading %s: %s\n", filename, strerror(errno));
4437 _exit(EXIT_FAILURE);
4438 }
4439 }
4440
4441 ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
4442 info, &bprm);
4443 if (ret != 0) {
4444 printf("Error while loading %s: %s\n", filename, strerror(-ret));
4445 _exit(EXIT_FAILURE);
4446 }
4447
4448 for (wrk = target_environ; *wrk; wrk++) {
4449 free(*wrk);
4450 }
4451
4452 free(target_environ);
4453
4454 if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
4455 qemu_log("guest_base 0x%lx\n", guest_base);
4456 log_page_dump();
4457
4458 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
4459 qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
4460 qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
4461 info->start_code);
4462 qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
4463 info->start_data);
4464 qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
4465 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
4466 info->start_stack);
4467 qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
4468 qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
4469 }
4470
4471 target_set_brk(info->brk);
4472 syscall_init();
4473 signal_init();
4474
4475 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4476 generating the prologue until now so that the prologue can take
4477 the real value of GUEST_BASE into account. */
4478 tcg_prologue_init(&tcg_ctx);
4479
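/*
 * Per-target initial CPU state: copy the registers prepared by the ELF
 * loader into the emulated CPU and set up whatever architectural state
 * (segments, control registers, status flags) the guest expects on entry.
 */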
4480 #if defined(TARGET_I386)
4481 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
4482 env->hflags |= HF_PE_MASK | HF_CPL_MASK;
4483 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4484 env->cr[4] |= CR4_OSFXSR_MASK;
4485 env->hflags |= HF_OSFXSR_MASK;
4486 }
4487 #ifndef TARGET_ABI32
4488 /* enable 64 bit mode if possible */
4489 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
4490 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
4491 exit(EXIT_FAILURE);
4492 }
4493 env->cr[4] |= CR4_PAE_MASK;
4494 env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
4495 env->hflags |= HF_LMA_MASK;
4496 #endif
4497
4498 /* flags setup: we activate the IRQs by default as in user mode */
4499 env->eflags |= IF_MASK;
4500
4501 /* linux register setup */
4502 #ifndef TARGET_ABI32
4503 env->regs[R_EAX] = regs->rax;
4504 env->regs[R_EBX] = regs->rbx;
4505 env->regs[R_ECX] = regs->rcx;
4506 env->regs[R_EDX] = regs->rdx;
4507 env->regs[R_ESI] = regs->rsi;
4508 env->regs[R_EDI] = regs->rdi;
4509 env->regs[R_EBP] = regs->rbp;
4510 env->regs[R_ESP] = regs->rsp;
4511 env->eip = regs->rip;
4512 #else
4513 env->regs[R_EAX] = regs->eax;
4514 env->regs[R_EBX] = regs->ebx;
4515 env->regs[R_ECX] = regs->ecx;
4516 env->regs[R_EDX] = regs->edx;
4517 env->regs[R_ESI] = regs->esi;
4518 env->regs[R_EDI] = regs->edi;
4519 env->regs[R_EBP] = regs->ebp;
4520 env->regs[R_ESP] = regs->esp;
4521 env->eip = regs->eip;
4522 #endif
4523
4524 /* linux interrupt setup */
4525 #ifndef TARGET_ABI32
4526 env->idt.limit = 511;
4527 #else
4528 env->idt.limit = 255;
4529 #endif
4530 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
4531 PROT_READ|PROT_WRITE,
4532 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4533 idt_table = g2h(env->idt.base);
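/* Vectors 0-19 are the architectural exceptions; only #BP (3) and #OF (4)
 * get DPL 3 so user code can raise them.  Vector 0x80 is the legacy Linux
 * int $0x80 system call gate, also DPL 3. */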
4534 set_idt(0, 0);
4535 set_idt(1, 0);
4536 set_idt(2, 0);
4537 set_idt(3, 3);
4538 set_idt(4, 3);
4539 set_idt(5, 0);
4540 set_idt(6, 0);
4541 set_idt(7, 0);
4542 set_idt(8, 0);
4543 set_idt(9, 0);
4544 set_idt(10, 0);
4545 set_idt(11, 0);
4546 set_idt(12, 0);
4547 set_idt(13, 0);
4548 set_idt(14, 0);
4549 set_idt(15, 0);
4550 set_idt(16, 0);
4551 set_idt(17, 0);
4552 set_idt(18, 0);
4553 set_idt(19, 0);
4554 set_idt(0x80, 3);
4555
4556 /* linux segment setup */
4557 {
4558 uint64_t *gdt_table;
4559 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
4560 PROT_READ|PROT_WRITE,
4561 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4562 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
4563 gdt_table = g2h(env->gdt.base);
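/* Flat user code and data segments: base 0, 4GB limit (0xfffff pages with
 * the G bit set), DPL 3.  The 64-bit ABI additionally sets the L bit on
 * the code segment. */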
4564 #ifdef TARGET_ABI32
4565 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4566 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4567 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4568 #else
4569 /* 64 bit code segment */
4570 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4571 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4572 DESC_L_MASK |
4573 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4574 #endif
4575 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
4576 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4577 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
4578 }
4579 cpu_x86_load_seg(env, R_CS, __USER_CS);
4580 cpu_x86_load_seg(env, R_SS, __USER_DS);
4581 #ifdef TARGET_ABI32
4582 cpu_x86_load_seg(env, R_DS, __USER_DS);
4583 cpu_x86_load_seg(env, R_ES, __USER_DS);
4584 cpu_x86_load_seg(env, R_FS, __USER_DS);
4585 cpu_x86_load_seg(env, R_GS, __USER_DS);
4586 /* This hack makes Wine work... */
4587 env->segs[R_FS].selector = 0;
4588 #else
4589 cpu_x86_load_seg(env, R_DS, 0);
4590 cpu_x86_load_seg(env, R_ES, 0);
4591 cpu_x86_load_seg(env, R_FS, 0);
4592 cpu_x86_load_seg(env, R_GS, 0);
4593 #endif
4594 #elif defined(TARGET_AARCH64)
4595 {
4596 int i;
4597
4598 if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
4599 fprintf(stderr,
4600 "The selected ARM CPU does not support 64 bit mode\n");
4601 exit(EXIT_FAILURE);
4602 }
4603
4604 for (i = 0; i < 31; i++) {
4605 env->xregs[i] = regs->regs[i];
4606 }
4607 env->pc = regs->pc;
4608 env->xregs[31] = regs->sp;
4609 }
4610 #elif defined(TARGET_ARM)
4611 {
4612 int i;
4613 cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
4614 CPSRWriteByInstr);
4615 for (i = 0; i < 16; i++) {
4616 env->regs[i] = regs->uregs[i];
4617 }
4618 #ifdef TARGET_WORDS_BIGENDIAN
4619 /* Enable BE8. */
4620 if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
4621 && (info->elf_flags & EF_ARM_BE8)) {
4622 env->uncached_cpsr |= CPSR_E;
4623 env->cp15.sctlr_el[1] |= SCTLR_E0E;
4624 } else {
4625 env->cp15.sctlr_el[1] |= SCTLR_B;
4626 }
4627 #endif
4628 }
4629 #elif defined(TARGET_UNICORE32)
4630 {
4631 int i;
4632 cpu_asr_write(env, regs->uregs[32], 0xffffffff);
4633 for (i = 0; i < 32; i++) {
4634 env->regs[i] = regs->uregs[i];
4635 }
4636 }
4637 #elif defined(TARGET_SPARC)
4638 {
4639 int i;
4640 env->pc = regs->pc;
4641 env->npc = regs->npc;
4642 env->y = regs->y;
4643 for (i = 0; i < 8; i++)
4644 env->gregs[i] = regs->u_regs[i];
4645 for (i = 0; i < 8; i++)
4646 env->regwptr[i] = regs->u_regs[i + 8];
4647 }
4648 #elif defined(TARGET_PPC)
4649 {
4650 int i;
4651
4652 #if defined(TARGET_PPC64)
4653 int flag = (env->insns_flags2 & PPC2_BOOKE206) ? MSR_CM : MSR_SF;
4654 #if defined(TARGET_ABI32)
4655 env->msr &= ~((target_ulong)1 << flag);
4656 #else
4657 env->msr |= (target_ulong)1 << flag;
4658 #endif
4659 #endif
4660 env->nip = regs->nip;
4661 for (i = 0; i < 32; i++) {
4662 env->gpr[i] = regs->gpr[i];
4663 }
4664 }
4665 #elif defined(TARGET_M68K)
4666 {
4667 env->pc = regs->pc;
4668 env->dregs[0] = regs->d0;
4669 env->dregs[1] = regs->d1;
4670 env->dregs[2] = regs->d2;
4671 env->dregs[3] = regs->d3;
4672 env->dregs[4] = regs->d4;
4673 env->dregs[5] = regs->d5;
4674 env->dregs[6] = regs->d6;
4675 env->dregs[7] = regs->d7;
4676 env->aregs[0] = regs->a0;
4677 env->aregs[1] = regs->a1;
4678 env->aregs[2] = regs->a2;
4679 env->aregs[3] = regs->a3;
4680 env->aregs[4] = regs->a4;
4681 env->aregs[5] = regs->a5;
4682 env->aregs[6] = regs->a6;
4683 env->aregs[7] = regs->usp;
4684 env->sr = regs->sr;
4685 ts->sim_syscalls = 1;
4686 }
4687 #elif defined(TARGET_MICROBLAZE)
4688 {
4689 env->regs[0] = regs->r0;
4690 env->regs[1] = regs->r1;
4691 env->regs[2] = regs->r2;
4692 env->regs[3] = regs->r3;
4693 env->regs[4] = regs->r4;
4694 env->regs[5] = regs->r5;
4695 env->regs[6] = regs->r6;
4696 env->regs[7] = regs->r7;
4697 env->regs[8] = regs->r8;
4698 env->regs[9] = regs->r9;
4699 env->regs[10] = regs->r10;
4700 env->regs[11] = regs->r11;
4701 env->regs[12] = regs->r12;
4702 env->regs[13] = regs->r13;
4703 env->regs[14] = regs->r14;
4704 env->regs[15] = regs->r15;
4705 env->regs[16] = regs->r16;
4706 env->regs[17] = regs->r17;
4707 env->regs[18] = regs->r18;
4708 env->regs[19] = regs->r19;
4709 env->regs[20] = regs->r20;
4710 env->regs[21] = regs->r21;
4711 env->regs[22] = regs->r22;
4712 env->regs[23] = regs->r23;
4713 env->regs[24] = regs->r24;
4714 env->regs[25] = regs->r25;
4715 env->regs[26] = regs->r26;
4716 env->regs[27] = regs->r27;
4717 env->regs[28] = regs->r28;
4718 env->regs[29] = regs->r29;
4719 env->regs[30] = regs->r30;
4720 env->regs[31] = regs->r31;
4721 env->sregs[SR_PC] = regs->pc;
4722 }
4723 #elif defined(TARGET_MIPS)
4724 {
4725 int i;
4726
4727 for (i = 0; i < 32; i++) {
4728 env->active_tc.gpr[i] = regs->regs[i];
4729 }
4730 env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
4731 if (regs->cp0_epc & 1) {
4732 env->hflags |= MIPS_HFLAG_M16;
4733 }
4734 if (((info->elf_flags & EF_MIPS_NAN2008) != 0) !=
4735 ((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) != 0)) {
4736 if ((env->active_fpu.fcr31_rw_bitmask &
4737 (1 << FCR31_NAN2008)) == 0) {
4738 fprintf(stderr, "ELF binary's NaN mode not supported by CPU\n");
4739 exit(1);
4740 }
4741 if ((info->elf_flags & EF_MIPS_NAN2008) != 0) {
4742 env->active_fpu.fcr31 |= (1 << FCR31_NAN2008);
4743 } else {
4744 env->active_fpu.fcr31 &= ~(1 << FCR31_NAN2008);
4745 }
4746 restore_snan_bit_mode(env);
4747 }
4748 }
4749 #elif defined(TARGET_OPENRISC)
4750 {
4751 int i;
4752
4753 for (i = 0; i < 32; i++) {
4754 env->gpr[i] = regs->gpr[i];
4755 }
4756
4757 env->sr = regs->sr;
4758 env->pc = regs->pc;
4759 }
4760 #elif defined(TARGET_SH4)
4761 {
4762 int i;
4763
4764 for (i = 0; i < 16; i++) {
4765 env->gregs[i] = regs->regs[i];
4766 }
4767 env->pc = regs->pc;
4768 }
4769 #elif defined(TARGET_ALPHA)
4770 {
4771 int i;
4772
4773 for (i = 0; i < 28; i++) {
4774 env->ir[i] = ((abi_ulong *)regs)[i];
4775 }
4776 env->ir[IR_SP] = regs->usp;
4777 env->pc = regs->pc;
4778 }
4779 #elif defined(TARGET_CRIS)
4780 {
4781 env->regs[0] = regs->r0;
4782 env->regs[1] = regs->r1;
4783 env->regs[2] = regs->r2;
4784 env->regs[3] = regs->r3;
4785 env->regs[4] = regs->r4;
4786 env->regs[5] = regs->r5;
4787 env->regs[6] = regs->r6;
4788 env->regs[7] = regs->r7;
4789 env->regs[8] = regs->r8;
4790 env->regs[9] = regs->r9;
4791 env->regs[10] = regs->r10;
4792 env->regs[11] = regs->r11;
4793 env->regs[12] = regs->r12;
4794 env->regs[13] = regs->r13;
4795 env->regs[14] = info->start_stack;
4796 env->regs[15] = regs->acr;
4797 env->pc = regs->erp;
4798 }
4799 #elif defined(TARGET_S390X)
4800 {
4801 int i;
4802 for (i = 0; i < 16; i++) {
4803 env->regs[i] = regs->gprs[i];
4804 }
4805 env->psw.mask = regs->psw.mask;
4806 env->psw.addr = regs->psw.addr;
4807 }
4808 #elif defined(TARGET_TILEGX)
4809 {
4810 int i;
4811 for (i = 0; i < TILEGX_R_COUNT; i++) {
4812 env->regs[i] = regs->regs[i];
4813 }
4814 for (i = 0; i < TILEGX_SPR_COUNT; i++) {
4815 env->spregs[i] = 0;
4816 }
4817 env->pc = regs->pc;
4818 }
4819 #else
4820 #error unsupported target CPU
4821 #endif
4822
4823 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4824 ts->stack_base = info->start_stack;
4825 ts->heap_base = info->brk;
4826 /* This will be filled in on the first SYS_HEAPINFO call. */
4827 ts->heap_limit = 0;
4828 #endif
4829
4830 if (gdbstub_port) {
4831 if (gdbserver_start(gdbstub_port) < 0) {
4832 fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
4833 gdbstub_port);
4834 exit(EXIT_FAILURE);
4835 }
4836 gdb_handlesig(cpu, 0);
4837 }
4838 trace_init_vcpu_events();
4839 cpu_loop(env);
4840 /* never exits */
4841 return 0;
4842 }