4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
26 #include <sys/syscall.h>
27 #include <sys/resource.h>
30 #include "qemu-common.h"
31 #include "cache-utils.h"
34 #include "qemu-timer.h"
37 #define DEBUG_LOGFILE "/tmp/qemu.log"
/* NOTE(review): this chunk is a damaged extraction -- original line numbers are
   embedded in the text and many lines were dropped. Code is left byte-identical;
   only comments are added. */
/* CPU model name selected by the user (NULL = pick a default) -- TODO confirm
   against the option parsing elsewhere in this file. */
46 const char *cpu_model
;
/* Lowest guest address we allow to be mmapped; presumably mirrors the host's
   vm.mmap_min_addr policy -- TODO confirm. */
47 unsigned long mmap_min_addr
;
48 #if defined(CONFIG_USE_GUEST_BASE)
/* Host offset added to guest virtual addresses when guest_base support is built in. */
49 unsigned long guest_base
;
/* Size of host address space reserved for the guest (0 = no reservation) -- TODO confirm. */
51 unsigned long reserved_va
;
/* Forward declaration: usage() is defined later in the file (not visible in this chunk). */
54 static void usage(void);
/* Prefix prepended when locating the guest's ELF interpreter (dynamic linker);
   default comes from the build configuration. */
56 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
/* Kernel release string reported to the guest (e.g. via uname emulation) --
   default is taken from the build-time CONFIG_UNAME_RELEASE. */
57 const char *qemu_uname_release
= CONFIG_UNAME_RELEASE
;
59 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
60 we allocate a bigger stack. Need a better solution, for example
61 by remapping the process stack directly at the right place */
/* Guest stack size in bytes; defaults to 8 MiB (oversized per the note above). */
62 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
64 void gemu_log(const char *fmt
, ...)
69 vfprintf(stderr
, fmt
, ap
);
73 #if defined(TARGET_I386)
74 int cpu_get_pic_interrupt(CPUState
*env
)
80 /* timers for rdtsc */
/* Emulated time counter backing the guest-visible TSC -- presumably advanced
   elsewhere; the updating code is not visible in this chunk (TODO confirm). */
84 static uint64_t emu_time
;
86 int64_t cpu_get_real_ticks(void)
93 #if defined(CONFIG_USE_NPTL)
94 /***********************************************************/
95 /* Helper routines for implementing atomic operations. */
/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work. */
/* Protects the global CPU list (first_cpu chain). */
101 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
/* Guards pending_cpus and serialises start/end of exclusive sections. */
102 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
/* Signalled when the last running cpu stops, letting the exclusive op proceed. */
103 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
/* Broadcast when an exclusive operation finishes, resuming waiting cpus. */
104 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
/* Non-zero while an exclusive operation is pending; protected by exclusive_lock. */
105 static int pending_cpus
;
107 /* Make sure everything is in a consistent state for calling fork(). */
/* Takes the global locks so no thread holds them across fork(); the matching
   releases are in fork_end(). NOTE(review): the function braces and any other
   statements were dropped by the extraction -- code left byte-identical. */
108 void fork_start(void)
110 pthread_mutex_lock(&tb_lock
);
111 pthread_mutex_lock(&exclusive_lock
);
/* Counterpart of fork_start(), called after fork() in both parent and child;
   'child' is non-zero in the child process. NOTE(review): the if(child)/else
   branch structure and braces were dropped by the extraction -- the re-init
   calls below presumably run only in the child, the unlocks in the parent
   (TODO confirm against upstream). Code left byte-identical. */
115 void fork_end(int child
)
117 mmap_fork_end(child
);
119 /* Child processes created by fork() only have a single thread.
120 Discard information about the parent threads. */
121 first_cpu
= thread_env
;
122 thread_env
->next_cpu
= NULL
;
/* Mutexes/condvars inherited across fork() are in an unknown state in the
   child, so re-initialise them rather than unlocking. */
124 pthread_mutex_init(&exclusive_lock
, NULL
);
125 pthread_mutex_init(&cpu_list_mutex
, NULL
);
126 pthread_cond_init(&exclusive_cond
, NULL
);
127 pthread_cond_init(&exclusive_resume
, NULL
);
128 pthread_mutex_init(&tb_lock
, NULL
);
/* Let the gdb stub know about the fork so it can follow the right process. */
129 gdbserver_fork(thread_env
);
/* Parent path: simply release the locks taken in fork_start(). */
131 pthread_mutex_unlock(&exclusive_lock
);
132 pthread_mutex_unlock(&tb_lock
);
136 /* Wait for pending exclusive operations to complete.  The exclusive lock
/* ... must be held by the caller (continuation of the comment above was
   dropped by the extraction -- TODO confirm). Blocks on exclusive_resume
   until no exclusive operation is pending. */
138 static inline void exclusive_idle(void)
140 while (pending_cpus
) {
141 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
145 /* Start an exclusive operation.
146 Must only be called from outside cpu_arm_exec. */
/* Acquires exclusive_lock (held until end_exclusive()), asks every other cpu
   to stop, then waits until none are running guest code. NOTE(review): the
   statements that set pending_cpus and signal the other cpus were dropped by
   the extraction -- code left byte-identical. */
147 static inline void start_exclusive(void)
150 pthread_mutex_lock(&exclusive_lock
);
154 /* Make all other cpus stop executing. */
/* Walk the global cpu list; for each cpu still executing guest code,
   request an exit (the request itself is in an elided line). */
155 for (other
= first_cpu
; other
; other
= other
->next_cpu
) {
156 if (other
->running
) {
/* Wait until the running cpus have acknowledged and stopped. */
161 if (pending_cpus
> 1) {
162 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
166 /* Finish an exclusive operation. */
/* Clears the pending state (in an elided line), wakes every cpu waiting in
   exclusive_idle(), and releases the lock taken by start_exclusive(). */
167 static inline void end_exclusive(void)
170 pthread_cond_broadcast(&exclusive_resume
);
171 pthread_mutex_unlock(&exclusive_lock
);
174 /* Wait for exclusive ops to finish, and begin cpu execution. */
/* Called before entering the guest-code execution loop for this cpu; the
   elided body presumably calls exclusive_idle() and marks env->running
   (TODO confirm) under exclusive_lock. */
175 static inline void cpu_exec_start(CPUState
*env
)
177 pthread_mutex_lock(&exclusive_lock
);
180 pthread_mutex_unlock(&exclusive_lock
);
183 /* Mark cpu as not executing, and release pending exclusive ops. */
184 static inline void cpu_exec_end(CPUState
*env
)
186 pthread_mutex_lock(&exclusive_lock
);
/* If an exclusive operation is waiting on us, account for this cpu stopping
   (the decrement itself is in an elided line) ... */
188 if (pending_cpus
> 1) {
/* ... and if we are the last running cpu, wake the exclusive requester. */
190 if (pending_cpus
== 1) {
191 pthread_cond_signal(&exclusive_cond
);
195 pthread_mutex_unlock(&exclusive_lock
);
/* Acquire the lock protecting the global cpu list (first_cpu chain). */
198 void cpu_list_lock(void)
200 pthread_mutex_lock(&cpu_list_mutex
);
/* Release the cpu-list lock taken by cpu_list_lock(). */
203 void cpu_list_unlock(void)
205 pthread_mutex_unlock(&cpu_list_mutex
);
207 #else /* if !CONFIG_USE_NPTL */
208 /* These are no-ops because we are not threadsafe. */
209 static inline void cpu_exec_start(CPUState
*env
)
213 static inline void cpu_exec_end(CPUState
*env
)
217 static inline void start_exclusive(void)
221 static inline void end_exclusive(void)
225 void fork_start(void)
229 void fork_end(int child
)
232 gdbserver_fork(thread_env
);
236 void cpu_list_lock(void)
240 void cpu_list_unlock(void)
247 /***********************************************************/
248 /* CPUX86 core interface */
250 void cpu_smm_update(CPUState
*env
)
254 uint64_t cpu_get_tsc(CPUX86State
*env
)
256 return cpu_get_real_ticks();
259 static void write_dt(void *ptr
, unsigned long addr
, unsigned long limit
,
264 e1
= (addr
<< 16) | (limit
& 0xffff);
265 e2
= ((addr
>> 16) & 0xff) | (addr
& 0xff000000) | (limit
& 0x000f0000);
272 static uint64_t *idt_table
;
274 static void set_gate64(void *ptr
, unsigned int type
, unsigned int dpl
,
275 uint64_t addr
, unsigned int sel
)
278 e1
= (addr
& 0xffff) | (sel
<< 16);
279 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
283 p
[2] = tswap32(addr
>> 32);
286 /* only dpl matters as we do only user space emulation */
287 static void set_idt(int n
, unsigned int dpl
)
289 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
292 static void set_gate(void *ptr
, unsigned int type
, unsigned int dpl
,
293 uint32_t addr
, unsigned int sel
)
296 e1
= (addr
& 0xffff) | (sel
<< 16);
297 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
303 /* only dpl matters as we do only user space emulation */
304 static void set_idt(int n
, unsigned int dpl
)
306 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
310 void cpu_loop(CPUX86State
*env
)
314 target_siginfo_t info
;
317 trapnr
= cpu_x86_exec(env
);
320 /* linux syscall from int $0x80 */
321 env
->regs
[R_EAX
] = do_syscall(env
,
333 /* linux syscall from syscall instruction */
334 env
->regs
[R_EAX
] = do_syscall(env
,
343 env
->eip
= env
->exception_next_eip
;
348 info
.si_signo
= SIGBUS
;
350 info
.si_code
= TARGET_SI_KERNEL
;
351 info
._sifields
._sigfault
._addr
= 0;
352 queue_signal(env
, info
.si_signo
, &info
);
355 /* XXX: potential problem if ABI32 */
356 #ifndef TARGET_X86_64
357 if (env
->eflags
& VM_MASK
) {
358 handle_vm86_fault(env
);
362 info
.si_signo
= SIGSEGV
;
364 info
.si_code
= TARGET_SI_KERNEL
;
365 info
._sifields
._sigfault
._addr
= 0;
366 queue_signal(env
, info
.si_signo
, &info
);
370 info
.si_signo
= SIGSEGV
;
372 if (!(env
->error_code
& 1))
373 info
.si_code
= TARGET_SEGV_MAPERR
;
375 info
.si_code
= TARGET_SEGV_ACCERR
;
376 info
._sifields
._sigfault
._addr
= env
->cr
[2];
377 queue_signal(env
, info
.si_signo
, &info
);
380 #ifndef TARGET_X86_64
381 if (env
->eflags
& VM_MASK
) {
382 handle_vm86_trap(env
, trapnr
);
386 /* division by zero */
387 info
.si_signo
= SIGFPE
;
389 info
.si_code
= TARGET_FPE_INTDIV
;
390 info
._sifields
._sigfault
._addr
= env
->eip
;
391 queue_signal(env
, info
.si_signo
, &info
);
396 #ifndef TARGET_X86_64
397 if (env
->eflags
& VM_MASK
) {
398 handle_vm86_trap(env
, trapnr
);
402 info
.si_signo
= SIGTRAP
;
404 if (trapnr
== EXCP01_DB
) {
405 info
.si_code
= TARGET_TRAP_BRKPT
;
406 info
._sifields
._sigfault
._addr
= env
->eip
;
408 info
.si_code
= TARGET_SI_KERNEL
;
409 info
._sifields
._sigfault
._addr
= 0;
411 queue_signal(env
, info
.si_signo
, &info
);
416 #ifndef TARGET_X86_64
417 if (env
->eflags
& VM_MASK
) {
418 handle_vm86_trap(env
, trapnr
);
422 info
.si_signo
= SIGSEGV
;
424 info
.si_code
= TARGET_SI_KERNEL
;
425 info
._sifields
._sigfault
._addr
= 0;
426 queue_signal(env
, info
.si_signo
, &info
);
430 info
.si_signo
= SIGILL
;
432 info
.si_code
= TARGET_ILL_ILLOPN
;
433 info
._sifields
._sigfault
._addr
= env
->eip
;
434 queue_signal(env
, info
.si_signo
, &info
);
437 /* just indicate that signals should be handled asap */
443 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
448 info
.si_code
= TARGET_TRAP_BRKPT
;
449 queue_signal(env
, info
.si_signo
, &info
);
454 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
455 fprintf(stderr
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
459 process_pending_signals(env
);
467 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
469 * r0 = pointer to oldval
470 * r1 = pointer to newval
471 * r2 = pointer to target value
474 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
475 * C set if *ptr was changed, clear if no exchange happened
477 * Note segv's in kernel helpers are a bit tricky, we can set the
478 * data address sensibly but the PC address is just the entry point.
480 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
482 uint64_t oldval
, newval
, val
;
484 target_siginfo_t info
;
486 /* Based on the 32 bit code in do_kernel_trap */
488 /* XXX: This only works between threads, not between processes.
489 It's probably possible to implement this with native host
490 operations. However things like ldrex/strex are much harder so
491 there's not much point trying. */
493 cpsr
= cpsr_read(env
);
496 if (get_user_u64(oldval
, env
->regs
[0])) {
497 env
->cp15
.c6_data
= env
->regs
[0];
501 if (get_user_u64(newval
, env
->regs
[1])) {
502 env
->cp15
.c6_data
= env
->regs
[1];
506 if (get_user_u64(val
, addr
)) {
507 env
->cp15
.c6_data
= addr
;
514 if (put_user_u64(val
, addr
)) {
515 env
->cp15
.c6_data
= addr
;
525 cpsr_write(env
, cpsr
, CPSR_C
);
531 /* We get the PC of the entry address - which is as good as anything,
532 on a real kernel what you get depends on which mode it uses. */
533 info
.si_signo
= SIGSEGV
;
535 /* XXX: check env->error_code */
536 info
.si_code
= TARGET_SEGV_MAPERR
;
537 info
._sifields
._sigfault
._addr
= env
->cp15
.c6_data
;
538 queue_signal(env
, info
.si_signo
, &info
);
543 /* Handle a jump to the kernel code page. */
545 do_kernel_trap(CPUARMState
*env
)
551 switch (env
->regs
[15]) {
552 case 0xffff0fa0: /* __kernel_memory_barrier */
553 /* ??? No-op. Will need to do better for SMP. */
555 case 0xffff0fc0: /* __kernel_cmpxchg */
556 /* XXX: This only works between threads, not between processes.
557 It's probably possible to implement this with native host
558 operations. However things like ldrex/strex are much harder so
559 there's not much point trying. */
561 cpsr
= cpsr_read(env
);
563 /* FIXME: This should SEGV if the access fails. */
564 if (get_user_u32(val
, addr
))
566 if (val
== env
->regs
[0]) {
568 /* FIXME: Check for segfaults. */
569 put_user_u32(val
, addr
);
576 cpsr_write(env
, cpsr
, CPSR_C
);
579 case 0xffff0fe0: /* __kernel_get_tls */
580 env
->regs
[0] = env
->cp15
.c13_tls2
;
582 case 0xffff0f60: /* __kernel_cmpxchg64 */
583 arm_kernel_cmpxchg64_helper(env
);
589 /* Jump back to the caller. */
590 addr
= env
->regs
[14];
595 env
->regs
[15] = addr
;
600 static int do_strex(CPUARMState
*env
)
608 addr
= env
->exclusive_addr
;
609 if (addr
!= env
->exclusive_test
) {
612 size
= env
->exclusive_info
& 0xf;
615 segv
= get_user_u8(val
, addr
);
618 segv
= get_user_u16(val
, addr
);
622 segv
= get_user_u32(val
, addr
);
628 env
->cp15
.c6_data
= addr
;
631 if (val
!= env
->exclusive_val
) {
635 segv
= get_user_u32(val
, addr
+ 4);
637 env
->cp15
.c6_data
= addr
+ 4;
640 if (val
!= env
->exclusive_high
) {
644 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
647 segv
= put_user_u8(val
, addr
);
650 segv
= put_user_u16(val
, addr
);
654 segv
= put_user_u32(val
, addr
);
658 env
->cp15
.c6_data
= addr
;
662 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
663 segv
= put_user_u32(val
, addr
+ 4);
665 env
->cp15
.c6_data
= addr
+ 4;
672 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
678 void cpu_loop(CPUARMState
*env
)
681 unsigned int n
, insn
;
682 target_siginfo_t info
;
687 trapnr
= cpu_arm_exec(env
);
692 TaskState
*ts
= env
->opaque
;
696 /* we handle the FPU emulation here, as Linux */
697 /* we get the opcode */
698 /* FIXME - what to do if get_user() fails? */
699 get_user_u32(opcode
, env
->regs
[15]);
701 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
702 if (rc
== 0) { /* illegal instruction */
703 info
.si_signo
= SIGILL
;
705 info
.si_code
= TARGET_ILL_ILLOPN
;
706 info
._sifields
._sigfault
._addr
= env
->regs
[15];
707 queue_signal(env
, info
.si_signo
, &info
);
708 } else if (rc
< 0) { /* FP exception */
711 /* translate softfloat flags to FPSR flags */
712 if (-rc
& float_flag_invalid
)
714 if (-rc
& float_flag_divbyzero
)
716 if (-rc
& float_flag_overflow
)
718 if (-rc
& float_flag_underflow
)
720 if (-rc
& float_flag_inexact
)
723 FPSR fpsr
= ts
->fpa
.fpsr
;
724 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
726 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
727 info
.si_signo
= SIGFPE
;
730 /* ordered by priority, least first */
731 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
732 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
733 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
734 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
735 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
737 info
._sifields
._sigfault
._addr
= env
->regs
[15];
738 queue_signal(env
, info
.si_signo
, &info
);
743 /* accumulate unenabled exceptions */
744 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
746 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
748 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
750 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
752 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
755 } else { /* everything OK */
766 if (trapnr
== EXCP_BKPT
) {
768 /* FIXME - what to do if get_user() fails? */
769 get_user_u16(insn
, env
->regs
[15]);
773 /* FIXME - what to do if get_user() fails? */
774 get_user_u32(insn
, env
->regs
[15]);
775 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
780 /* FIXME - what to do if get_user() fails? */
781 get_user_u16(insn
, env
->regs
[15] - 2);
784 /* FIXME - what to do if get_user() fails? */
785 get_user_u32(insn
, env
->regs
[15] - 4);
790 if (n
== ARM_NR_cacheflush
) {
792 } else if (n
== ARM_NR_semihosting
793 || n
== ARM_NR_thumb_semihosting
) {
794 env
->regs
[0] = do_arm_semihosting (env
);
795 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
796 || (env
->thumb
&& n
== ARM_THUMB_SYSCALL
)) {
798 if (env
->thumb
|| n
== 0) {
801 n
-= ARM_SYSCALL_BASE
;
804 if ( n
> ARM_NR_BASE
) {
806 case ARM_NR_cacheflush
:
810 cpu_set_tls(env
, env
->regs
[0]);
814 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
816 env
->regs
[0] = -TARGET_ENOSYS
;
820 env
->regs
[0] = do_syscall(env
,
836 /* just indicate that signals should be handled asap */
838 case EXCP_PREFETCH_ABORT
:
839 addr
= env
->cp15
.c6_insn
;
841 case EXCP_DATA_ABORT
:
842 addr
= env
->cp15
.c6_data
;
845 info
.si_signo
= SIGSEGV
;
847 /* XXX: check env->error_code */
848 info
.si_code
= TARGET_SEGV_MAPERR
;
849 info
._sifields
._sigfault
._addr
= addr
;
850 queue_signal(env
, info
.si_signo
, &info
);
857 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
862 info
.si_code
= TARGET_TRAP_BRKPT
;
863 queue_signal(env
, info
.si_signo
, &info
);
867 case EXCP_KERNEL_TRAP
:
868 if (do_kernel_trap(env
))
873 addr
= env
->cp15
.c6_data
;
879 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
881 cpu_dump_state(env
, stderr
, fprintf
, 0);
884 process_pending_signals(env
);
890 #ifdef TARGET_UNICORE32
892 void cpu_loop(CPUState
*env
)
895 unsigned int n
, insn
;
896 target_siginfo_t info
;
900 trapnr
= uc32_cpu_exec(env
);
906 get_user_u32(insn
, env
->regs
[31] - 4);
909 if (n
>= UC32_SYSCALL_BASE
) {
911 n
-= UC32_SYSCALL_BASE
;
912 if (n
== UC32_SYSCALL_NR_set_tls
) {
913 cpu_set_tls(env
, env
->regs
[0]);
916 env
->regs
[0] = do_syscall(env
,
932 info
.si_signo
= SIGSEGV
;
934 /* XXX: check env->error_code */
935 info
.si_code
= TARGET_SEGV_MAPERR
;
936 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
937 queue_signal(env
, info
.si_signo
, &info
);
940 /* just indicate that signals should be handled asap */
946 sig
= gdb_handlesig(env
, TARGET_SIGTRAP
);
950 info
.si_code
= TARGET_TRAP_BRKPT
;
951 queue_signal(env
, info
.si_signo
, &info
);
958 process_pending_signals(env
);
962 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
963 cpu_dump_state(env
, stderr
, fprintf
, 0);
969 #define SPARC64_STACK_BIAS 2047
973 /* WARNING: dealing with register windows _is_ complicated. More info
974 can be found at http://www.sics.se/~psm/sparcstack.html */
975 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
977 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
978 /* wrap handling : if cwp is on the last window, then we use the
979 registers 'after' the end */
980 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
981 index
+= 16 * env
->nwindows
;
985 /* save the register window 'cwp1' */
986 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
991 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
992 #ifdef TARGET_SPARC64
994 sp_ptr
+= SPARC64_STACK_BIAS
;
996 #if defined(DEBUG_WIN)
997 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1000 for(i
= 0; i
< 16; i
++) {
1001 /* FIXME - what to do if put_user() fails? */
1002 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1003 sp_ptr
+= sizeof(abi_ulong
);
1007 static void save_window(CPUSPARCState
*env
)
1009 #ifndef TARGET_SPARC64
1010 unsigned int new_wim
;
1011 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1012 ((1LL << env
->nwindows
) - 1);
1013 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1016 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1022 static void restore_window(CPUSPARCState
*env
)
1024 #ifndef TARGET_SPARC64
1025 unsigned int new_wim
;
1027 unsigned int i
, cwp1
;
1030 #ifndef TARGET_SPARC64
1031 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1032 ((1LL << env
->nwindows
) - 1);
1035 /* restore the invalid window */
1036 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1037 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1038 #ifdef TARGET_SPARC64
1040 sp_ptr
+= SPARC64_STACK_BIAS
;
1042 #if defined(DEBUG_WIN)
1043 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1046 for(i
= 0; i
< 16; i
++) {
1047 /* FIXME - what to do if get_user() fails? */
1048 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1049 sp_ptr
+= sizeof(abi_ulong
);
1051 #ifdef TARGET_SPARC64
1053 if (env
->cleanwin
< env
->nwindows
- 1)
1061 static void flush_windows(CPUSPARCState
*env
)
1067 /* if restore would invoke restore_window(), then we can stop */
1068 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1069 #ifndef TARGET_SPARC64
1070 if (env
->wim
& (1 << cwp1
))
1073 if (env
->canrestore
== 0)
1078 save_window_offset(env
, cwp1
);
1081 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1082 #ifndef TARGET_SPARC64
1083 /* set wim so that restore will reload the registers */
1084 env
->wim
= 1 << cwp1
;
1086 #if defined(DEBUG_WIN)
1087 printf("flush_windows: nb=%d\n", offset
- 1);
1091 void cpu_loop (CPUSPARCState
*env
)
1095 target_siginfo_t info
;
1098 trapnr
= cpu_sparc_exec (env
);
1101 #ifndef TARGET_SPARC64
1108 ret
= do_syscall (env
, env
->gregs
[1],
1109 env
->regwptr
[0], env
->regwptr
[1],
1110 env
->regwptr
[2], env
->regwptr
[3],
1111 env
->regwptr
[4], env
->regwptr
[5],
1113 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1114 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1115 env
->xcc
|= PSR_CARRY
;
1117 env
->psr
|= PSR_CARRY
;
1121 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1122 env
->xcc
&= ~PSR_CARRY
;
1124 env
->psr
&= ~PSR_CARRY
;
1127 env
->regwptr
[0] = ret
;
1128 /* next instruction */
1130 env
->npc
= env
->npc
+ 4;
1132 case 0x83: /* flush windows */
1137 /* next instruction */
1139 env
->npc
= env
->npc
+ 4;
1141 #ifndef TARGET_SPARC64
1142 case TT_WIN_OVF
: /* window overflow */
1145 case TT_WIN_UNF
: /* window underflow */
1146 restore_window(env
);
1151 info
.si_signo
= SIGSEGV
;
1153 /* XXX: check env->error_code */
1154 info
.si_code
= TARGET_SEGV_MAPERR
;
1155 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1156 queue_signal(env
, info
.si_signo
, &info
);
1160 case TT_SPILL
: /* window overflow */
1163 case TT_FILL
: /* window underflow */
1164 restore_window(env
);
1169 info
.si_signo
= SIGSEGV
;
1171 /* XXX: check env->error_code */
1172 info
.si_code
= TARGET_SEGV_MAPERR
;
1173 if (trapnr
== TT_DFAULT
)
1174 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1176 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1177 queue_signal(env
, info
.si_signo
, &info
);
1180 #ifndef TARGET_ABI32
1183 sparc64_get_context(env
);
1187 sparc64_set_context(env
);
1191 case EXCP_INTERRUPT
:
1192 /* just indicate that signals should be handled asap */
1196 info
.si_signo
= TARGET_SIGILL
;
1198 info
.si_code
= TARGET_ILL_ILLOPC
;
1199 info
._sifields
._sigfault
._addr
= env
->pc
;
1200 queue_signal(env
, info
.si_signo
, &info
);
1207 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
1210 info
.si_signo
= sig
;
1212 info
.si_code
= TARGET_TRAP_BRKPT
;
1213 queue_signal(env
, info
.si_signo
, &info
);
1218 printf ("Unhandled trap: 0x%x\n", trapnr
);
1219 cpu_dump_state(env
, stderr
, fprintf
, 0);
1222 process_pending_signals (env
);
1229 static inline uint64_t cpu_ppc_get_tb (CPUState
*env
)
1235 uint64_t cpu_ppc_load_tbl (CPUState
*env
)
1237 return cpu_ppc_get_tb(env
);
1240 uint32_t cpu_ppc_load_tbu (CPUState
*env
)
1242 return cpu_ppc_get_tb(env
) >> 32;
1245 uint64_t cpu_ppc_load_atbl (CPUState
*env
)
1247 return cpu_ppc_get_tb(env
);
1250 uint32_t cpu_ppc_load_atbu (CPUState
*env
)
1252 return cpu_ppc_get_tb(env
) >> 32;
1255 uint32_t cpu_ppc601_load_rtcu (CPUState
*env
)
1256 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1258 uint32_t cpu_ppc601_load_rtcl (CPUState
*env
)
1260 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1263 /* XXX: to be fixed */
1264 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1269 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1274 #define EXCP_DUMP(env, fmt, ...) \
1276 fprintf(stderr, fmt , ## __VA_ARGS__); \
1277 cpu_dump_state(env, stderr, fprintf, 0); \
1278 qemu_log(fmt, ## __VA_ARGS__); \
1280 log_cpu_state(env, 0); \
1283 static int do_store_exclusive(CPUPPCState
*env
)
1286 target_ulong page_addr
;
1291 addr
= env
->reserve_ea
;
1292 page_addr
= addr
& TARGET_PAGE_MASK
;
1295 flags
= page_get_flags(page_addr
);
1296 if ((flags
& PAGE_READ
) == 0) {
1299 int reg
= env
->reserve_info
& 0x1f;
1300 int size
= (env
->reserve_info
>> 5) & 0xf;
1303 if (addr
== env
->reserve_addr
) {
1305 case 1: segv
= get_user_u8(val
, addr
); break;
1306 case 2: segv
= get_user_u16(val
, addr
); break;
1307 case 4: segv
= get_user_u32(val
, addr
); break;
1308 #if defined(TARGET_PPC64)
1309 case 8: segv
= get_user_u64(val
, addr
); break;
1313 if (!segv
&& val
== env
->reserve_val
) {
1314 val
= env
->gpr
[reg
];
1316 case 1: segv
= put_user_u8(val
, addr
); break;
1317 case 2: segv
= put_user_u16(val
, addr
); break;
1318 case 4: segv
= put_user_u32(val
, addr
); break;
1319 #if defined(TARGET_PPC64)
1320 case 8: segv
= put_user_u64(val
, addr
); break;
1329 env
->crf
[0] = (stored
<< 1) | xer_so
;
1330 env
->reserve_addr
= (target_ulong
)-1;
1340 void cpu_loop(CPUPPCState
*env
)
1342 target_siginfo_t info
;
1347 cpu_exec_start(env
);
1348 trapnr
= cpu_ppc_exec(env
);
1351 case POWERPC_EXCP_NONE
:
1354 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1355 cpu_abort(env
, "Critical interrupt while in user mode. "
1358 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1359 cpu_abort(env
, "Machine check exception while in user mode. "
1362 case POWERPC_EXCP_DSI
: /* Data storage exception */
1363 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1365 /* XXX: check this. Seems bugged */
1366 switch (env
->error_code
& 0xFF000000) {
1368 info
.si_signo
= TARGET_SIGSEGV
;
1370 info
.si_code
= TARGET_SEGV_MAPERR
;
1373 info
.si_signo
= TARGET_SIGILL
;
1375 info
.si_code
= TARGET_ILL_ILLADR
;
1378 info
.si_signo
= TARGET_SIGSEGV
;
1380 info
.si_code
= TARGET_SEGV_ACCERR
;
1383 /* Let's send a regular segfault... */
1384 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1386 info
.si_signo
= TARGET_SIGSEGV
;
1388 info
.si_code
= TARGET_SEGV_MAPERR
;
1391 info
._sifields
._sigfault
._addr
= env
->nip
;
1392 queue_signal(env
, info
.si_signo
, &info
);
1394 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1395 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1396 "\n", env
->spr
[SPR_SRR0
]);
1397 /* XXX: check this */
1398 switch (env
->error_code
& 0xFF000000) {
1400 info
.si_signo
= TARGET_SIGSEGV
;
1402 info
.si_code
= TARGET_SEGV_MAPERR
;
1406 info
.si_signo
= TARGET_SIGSEGV
;
1408 info
.si_code
= TARGET_SEGV_ACCERR
;
1411 /* Let's send a regular segfault... */
1412 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1414 info
.si_signo
= TARGET_SIGSEGV
;
1416 info
.si_code
= TARGET_SEGV_MAPERR
;
1419 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1420 queue_signal(env
, info
.si_signo
, &info
);
1422 case POWERPC_EXCP_EXTERNAL
: /* External input */
1423 cpu_abort(env
, "External interrupt while in user mode. "
1426 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1427 EXCP_DUMP(env
, "Unaligned memory access\n");
1428 /* XXX: check this */
1429 info
.si_signo
= TARGET_SIGBUS
;
1431 info
.si_code
= TARGET_BUS_ADRALN
;
1432 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1433 queue_signal(env
, info
.si_signo
, &info
);
1435 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1436 /* XXX: check this */
1437 switch (env
->error_code
& ~0xF) {
1438 case POWERPC_EXCP_FP
:
1439 EXCP_DUMP(env
, "Floating point program exception\n");
1440 info
.si_signo
= TARGET_SIGFPE
;
1442 switch (env
->error_code
& 0xF) {
1443 case POWERPC_EXCP_FP_OX
:
1444 info
.si_code
= TARGET_FPE_FLTOVF
;
1446 case POWERPC_EXCP_FP_UX
:
1447 info
.si_code
= TARGET_FPE_FLTUND
;
1449 case POWERPC_EXCP_FP_ZX
:
1450 case POWERPC_EXCP_FP_VXZDZ
:
1451 info
.si_code
= TARGET_FPE_FLTDIV
;
1453 case POWERPC_EXCP_FP_XX
:
1454 info
.si_code
= TARGET_FPE_FLTRES
;
1456 case POWERPC_EXCP_FP_VXSOFT
:
1457 info
.si_code
= TARGET_FPE_FLTINV
;
1459 case POWERPC_EXCP_FP_VXSNAN
:
1460 case POWERPC_EXCP_FP_VXISI
:
1461 case POWERPC_EXCP_FP_VXIDI
:
1462 case POWERPC_EXCP_FP_VXIMZ
:
1463 case POWERPC_EXCP_FP_VXVC
:
1464 case POWERPC_EXCP_FP_VXSQRT
:
1465 case POWERPC_EXCP_FP_VXCVI
:
1466 info
.si_code
= TARGET_FPE_FLTSUB
;
1469 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1474 case POWERPC_EXCP_INVAL
:
1475 EXCP_DUMP(env
, "Invalid instruction\n");
1476 info
.si_signo
= TARGET_SIGILL
;
1478 switch (env
->error_code
& 0xF) {
1479 case POWERPC_EXCP_INVAL_INVAL
:
1480 info
.si_code
= TARGET_ILL_ILLOPC
;
1482 case POWERPC_EXCP_INVAL_LSWX
:
1483 info
.si_code
= TARGET_ILL_ILLOPN
;
1485 case POWERPC_EXCP_INVAL_SPR
:
1486 info
.si_code
= TARGET_ILL_PRVREG
;
1488 case POWERPC_EXCP_INVAL_FP
:
1489 info
.si_code
= TARGET_ILL_COPROC
;
1492 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1493 env
->error_code
& 0xF);
1494 info
.si_code
= TARGET_ILL_ILLADR
;
1498 case POWERPC_EXCP_PRIV
:
1499 EXCP_DUMP(env
, "Privilege violation\n");
1500 info
.si_signo
= TARGET_SIGILL
;
1502 switch (env
->error_code
& 0xF) {
1503 case POWERPC_EXCP_PRIV_OPC
:
1504 info
.si_code
= TARGET_ILL_PRVOPC
;
1506 case POWERPC_EXCP_PRIV_REG
:
1507 info
.si_code
= TARGET_ILL_PRVREG
;
1510 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1511 env
->error_code
& 0xF);
1512 info
.si_code
= TARGET_ILL_PRVOPC
;
1516 case POWERPC_EXCP_TRAP
:
1517 cpu_abort(env
, "Tried to call a TRAP\n");
1520 /* Should not happen ! */
1521 cpu_abort(env
, "Unknown program exception (%02x)\n",
1525 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1526 queue_signal(env
, info
.si_signo
, &info
);
1528 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1529 EXCP_DUMP(env
, "No floating point allowed\n");
1530 info
.si_signo
= TARGET_SIGILL
;
1532 info
.si_code
= TARGET_ILL_COPROC
;
1533 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1534 queue_signal(env
, info
.si_signo
, &info
);
1536 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1537 cpu_abort(env
, "Syscall exception while in user mode. "
1540 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1541 EXCP_DUMP(env
, "No APU instruction allowed\n");
1542 info
.si_signo
= TARGET_SIGILL
;
1544 info
.si_code
= TARGET_ILL_COPROC
;
1545 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1546 queue_signal(env
, info
.si_signo
, &info
);
1548 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1549 cpu_abort(env
, "Decrementer interrupt while in user mode. "
1552 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1553 cpu_abort(env
, "Fix interval timer interrupt while in user mode. "
1556 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1557 cpu_abort(env
, "Watchdog timer interrupt while in user mode. "
1560 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1561 cpu_abort(env
, "Data TLB exception while in user mode. "
1564 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1565 cpu_abort(env
, "Instruction TLB exception while in user mode. "
1568 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1569 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1570 info
.si_signo
= TARGET_SIGILL
;
1572 info
.si_code
= TARGET_ILL_COPROC
;
1573 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1574 queue_signal(env
, info
.si_signo
, &info
);
1576 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1577 cpu_abort(env
, "Embedded floating-point data IRQ not handled\n");
1579 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1580 cpu_abort(env
, "Embedded floating-point round IRQ not handled\n");
1582 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1583 cpu_abort(env
, "Performance monitor exception not handled\n");
1585 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1586 cpu_abort(env
, "Doorbell interrupt while in user mode. "
1589 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1590 cpu_abort(env
, "Doorbell critical interrupt while in user mode. "
1593 case POWERPC_EXCP_RESET
: /* System reset exception */
1594 cpu_abort(env
, "Reset interrupt while in user mode. "
1597 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1598 cpu_abort(env
, "Data segment exception while in user mode. "
1601 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1602 cpu_abort(env
, "Instruction segment exception "
1603 "while in user mode. Aborting\n");
1605 /* PowerPC 64 with hypervisor mode support */
1606 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1607 cpu_abort(env
, "Hypervisor decrementer interrupt "
1608 "while in user mode. Aborting\n");
1610 case POWERPC_EXCP_TRACE
: /* Trace exception */
1612 * we use this exception to emulate step-by-step execution mode.
1615 /* PowerPC 64 with hypervisor mode support */
1616 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1617 cpu_abort(env
, "Hypervisor data storage exception "
1618 "while in user mode. Aborting\n");
1620 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1621 cpu_abort(env
, "Hypervisor instruction storage exception "
1622 "while in user mode. Aborting\n");
1624 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1625 cpu_abort(env
, "Hypervisor data segment exception "
1626 "while in user mode. Aborting\n");
1628 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1629 cpu_abort(env
, "Hypervisor instruction segment exception "
1630 "while in user mode. Aborting\n");
1632 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1633 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1634 info
.si_signo
= TARGET_SIGILL
;
1636 info
.si_code
= TARGET_ILL_COPROC
;
1637 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1638 queue_signal(env
, info
.si_signo
, &info
);
1640 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1641 cpu_abort(env
, "Programable interval timer interrupt "
1642 "while in user mode. Aborting\n");
1644 case POWERPC_EXCP_IO
: /* IO error exception */
1645 cpu_abort(env
, "IO error exception while in user mode. "
1648 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1649 cpu_abort(env
, "Run mode exception while in user mode. "
1652 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1653 cpu_abort(env
, "Emulation trap exception not handled\n");
1655 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1656 cpu_abort(env
, "Instruction fetch TLB exception "
1657 "while in user-mode. Aborting");
1659 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1660 cpu_abort(env
, "Data load TLB exception while in user-mode. "
1663 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1664 cpu_abort(env
, "Data store TLB exception while in user-mode. "
1667 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1668 cpu_abort(env
, "Floating-point assist exception not handled\n");
1670 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1671 cpu_abort(env
, "Instruction address breakpoint exception "
1674 case POWERPC_EXCP_SMI
: /* System management interrupt */
1675 cpu_abort(env
, "System management interrupt while in user mode. "
1678 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1679 cpu_abort(env
, "Thermal interrupt interrupt while in user mode. "
1682 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1683 cpu_abort(env
, "Performance monitor exception not handled\n");
1685 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1686 cpu_abort(env
, "Vector assist exception not handled\n");
1688 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1689 cpu_abort(env
, "Soft patch exception not handled\n");
1691 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1692 cpu_abort(env
, "Maintenance exception while in user mode. "
1695 case POWERPC_EXCP_STOP
: /* stop translation */
1696 /* We did invalidate the instruction cache. Go on */
1698 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1699 /* We just stopped because of a branch. Go on */
1701 case POWERPC_EXCP_SYSCALL_USER
:
1702 /* system call in user-mode emulation */
1704 * PPC ABI uses overflow flag in cr0 to signal an error
1708 printf("syscall %d 0x%08x 0x%08x 0x%08x 0x%08x\n", env
->gpr
[0],
1709 env
->gpr
[3], env
->gpr
[4], env
->gpr
[5], env
->gpr
[6]);
1711 env
->crf
[0] &= ~0x1;
1712 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1713 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1715 if (ret
== (uint32_t)(-TARGET_QEMU_ESIGRETURN
)) {
1716 /* Returning from a successful sigreturn syscall.
1717 Avoid corrupting register state. */
1720 if (ret
> (uint32_t)(-515)) {
1726 printf("syscall returned 0x%08x (%d)\n", ret
, ret
);
1729 case POWERPC_EXCP_STCX
:
1730 if (do_store_exclusive(env
)) {
1731 info
.si_signo
= TARGET_SIGSEGV
;
1733 info
.si_code
= TARGET_SEGV_MAPERR
;
1734 info
._sifields
._sigfault
._addr
= env
->nip
;
1735 queue_signal(env
, info
.si_signo
, &info
);
1742 sig
= gdb_handlesig(env
, TARGET_SIGTRAP
);
1744 info
.si_signo
= sig
;
1746 info
.si_code
= TARGET_TRAP_BRKPT
;
1747 queue_signal(env
, info
.si_signo
, &info
);
1751 case EXCP_INTERRUPT
:
1752 /* just indicate that signals should be handled asap */
1755 cpu_abort(env
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1758 process_pending_signals(env
);
/*
 * Argument counts for the MIPS o32 Linux syscalls, indexed by
 * (syscall number - 4000).  The MIPS_SYS() macro discards the syscall
 * name and keeps only the argument count; the names remain in the
 * source purely as cross-references against the Linux o32 table.
 * The MIPS cpu_loop() consults this table to decide how many syscall
 * arguments beyond the fourth must be fetched from the guest stack.
 */
#define MIPS_SYS(name, args) args,

static const uint8_t mips_syscall_args[] = {
    MIPS_SYS(sys_syscall, 8)    /* 4000 */
    MIPS_SYS(sys_exit, 1)
    MIPS_SYS(sys_fork, 0)
    MIPS_SYS(sys_read, 3)
    MIPS_SYS(sys_write, 3)
    MIPS_SYS(sys_open, 3)   /* 4005 */
    MIPS_SYS(sys_close, 1)
    MIPS_SYS(sys_waitpid, 3)
    MIPS_SYS(sys_creat, 2)
    MIPS_SYS(sys_link, 2)
    MIPS_SYS(sys_unlink, 1) /* 4010 */
    MIPS_SYS(sys_execve, 0)
    MIPS_SYS(sys_chdir, 1)
    MIPS_SYS(sys_time, 1)
    MIPS_SYS(sys_mknod, 3)
    MIPS_SYS(sys_chmod, 2)  /* 4015 */
    MIPS_SYS(sys_lchown, 3)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_stat */
    MIPS_SYS(sys_lseek, 3)
    MIPS_SYS(sys_getpid, 0) /* 4020 */
    MIPS_SYS(sys_mount, 5)
    MIPS_SYS(sys_oldumount, 1)
    MIPS_SYS(sys_setuid, 1)
    MIPS_SYS(sys_getuid, 0)
    MIPS_SYS(sys_stime, 1)  /* 4025 */
    MIPS_SYS(sys_ptrace, 4)
    MIPS_SYS(sys_alarm, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_fstat */
    MIPS_SYS(sys_pause, 0)
    MIPS_SYS(sys_utime, 2)  /* 4030 */
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_access, 2)
    MIPS_SYS(sys_nice, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4035 */
    MIPS_SYS(sys_sync, 0)
    MIPS_SYS(sys_kill, 2)
    MIPS_SYS(sys_rename, 2)
    MIPS_SYS(sys_mkdir, 2)
    MIPS_SYS(sys_rmdir, 1)  /* 4040 */
    MIPS_SYS(sys_dup, 1)
    MIPS_SYS(sys_pipe, 0)
    MIPS_SYS(sys_times, 1)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_brk, 1)    /* 4045 */
    MIPS_SYS(sys_setgid, 1)
    MIPS_SYS(sys_getgid, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was signal(2) */
    MIPS_SYS(sys_geteuid, 0)
    MIPS_SYS(sys_getegid, 0)    /* 4050 */
    MIPS_SYS(sys_acct, 0)
    MIPS_SYS(sys_umount, 2)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ioctl, 3)
    MIPS_SYS(sys_fcntl, 3)  /* 4055 */
    MIPS_SYS(sys_ni_syscall, 2)
    MIPS_SYS(sys_setpgid, 2)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_olduname, 1)
    MIPS_SYS(sys_umask, 1)  /* 4060 */
    MIPS_SYS(sys_chroot, 1)
    MIPS_SYS(sys_ustat, 2)
    MIPS_SYS(sys_dup2, 2)
    MIPS_SYS(sys_getppid, 0)
    MIPS_SYS(sys_getpgrp, 0)    /* 4065 */
    MIPS_SYS(sys_setsid, 0)
    MIPS_SYS(sys_sigaction, 3)
    MIPS_SYS(sys_sgetmask, 0)
    MIPS_SYS(sys_ssetmask, 1)
    MIPS_SYS(sys_setreuid, 2)   /* 4070 */
    MIPS_SYS(sys_setregid, 2)
    MIPS_SYS(sys_sigsuspend, 0)
    MIPS_SYS(sys_sigpending, 1)
    MIPS_SYS(sys_sethostname, 2)
    MIPS_SYS(sys_setrlimit, 2)  /* 4075 */
    MIPS_SYS(sys_getrlimit, 2)
    MIPS_SYS(sys_getrusage, 2)
    MIPS_SYS(sys_gettimeofday, 2)
    MIPS_SYS(sys_settimeofday, 2)
    MIPS_SYS(sys_getgroups, 2)  /* 4080 */
    MIPS_SYS(sys_setgroups, 2)
    MIPS_SYS(sys_ni_syscall, 0) /* old_select */
    MIPS_SYS(sys_symlink, 2)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_lstat */
    MIPS_SYS(sys_readlink, 3)   /* 4085 */
    MIPS_SYS(sys_uselib, 1)
    MIPS_SYS(sys_swapon, 2)
    MIPS_SYS(sys_reboot, 3)
    MIPS_SYS(old_readdir, 3)
    MIPS_SYS(old_mmap, 6)   /* 4090 */
    MIPS_SYS(sys_munmap, 2)
    MIPS_SYS(sys_truncate, 2)
    MIPS_SYS(sys_ftruncate, 2)
    MIPS_SYS(sys_fchmod, 2)
    MIPS_SYS(sys_fchown, 3) /* 4095 */
    MIPS_SYS(sys_getpriority, 2)
    MIPS_SYS(sys_setpriority, 3)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_statfs, 2)
    MIPS_SYS(sys_fstatfs, 2)    /* 4100 */
    MIPS_SYS(sys_ni_syscall, 0) /* was ioperm(2) */
    MIPS_SYS(sys_socketcall, 2)
    MIPS_SYS(sys_syslog, 3)
    MIPS_SYS(sys_setitimer, 3)
    MIPS_SYS(sys_getitimer, 2)  /* 4105 */
    MIPS_SYS(sys_newstat, 2)
    MIPS_SYS(sys_newlstat, 2)
    MIPS_SYS(sys_newfstat, 2)
    MIPS_SYS(sys_uname, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4110 was iopl(2) */
    MIPS_SYS(sys_vhangup, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_idle() */
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_vm86 */
    MIPS_SYS(sys_wait4, 4)
    MIPS_SYS(sys_swapoff, 1)    /* 4115 */
    MIPS_SYS(sys_sysinfo, 1)
    MIPS_SYS(sys_ipc, 6)
    MIPS_SYS(sys_fsync, 1)
    MIPS_SYS(sys_sigreturn, 0)
    MIPS_SYS(sys_clone, 6)  /* 4120 */
    MIPS_SYS(sys_setdomainname, 2)
    MIPS_SYS(sys_newuname, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_modify_ldt */
    MIPS_SYS(sys_adjtimex, 1)
    MIPS_SYS(sys_mprotect, 3)   /* 4125 */
    MIPS_SYS(sys_sigprocmask, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was create_module */
    MIPS_SYS(sys_init_module, 5)
    MIPS_SYS(sys_delete_module, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4130 was get_kernel_syms */
    MIPS_SYS(sys_quotactl, 0)
    MIPS_SYS(sys_getpgid, 1)
    MIPS_SYS(sys_fchdir, 1)
    MIPS_SYS(sys_bdflush, 2)
    MIPS_SYS(sys_sysfs, 3)  /* 4135 */
    MIPS_SYS(sys_personality, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* for afs_syscall */
    MIPS_SYS(sys_setfsuid, 1)
    MIPS_SYS(sys_setfsgid, 1)
    MIPS_SYS(sys_llseek, 5) /* 4140 */
    MIPS_SYS(sys_getdents, 3)
    MIPS_SYS(sys_select, 5)
    MIPS_SYS(sys_flock, 2)
    MIPS_SYS(sys_msync, 3)
    MIPS_SYS(sys_readv, 3)  /* 4145 */
    MIPS_SYS(sys_writev, 3)
    MIPS_SYS(sys_cacheflush, 3)
    MIPS_SYS(sys_cachectl, 3)
    MIPS_SYS(sys_sysmips, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* 4150 */
    MIPS_SYS(sys_getsid, 1)
    MIPS_SYS(sys_fdatasync, 0)
    MIPS_SYS(sys_sysctl, 1)
    MIPS_SYS(sys_mlock, 2)
    MIPS_SYS(sys_munlock, 2)    /* 4155 */
    MIPS_SYS(sys_mlockall, 1)
    MIPS_SYS(sys_munlockall, 0)
    MIPS_SYS(sys_sched_setparam, 2)
    MIPS_SYS(sys_sched_getparam, 2)
    MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */
    MIPS_SYS(sys_sched_getscheduler, 1)
    MIPS_SYS(sys_sched_yield, 0)
    MIPS_SYS(sys_sched_get_priority_max, 1)
    MIPS_SYS(sys_sched_get_priority_min, 1)
    MIPS_SYS(sys_sched_rr_get_interval, 2)  /* 4165 */
    MIPS_SYS(sys_nanosleep, 2)
    MIPS_SYS(sys_mremap, 4)
    MIPS_SYS(sys_accept, 3)
    MIPS_SYS(sys_bind, 3)
    MIPS_SYS(sys_connect, 3)    /* 4170 */
    MIPS_SYS(sys_getpeername, 3)
    MIPS_SYS(sys_getsockname, 3)
    MIPS_SYS(sys_getsockopt, 5)
    MIPS_SYS(sys_listen, 2)
    MIPS_SYS(sys_recv, 4)   /* 4175 */
    MIPS_SYS(sys_recvfrom, 6)
    MIPS_SYS(sys_recvmsg, 3)
    MIPS_SYS(sys_send, 4)
    MIPS_SYS(sys_sendmsg, 3)
    MIPS_SYS(sys_sendto, 6) /* 4180 */
    MIPS_SYS(sys_setsockopt, 5)
    MIPS_SYS(sys_shutdown, 2)
    MIPS_SYS(sys_socket, 3)
    MIPS_SYS(sys_socketpair, 4)
    MIPS_SYS(sys_setresuid, 3)  /* 4185 */
    MIPS_SYS(sys_getresuid, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_query_module */
    MIPS_SYS(sys_poll, 3)
    MIPS_SYS(sys_nfsservctl, 3)
    MIPS_SYS(sys_setresgid, 3)  /* 4190 */
    MIPS_SYS(sys_getresgid, 3)
    MIPS_SYS(sys_prctl, 5)
    MIPS_SYS(sys_rt_sigreturn, 0)
    MIPS_SYS(sys_rt_sigaction, 4)
    MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
    MIPS_SYS(sys_rt_sigpending, 2)
    MIPS_SYS(sys_rt_sigtimedwait, 4)
    MIPS_SYS(sys_rt_sigqueueinfo, 3)
    MIPS_SYS(sys_rt_sigsuspend, 0)
    MIPS_SYS(sys_pread64, 6)    /* 4200 */
    MIPS_SYS(sys_pwrite64, 6)
    MIPS_SYS(sys_chown, 3)
    MIPS_SYS(sys_getcwd, 2)
    MIPS_SYS(sys_capget, 2)
    MIPS_SYS(sys_capset, 2) /* 4205 */
    MIPS_SYS(sys_sigaltstack, 2)
    MIPS_SYS(sys_sendfile, 4)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_mmap2, 6)  /* 4210 */
    MIPS_SYS(sys_truncate64, 4)
    MIPS_SYS(sys_ftruncate64, 4)
    MIPS_SYS(sys_stat64, 2)
    MIPS_SYS(sys_lstat64, 2)
    MIPS_SYS(sys_fstat64, 2)    /* 4215 */
    MIPS_SYS(sys_pivot_root, 2)
    MIPS_SYS(sys_mincore, 3)
    MIPS_SYS(sys_madvise, 3)
    MIPS_SYS(sys_getdents64, 3)
    MIPS_SYS(sys_fcntl64, 3)    /* 4220 */
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_gettid, 0)
    MIPS_SYS(sys_readahead, 5)
    MIPS_SYS(sys_setxattr, 5)
    MIPS_SYS(sys_lsetxattr, 5)  /* 4225 */
    MIPS_SYS(sys_fsetxattr, 5)
    MIPS_SYS(sys_getxattr, 4)
    MIPS_SYS(sys_lgetxattr, 4)
    MIPS_SYS(sys_fgetxattr, 4)
    MIPS_SYS(sys_listxattr, 3)  /* 4230 */
    MIPS_SYS(sys_llistxattr, 3)
    MIPS_SYS(sys_flistxattr, 3)
    MIPS_SYS(sys_removexattr, 2)
    MIPS_SYS(sys_lremovexattr, 2)
    MIPS_SYS(sys_fremovexattr, 2)   /* 4235 */
    MIPS_SYS(sys_tkill, 2)
    MIPS_SYS(sys_sendfile64, 5)
    MIPS_SYS(sys_futex, 2)
    MIPS_SYS(sys_sched_setaffinity, 3)
    MIPS_SYS(sys_sched_getaffinity, 3)  /* 4240 */
    MIPS_SYS(sys_io_setup, 2)
    MIPS_SYS(sys_io_destroy, 1)
    MIPS_SYS(sys_io_getevents, 5)
    MIPS_SYS(sys_io_submit, 3)
    MIPS_SYS(sys_io_cancel, 3)  /* 4245 */
    MIPS_SYS(sys_exit_group, 1)
    MIPS_SYS(sys_lookup_dcookie, 3)
    MIPS_SYS(sys_epoll_create, 1)
    MIPS_SYS(sys_epoll_ctl, 4)
    MIPS_SYS(sys_epoll_wait, 3) /* 4250 */
    MIPS_SYS(sys_remap_file_pages, 5)
    MIPS_SYS(sys_set_tid_address, 1)
    MIPS_SYS(sys_restart_syscall, 0)
    MIPS_SYS(sys_fadvise64_64, 7)
    MIPS_SYS(sys_statfs64, 3)   /* 4255 */
    MIPS_SYS(sys_fstatfs64, 2)
    MIPS_SYS(sys_timer_create, 3)
    MIPS_SYS(sys_timer_settime, 4)
    MIPS_SYS(sys_timer_gettime, 2)
    MIPS_SYS(sys_timer_getoverrun, 1)   /* 4260 */
    MIPS_SYS(sys_timer_delete, 1)
    MIPS_SYS(sys_clock_settime, 2)
    MIPS_SYS(sys_clock_gettime, 2)
    MIPS_SYS(sys_clock_getres, 2)
    MIPS_SYS(sys_clock_nanosleep, 4)    /* 4265 */
    MIPS_SYS(sys_tgkill, 3)
    MIPS_SYS(sys_utimes, 2)
    MIPS_SYS(sys_mbind, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_get_mempolicy */
    MIPS_SYS(sys_ni_syscall, 0) /* 4270 sys_set_mempolicy */
    MIPS_SYS(sys_mq_open, 4)
    MIPS_SYS(sys_mq_unlink, 1)
    MIPS_SYS(sys_mq_timedsend, 5)
    MIPS_SYS(sys_mq_timedreceive, 5)
    MIPS_SYS(sys_mq_notify, 2)  /* 4275 */
    MIPS_SYS(sys_mq_getsetattr, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_vserver */
    MIPS_SYS(sys_waitid, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* available, was setaltroot */
    MIPS_SYS(sys_add_key, 5)
    MIPS_SYS(sys_request_key, 4)
    MIPS_SYS(sys_keyctl, 5)
    MIPS_SYS(sys_set_thread_area, 1)
    MIPS_SYS(sys_inotify_init, 0)
    MIPS_SYS(sys_inotify_add_watch, 3)  /* 4285 */
    MIPS_SYS(sys_inotify_rm_watch, 2)
    MIPS_SYS(sys_migrate_pages, 4)
    MIPS_SYS(sys_openat, 4)
    MIPS_SYS(sys_mkdirat, 3)
    MIPS_SYS(sys_mknodat, 4)    /* 4290 */
    MIPS_SYS(sys_fchownat, 5)
    MIPS_SYS(sys_futimesat, 3)
    MIPS_SYS(sys_fstatat64, 4)
    MIPS_SYS(sys_unlinkat, 3)
    MIPS_SYS(sys_renameat, 4)   /* 4295 */
    MIPS_SYS(sys_linkat, 5)
    MIPS_SYS(sys_symlinkat, 3)
    MIPS_SYS(sys_readlinkat, 4)
    MIPS_SYS(sys_fchmodat, 3)
    MIPS_SYS(sys_faccessat, 3)  /* 4300 */
    MIPS_SYS(sys_pselect6, 6)
    MIPS_SYS(sys_ppoll, 5)
    MIPS_SYS(sys_unshare, 1)
    MIPS_SYS(sys_splice, 4)
    MIPS_SYS(sys_sync_file_range, 7)    /* 4305 */
    MIPS_SYS(sys_tee, 4)
    MIPS_SYS(sys_vmsplice, 4)
    MIPS_SYS(sys_move_pages, 6)
    MIPS_SYS(sys_set_robust_list, 2)
    MIPS_SYS(sys_get_robust_list, 3)    /* 4310 */
    MIPS_SYS(sys_kexec_load, 4)
    MIPS_SYS(sys_getcpu, 3)
    MIPS_SYS(sys_epoll_pwait, 6)
    MIPS_SYS(sys_ioprio_set, 3)
    MIPS_SYS(sys_ioprio_get, 2)
    MIPS_SYS(sys_utimensat, 4)
    MIPS_SYS(sys_signalfd, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */
    MIPS_SYS(sys_eventfd, 1)
    MIPS_SYS(sys_fallocate, 6)  /* 4320 */
    MIPS_SYS(sys_timerfd_create, 2)
    MIPS_SYS(sys_timerfd_gettime, 2)
    MIPS_SYS(sys_timerfd_settime, 4)
    MIPS_SYS(sys_signalfd4, 4)
    MIPS_SYS(sys_eventfd2, 2)   /* 4325 */
    MIPS_SYS(sys_epoll_create1, 1)
    MIPS_SYS(sys_dup3, 3)
    MIPS_SYS(sys_pipe2, 2)
    MIPS_SYS(sys_inotify_init1, 1)
    MIPS_SYS(sys_preadv, 6)     /* 4330 */
    MIPS_SYS(sys_pwritev, 6)
    MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
    MIPS_SYS(sys_perf_event_open, 5)
    MIPS_SYS(sys_accept4, 4)
    MIPS_SYS(sys_recvmmsg, 5)   /* 4335 */
    MIPS_SYS(sys_fanotify_init, 2)
    MIPS_SYS(sys_fanotify_mark, 6)
    MIPS_SYS(sys_prlimit64, 4)
    MIPS_SYS(sys_name_to_handle_at, 5)
    MIPS_SYS(sys_open_by_handle_at, 3)  /* 4340 */
    MIPS_SYS(sys_clock_adjtime, 2)
    MIPS_SYS(sys_syncfs, 1)
};

#undef MIPS_SYS
/*
 * do_store_exclusive() (MIPS): complete a ll/sc (load-linked /
 * store-conditional) pair in user-mode emulation.  Re-reads the linked
 * address, compares against env->llval, and either stores env->llnewval
 * (rd set to 1) or fails the sc (rd set to 0), then advances the PC.
 * NOTE(review): this chunk is a lossy extraction — local declarations,
 * braces and the start/end_exclusive calls are missing lines.  Code
 * below is kept byte-identical to the extracted text.
 */
2115 static int do_store_exclusive(CPUMIPSState
*env
)
2118 target_ulong page_addr
;
/* Align the linked address down to its page to probe access rights. */
2126 page_addr
= addr
& TARGET_PAGE_MASK
;
2129 flags
= page_get_flags(page_addr
);
2130 if ((flags
& PAGE_READ
) == 0) {
/* llreg encodes the destination register (low 5 bits) and a doubleword flag (bit 5). */
2133 reg
= env
->llreg
& 0x1f;
2134 d
= (env
->llreg
& 0x20) != 0;
2136 segv
= get_user_s64(val
, addr
);
2138 segv
= get_user_s32(val
, addr
);
/* Memory changed since the ll: the store-conditional fails, rd = 0. */
2141 if (val
!= env
->llval
) {
2142 env
->active_tc
.gpr
[reg
] = 0;
2145 segv
= put_user_u64(env
->llnewval
, addr
);
2147 segv
= put_user_u32(env
->llnewval
, addr
);
/* Store succeeded: rd = 1, then step past the sc instruction. */
2150 env
->active_tc
.gpr
[reg
] = 1;
2157 env
->active_tc
.PC
+= 4;
/*
 * cpu_loop() (MIPS): main user-mode emulation loop.  Runs guest code
 * via cpu_mips_exec() and dispatches the resulting trap: syscalls
 * (o32 convention — args 5..8 read from the guest stack using the
 * mips_syscall_args table), SEGV/ILL signal delivery, gdb single-step,
 * and sc-completion via do_store_exclusive().
 * NOTE(review): lossy extraction — case labels, break statements and
 * braces are missing lines; text kept byte-identical.
 */
2164 void cpu_loop(CPUMIPSState
*env
)
2166 target_siginfo_t info
;
2168 unsigned int syscall_num
;
2171 cpu_exec_start(env
);
2172 trapnr
= cpu_mips_exec(env
);
/* o32 syscall numbers are biased by 4000; v0 holds the number. */
2176 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2177 env
->active_tc
.PC
+= 4;
2178 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2179 ret
= -TARGET_ENOSYS
;
2183 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2185 nb_args
= mips_syscall_args
[syscall_num
];
2186 sp_reg
= env
->active_tc
.gpr
[29];
2188 /* these arguments are taken from the stack */
2190 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2194 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2198 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2202 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2208 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2209 env
->active_tc
.gpr
[4],
2210 env
->active_tc
.gpr
[5],
2211 env
->active_tc
.gpr
[6],
2212 env
->active_tc
.gpr
[7],
2213 arg5
, arg6
, arg7
, arg8
);
2216 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2217 /* Returning from a successful sigreturn syscall.
2218 Avoid clobbering register state. */
/* MIPS returns errors via a7 flag plus positive errno in v0. */
2221 if ((unsigned int)ret
>= (unsigned int)(-1133)) {
2222 env
->active_tc
.gpr
[7] = 1; /* error flag */
2225 env
->active_tc
.gpr
[7] = 0; /* error flag */
2227 env
->active_tc
.gpr
[2] = ret
;
/* TLB/address error: deliver SIGSEGV at the faulting address. */
2233 info
.si_signo
= TARGET_SIGSEGV
;
2235 /* XXX: check env->error_code */
2236 info
.si_code
= TARGET_SEGV_MAPERR
;
2237 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2238 queue_signal(env
, info
.si_signo
, &info
);
2242 info
.si_signo
= TARGET_SIGILL
;
2245 queue_signal(env
, info
.si_signo
, &info
);
2247 case EXCP_INTERRUPT
:
2248 /* just indicate that signals should be handled asap */
/* Debug exception: hand control to the gdb stub if attached. */
2254 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
2257 info
.si_signo
= sig
;
2259 info
.si_code
= TARGET_TRAP_BRKPT
;
2260 queue_signal(env
, info
.si_signo
, &info
);
2265 if (do_store_exclusive(env
)) {
2266 info
.si_signo
= TARGET_SIGSEGV
;
2268 info
.si_code
= TARGET_SEGV_MAPERR
;
2269 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2270 queue_signal(env
, info
.si_signo
, &info
);
2275 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
2277 cpu_dump_state(env
, stderr
, fprintf
, 0);
2280 process_pending_signals(env
);
/*
 * cpu_loop() (SH4): user-mode emulation loop.  Executes guest code via
 * cpu_sh4_exec() and handles syscall traps (result in gregs[0]),
 * gdb breakpoints and data-address SEGVs (faulting address in env->tea).
 * NOTE(review): lossy extraction — case labels/breaks/braces missing;
 * text kept byte-identical.
 */
2286 void cpu_loop (CPUState
*env
)
2289 target_siginfo_t info
;
2292 trapnr
= cpu_sh4_exec (env
);
2297 ret
= do_syscall(env
,
/* Syscall return value goes back to the guest in r0. */
2306 env
->gregs
[0] = ret
;
2308 case EXCP_INTERRUPT
:
2309 /* just indicate that signals should be handled asap */
2315 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
2318 info
.si_signo
= sig
;
2320 info
.si_code
= TARGET_TRAP_BRKPT
;
2321 queue_signal(env
, info
.si_signo
, &info
);
/* Data fault: SIGSEGV with the TEA (faulting effective address). */
2327 info
.si_signo
= SIGSEGV
;
2329 info
.si_code
= TARGET_SEGV_MAPERR
;
2330 info
._sifields
._sigfault
._addr
= env
->tea
;
2331 queue_signal(env
, info
.si_signo
, &info
);
2335 printf ("Unhandled trap: 0x%x\n", trapnr
);
2336 cpu_dump_state(env
, stderr
, fprintf
, 0);
2339 process_pending_signals (env
);
/*
 * cpu_loop() (CRIS): user-mode emulation loop.  Executes guest code via
 * cpu_cris_exec() and handles MMU faults (fault address in
 * pregs[PR_EDA]), syscalls (result in regs[10]) and gdb breakpoints.
 * NOTE(review): lossy extraction — case labels/breaks/braces missing;
 * text kept byte-identical.
 */
2345 void cpu_loop (CPUState
*env
)
2348 target_siginfo_t info
;
2351 trapnr
= cpu_cris_exec (env
);
2355 info
.si_signo
= SIGSEGV
;
2357 /* XXX: check env->error_code */
2358 info
.si_code
= TARGET_SEGV_MAPERR
;
2359 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2360 queue_signal(env
, info
.si_signo
, &info
);
2363 case EXCP_INTERRUPT
:
2364 /* just indicate that signals should be handled asap */
2367 ret
= do_syscall(env
,
/* Syscall return value goes back to the guest in r10. */
2376 env
->regs
[10] = ret
;
2382 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
2385 info
.si_signo
= sig
;
2387 info
.si_code
= TARGET_TRAP_BRKPT
;
2388 queue_signal(env
, info
.si_signo
, &info
);
2393 printf ("Unhandled trap: 0x%x\n", trapnr
);
2394 cpu_dump_state(env
, stderr
, fprintf
, 0);
2397 process_pending_signals (env
);
2402 #ifdef TARGET_MICROBLAZE
/*
 * cpu_loop() (MicroBlaze): user-mode emulation loop.  Executes guest
 * code via cpu_mb_exec() and handles MMU faults, syscalls (brki path,
 * return address in r14), hardware exceptions decoded from SR_ESR
 * (divide-by-zero and FPU errors become SIGFPE) and gdb breakpoints.
 * NOTE(review): lossy extraction — case labels/breaks/braces missing;
 * text kept byte-identical.
 */
2403 void cpu_loop (CPUState
*env
)
2406 target_siginfo_t info
;
2409 trapnr
= cpu_mb_exec (env
);
2413 info
.si_signo
= SIGSEGV
;
2415 /* XXX: check env->error_code */
2416 info
.si_code
= TARGET_SEGV_MAPERR
;
2417 info
._sifields
._sigfault
._addr
= 0;
2418 queue_signal(env
, info
.si_signo
, &info
);
2421 case EXCP_INTERRUPT
:
2422 /* just indicate that signals should be handled asap */
2425 /* Return address is 4 bytes after the call. */
2427 ret
= do_syscall(env
,
2437 env
->sregs
[SR_PC
] = env
->regs
[14];
/* Hardware exception: r17 gets the resume address (PC + 4). */
2440 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2441 if (env
->iflags
& D_FLAG
) {
2442 env
->sregs
[SR_ESR
] |= 1 << 12;
2443 env
->sregs
[SR_PC
] -= 4;
2444 /* FIXME: if branch was immed, replay the imm aswell. */
2447 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
/* Decode the exception cause from the low 5 bits of SR_ESR. */
2449 switch (env
->sregs
[SR_ESR
] & 31) {
2450 case ESR_EC_DIVZERO
:
2451 info
.si_signo
= SIGFPE
;
2453 info
.si_code
= TARGET_FPE_FLTDIV
;
2454 info
._sifields
._sigfault
._addr
= 0;
2455 queue_signal(env
, info
.si_signo
, &info
);
2458 info
.si_signo
= SIGFPE
;
2460 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2461 info
.si_code
= TARGET_FPE_FLTINV
;
2463 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2464 info
.si_code
= TARGET_FPE_FLTDIV
;
2466 info
._sifields
._sigfault
._addr
= 0;
2467 queue_signal(env
, info
.si_signo
, &info
);
2470 printf ("Unhandled hw-exception: 0x%x\n",
2471 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
2472 cpu_dump_state(env
, stderr
, fprintf
, 0);
2481 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
2484 info
.si_signo
= sig
;
2486 info
.si_code
= TARGET_TRAP_BRKPT
;
2487 queue_signal(env
, info
.si_signo
, &info
);
2492 printf ("Unhandled trap: 0x%x\n", trapnr
);
2493 cpu_dump_state(env
, stderr
, fprintf
, 0);
2496 process_pending_signals (env
);
/*
 * cpu_loop() (m68k): user-mode emulation loop.  Executes guest code via
 * cpu_m68k_exec() and handles simulator syscalls (when
 * ts->sim_syscalls is set, the syscall number is read from the guest
 * instruction stream), semihosting, illegal instructions, regular
 * syscalls (result in d0), MMU access faults (address in env->mmu.ar)
 * and gdb breakpoints.
 * NOTE(review): lossy extraction — case labels/breaks/braces missing;
 * text kept byte-identical.
 */
2503 void cpu_loop(CPUM68KState
*env
)
2507 target_siginfo_t info
;
2508 TaskState
*ts
= env
->opaque
;
2511 trapnr
= cpu_m68k_exec(env
);
2515 if (ts
->sim_syscalls
) {
/* Simulator syscall: number is encoded inline after the trap insn. */
2517 nr
= lduw(env
->pc
+ 2);
2519 do_m68k_simcall(env
, nr
);
2525 case EXCP_HALT_INSN
:
2526 /* Semihosing syscall. */
2528 do_m68k_semihosting(env
, env
->dregs
[0]);
2532 case EXCP_UNSUPPORTED
:
2534 info
.si_signo
= SIGILL
;
2536 info
.si_code
= TARGET_ILL_ILLOPN
;
2537 info
._sifields
._sigfault
._addr
= env
->pc
;
2538 queue_signal(env
, info
.si_signo
, &info
);
2542 ts
->sim_syscalls
= 0;
2545 env
->dregs
[0] = do_syscall(env
,
2556 case EXCP_INTERRUPT
:
2557 /* just indicate that signals should be handled asap */
2561 info
.si_signo
= SIGSEGV
;
2563 /* XXX: check env->error_code */
2564 info
.si_code
= TARGET_SEGV_MAPERR
;
2565 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
2566 queue_signal(env
, info
.si_signo
, &info
);
2573 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
2576 info
.si_signo
= sig
;
2578 info
.si_code
= TARGET_TRAP_BRKPT
;
2579 queue_signal(env
, info
.si_signo
, &info
);
2584 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
2586 cpu_dump_state(env
, stderr
, fprintf
, 0);
2589 process_pending_signals(env
);
2592 #endif /* TARGET_M68K */
/*
 * do_store_exclusive() (Alpha): complete a ldl_l/stl_c (or ldq_l/stq_c
 * when quad != 0) pair in user-mode emulation.  Re-reads the locked
 * address, compares against env->lock_value, and conditionally stores
 * the pending value; on an access fault a SIGSEGV is queued at the
 * locked address.  Clears lock_addr/lock_st_addr up front so the
 * sequence cannot be replayed.
 * NOTE(review): lossy extraction — locals, braces and the
 * success/failure register writeback lines are missing; text kept
 * byte-identical.
 */
2595 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
2597 target_ulong addr
, val
, tmp
;
2598 target_siginfo_t info
;
2601 addr
= env
->lock_addr
;
2602 tmp
= env
->lock_st_addr
;
2603 env
->lock_addr
= -1;
2604 env
->lock_st_addr
= 0;
2610 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
2614 if (val
== env
->lock_value
) {
2616 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
/* Access fault on the locked address: deliver SIGSEGV there. */
2633 info
.si_signo
= TARGET_SIGSEGV
;
2635 info
.si_code
= TARGET_SEGV_MAPERR
;
2636 info
._sifields
._sigfault
._addr
= addr
;
2637 queue_signal(env
, TARGET_SIGSEGV
, &info
);
/*
 * cpu_loop() (Alpha): user-mode emulation loop.  Executes guest code
 * via cpu_alpha_exec() and handles PALcode-mediated traps: MMU faults
 * (SIGSEGV/ACCERR decided via page_get_flags), unaligned accesses
 * (SIGBUS), illegal opcodes, FP arithmetic traps, callsys syscalls
 * (result/errno split across v0 and a3), gentrap codes mapped to
 * SIGFPE/SIGTRAP si_codes, gdb breakpoints, and stl_c/stq_c completion
 * through do_store_exclusive().  lock_addr is invalidated on every
 * exception since any trap passes through PALcode (REI).
 * NOTE(review): lossy extraction — case labels/breaks/braces missing;
 * text kept byte-identical.
 */
2640 void cpu_loop (CPUState
*env
)
2643 target_siginfo_t info
;
2647 trapnr
= cpu_alpha_exec (env
);
2649 /* All of the traps imply a transition through PALcode, which
2650 implies an REI instruction has been executed. Which means
2651 that the intr_flag should be cleared. */
2656 fprintf(stderr
, "Reset requested. Exit\n");
2660 fprintf(stderr
, "Machine check exception. Exit\n");
2663 case EXCP_SMP_INTERRUPT
:
2664 case EXCP_CLK_INTERRUPT
:
2665 case EXCP_DEV_INTERRUPT
:
2666 fprintf(stderr
, "External interrupt. Exit\n");
/* MMU fault: ACCERR when the page is mapped, MAPERR otherwise. */
2670 env
->lock_addr
= -1;
2671 info
.si_signo
= TARGET_SIGSEGV
;
2673 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
2674 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
2675 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
2676 queue_signal(env
, info
.si_signo
, &info
);
/* Unaligned access: SIGBUS at the faulting address. */
2679 env
->lock_addr
= -1;
2680 info
.si_signo
= TARGET_SIGBUS
;
2682 info
.si_code
= TARGET_BUS_ADRALN
;
2683 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
2684 queue_signal(env
, info
.si_signo
, &info
);
2688 env
->lock_addr
= -1;
2689 info
.si_signo
= TARGET_SIGILL
;
2691 info
.si_code
= TARGET_ILL_ILLOPC
;
2692 info
._sifields
._sigfault
._addr
= env
->pc
;
2693 queue_signal(env
, info
.si_signo
, &info
);
2696 env
->lock_addr
= -1;
2697 info
.si_signo
= TARGET_SIGFPE
;
2699 info
.si_code
= TARGET_FPE_FLTINV
;
2700 info
._sifields
._sigfault
._addr
= env
->pc
;
2701 queue_signal(env
, info
.si_signo
, &info
);
2704 /* No-op. Linux simply re-enables the FPU. */
2707 env
->lock_addr
= -1;
2708 switch (env
->error_code
) {
2711 info
.si_signo
= TARGET_SIGTRAP
;
2713 info
.si_code
= TARGET_TRAP_BRKPT
;
2714 info
._sifields
._sigfault
._addr
= env
->pc
;
2715 queue_signal(env
, info
.si_signo
, &info
);
2719 info
.si_signo
= TARGET_SIGTRAP
;
2722 info
._sifields
._sigfault
._addr
= env
->pc
;
2723 queue_signal(env
, info
.si_signo
, &info
);
/* callsys: syscall number in v0, arguments in a0..a5. */
2727 trapnr
= env
->ir
[IR_V0
];
2728 sysret
= do_syscall(env
, trapnr
,
2729 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
2730 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
2731 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
2733 if (trapnr
== TARGET_NR_sigreturn
2734 || trapnr
== TARGET_NR_rt_sigreturn
) {
2737 /* Syscall writes 0 to V0 to bypass error check, similar
2738 to how this is handled internal to Linux kernel. */
2739 if (env
->ir
[IR_V0
] == 0) {
2740 env
->ir
[IR_V0
] = sysret
;
2742 env
->ir
[IR_V0
] = (sysret
< 0 ? -sysret
: sysret
);
2743 env
->ir
[IR_A3
] = (sysret
< 0);
2748 /* ??? We can probably elide the code using page_unprotect
2749 that is checking for self-modifying code. Instead we
2750 could simply call tb_flush here. Until we work out the
2751 changes required to turn off the extra write protection,
2752 this can be a no-op. */
2756 /* Handled in the translator for usermode. */
2760 /* Handled in the translator for usermode. */
/* gentrap: map the Alpha software-trap code to a si_code. */
2764 info
.si_signo
= TARGET_SIGFPE
;
2765 switch (env
->ir
[IR_A0
]) {
2766 case TARGET_GEN_INTOVF
:
2767 info
.si_code
= TARGET_FPE_INTOVF
;
2769 case TARGET_GEN_INTDIV
:
2770 info
.si_code
= TARGET_FPE_INTDIV
;
2772 case TARGET_GEN_FLTOVF
:
2773 info
.si_code
= TARGET_FPE_FLTOVF
;
2775 case TARGET_GEN_FLTUND
:
2776 info
.si_code
= TARGET_FPE_FLTUND
;
2778 case TARGET_GEN_FLTINV
:
2779 info
.si_code
= TARGET_FPE_FLTINV
;
2781 case TARGET_GEN_FLTINE
:
2782 info
.si_code
= TARGET_FPE_FLTRES
;
2784 case TARGET_GEN_ROPRAND
:
2788 info
.si_signo
= TARGET_SIGTRAP
;
2793 info
._sifields
._sigfault
._addr
= env
->pc
;
2794 queue_signal(env
, info
.si_signo
, &info
);
2801 info
.si_signo
= gdb_handlesig (env
, TARGET_SIGTRAP
);
2802 if (info
.si_signo
) {
2803 env
->lock_addr
= -1;
2805 info
.si_code
= TARGET_TRAP_BRKPT
;
2806 queue_signal(env
, info
.si_signo
, &info
);
2811 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
2814 printf ("Unhandled trap: 0x%x\n", trapnr
);
2815 cpu_dump_state(env
, stderr
, fprintf
, 0);
2818 process_pending_signals (env
);
2821 #endif /* TARGET_ALPHA */
/*
 * cpu_loop() (s390x): user-mode emulation loop.  Executes guest code
 * via cpu_s390x_exec() and handles gdb breakpoints, SVC (syscall)
 * interrupts (number in int_svc_code, PSW advanced by the instruction
 * length int_svc_ilc, result in r2), address faults (fault address in
 * __excp_addr) and specification exceptions (SIGILL).
 * NOTE(review): lossy extraction — case labels/breaks/braces missing;
 * text kept byte-identical.
 */
2824 void cpu_loop(CPUS390XState
*env
)
2827 target_siginfo_t info
;
2830 trapnr
= cpu_s390x_exec (env
);
2833 case EXCP_INTERRUPT
:
2834 /* just indicate that signals should be handled asap */
2840 sig
= gdb_handlesig (env
, TARGET_SIGTRAP
);
2842 info
.si_signo
= sig
;
2844 info
.si_code
= TARGET_TRAP_BRKPT
;
2845 queue_signal(env
, info
.si_signo
, &info
);
2851 int n
= env
->int_svc_code
;
2853 /* syscalls > 255 */
/* Step the PSW past the svc instruction before dispatching. */
2856 env
->psw
.addr
+= env
->int_svc_ilc
;
2857 env
->regs
[2] = do_syscall(env
, n
,
2869 info
.si_signo
= SIGSEGV
;
2871 /* XXX: check env->error_code */
2872 info
.si_code
= TARGET_SEGV_MAPERR
;
2873 info
._sifields
._sigfault
._addr
= env
->__excp_addr
;
2874 queue_signal(env
, info
.si_signo
, &info
);
2879 fprintf(stderr
,"specification exception insn 0x%08x%04x\n", ldl(env
->psw
.addr
), lduw(env
->psw
.addr
+ 4));
2880 info
.si_signo
= SIGILL
;
2882 info
.si_code
= TARGET_ILL_ILLOPC
;
2883 info
._sifields
._sigfault
._addr
= env
->__excp_addr
;
2884 queue_signal(env
, info
.si_signo
, &info
);
2888 printf ("Unhandled trap: 0x%x\n", trapnr
);
2889 cpu_dump_state(env
, stderr
, fprintf
, 0);
2892 process_pending_signals (env
);
2896 #endif /* TARGET_S390X */
2898 THREAD CPUState
*thread_env
;
2900 void task_settid(TaskState
*ts
)
2902 if (ts
->ts_tid
== 0) {
2903 #ifdef CONFIG_USE_NPTL
2904 ts
->ts_tid
= (pid_t
)syscall(SYS_gettid
);
2906 /* when no threads are used, tid becomes pid */
2907 ts
->ts_tid
= getpid();
/*
 * Stop every emulated task before a process-wide operation.
 * NOTE(review): lossy extraction — the function body (presumably a
 * start_exclusive() call given the comment) is missing lines; text
 * kept byte-identical.
 */
2912 void stop_all_tasks(void)
2915 * We trust that when using NPTL, start_exclusive()
2916 * handles thread stopping correctly.
2921 /* Assumes contents are already zeroed. */
/*
 * Initialize a freshly allocated TaskState: link the per-task signal
 * queue entries into a free list headed by first_free, terminated
 * with NULL at the final slot.
 * NOTE(review): lossy extraction — locals/braces (and possibly a
 * ts->used assignment) are missing lines; text kept byte-identical.
 */
2922 void init_task_state(TaskState
*ts
)
2927 ts
->first_free
= ts
->sigqueue_table
;
2928 for (i
= 0; i
< MAX_SIGQUEUE_SIZE
- 1; i
++) {
2929 ts
->sigqueue_table
[i
].next
= &ts
->sigqueue_table
[i
+ 1];
/* Last entry terminates the free list. */
2931 ts
->sigqueue_table
[i
].next
= NULL
;
/*
 * -h: print usage information.
 * NOTE(review): body missing from this extraction — presumably a
 * usage() call; confirm against the full source.
 */
2934 static void handle_arg_help(const char *arg
)
/*
 * -d: parse a comma-separated list of log items into a mask; on an
 * unrecognised list, print the available items.
 * NOTE(review): lossy extraction — the mask declaration and the tail
 * of the function (presumably enabling the log mask and exiting on
 * error) are missing lines; text kept byte-identical.
 */
2939 static void handle_arg_log(const char *arg
)
2942 const CPULogItem
*item
;
2944 mask
= cpu_str_to_log_mask(arg
);
2946 printf("Log items (comma separated):\n");
2947 for (item
= cpu_log_items
; item
->mask
!= 0; item
++) {
2948 printf("%-10s %s\n", item
->name
, item
->help
);
2955 static void handle_arg_set_env(const char *arg
)
2957 char *r
, *p
, *token
;
2958 r
= p
= strdup(arg
);
2959 while ((token
= strsep(&p
, ",")) != NULL
) {
2960 if (envlist_setenv(envlist
, token
) != 0) {
2967 static void handle_arg_unset_env(const char *arg
)
2969 char *r
, *p
, *token
;
2970 r
= p
= strdup(arg
);
2971 while ((token
= strsep(&p
, ",")) != NULL
) {
2972 if (envlist_unsetenv(envlist
, token
) != 0) {
2979 static void handle_arg_argv0(const char *arg
)
2981 argv0
= strdup(arg
);
2984 static void handle_arg_stack_size(const char *arg
)
2987 guest_stack_size
= strtoul(arg
, &p
, 0);
2988 if (guest_stack_size
== 0) {
2993 guest_stack_size
*= 1024 * 1024;
2994 } else if (*p
== 'k' || *p
== 'K') {
2995 guest_stack_size
*= 1024;
2999 static void handle_arg_ld_prefix(const char *arg
)
3001 interp_prefix
= strdup(arg
);
3004 static void handle_arg_pagesize(const char *arg
)
3006 qemu_host_page_size
= atoi(arg
);
3007 if (qemu_host_page_size
== 0 ||
3008 (qemu_host_page_size
& (qemu_host_page_size
- 1)) != 0) {
3009 fprintf(stderr
, "page size must be a power of two\n");
3014 static void handle_arg_gdb(const char *arg
)
3016 gdbstub_port
= atoi(arg
);
3019 static void handle_arg_uname(const char *arg
)
3021 qemu_uname_release
= strdup(arg
);
3024 static void handle_arg_cpu(const char *arg
)
3026 cpu_model
= strdup(arg
);
3027 if (cpu_model
== NULL
|| strcmp(cpu_model
, "?") == 0) {
3028 /* XXX: implement xxx_cpu_list for targets that still miss it */
3029 #if defined(cpu_list_id)
3030 cpu_list_id(stdout
, &fprintf
, "");
3031 #elif defined(cpu_list)
3032 cpu_list(stdout
, &fprintf
); /* deprecated */
3038 #if defined(CONFIG_USE_GUEST_BASE)
3039 static void handle_arg_guest_base(const char *arg
)
3041 guest_base
= strtol(arg
, NULL
, 0);
3042 have_guest_base
= 1;
3045 static void handle_arg_reserved_va(const char *arg
)
3049 reserved_va
= strtoul(arg
, &p
, 0);
3063 unsigned long unshifted
= reserved_va
;
3065 reserved_va
<<= shift
;
3066 if (((reserved_va
>> shift
) != unshifted
)
3067 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3068 || (reserved_va
> (1ul << TARGET_VIRT_ADDR_SPACE_BITS
))
3071 fprintf(stderr
, "Reserved virtual address too big\n");
3076 fprintf(stderr
, "Unrecognised -R size suffix '%s'\n", p
);
3082 static void handle_arg_singlestep(const char *arg
)
3087 static void handle_arg_strace(const char *arg
)
3092 static void handle_arg_version(const char *arg
)
3094 printf("qemu-" TARGET_ARCH
" version " QEMU_VERSION QEMU_PKGVERSION
3095 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3099 struct qemu_argument
{
3103 void (*handle_opt
)(const char *arg
);
3104 const char *example
;
3108 struct qemu_argument arg_table
[] = {
3109 {"h", "", false, handle_arg_help
,
3110 "", "print this help"},
3111 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3112 "port", "wait gdb connection to 'port'"},
3113 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3114 "path", "set the elf interpreter prefix to 'path'"},
3115 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3116 "size", "set the stack size to 'size' bytes"},
3117 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3118 "model", "select CPU (-cpu ? for list)"},
3119 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3120 "var=value", "sets targets environment variable (see below)"},
3121 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3122 "var", "unsets targets environment variable (see below)"},
3123 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3124 "argv0", "forces target process argv[0] to be 'argv0'"},
3125 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3126 "uname", "set qemu uname release string to 'uname'"},
3127 #if defined(CONFIG_USE_GUEST_BASE)
3128 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3129 "address", "set guest_base address to 'address'"},
3130 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3131 "size", "reserve 'size' bytes for guest virtual address space"},
3133 {"d", "QEMU_LOG", true, handle_arg_log
,
3134 "options", "activate log"},
3135 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
3136 "pagesize", "set the host page size to 'pagesize'"},
3137 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
3138 "", "run in singlestep mode"},
3139 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
3140 "", "log system calls"},
3141 {"version", "QEMU_VERSION", false, handle_arg_version
,
3142 "", "display version information and exit"},
3143 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
3146 static void usage(void)
3148 struct qemu_argument
*arginfo
;
3152 printf("usage: qemu-" TARGET_ARCH
" [options] program [arguments...]\n"
3153 "Linux CPU emulator (compiled for " TARGET_ARCH
" emulation)\n"
3155 "Options and associated environment variables:\n"
3158 maxarglen
= maxenvlen
= 0;
3160 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3161 if (strlen(arginfo
->env
) > maxenvlen
) {
3162 maxenvlen
= strlen(arginfo
->env
);
3164 if (strlen(arginfo
->argv
) > maxarglen
) {
3165 maxarglen
= strlen(arginfo
->argv
);
3169 printf("%-*s%-*sDescription\n", maxarglen
+3, "Argument",
3170 maxenvlen
+1, "Env-variable");
3172 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3173 if (arginfo
->has_arg
) {
3174 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
3175 (int)(maxarglen
-strlen(arginfo
->argv
)), arginfo
->example
,
3176 maxenvlen
, arginfo
->env
, arginfo
->help
);
3178 printf("-%-*s %-*s %s\n", maxarglen
+1, arginfo
->argv
,
3179 maxenvlen
, arginfo
->env
,
3186 "QEMU_LD_PREFIX = %s\n"
3187 "QEMU_STACK_SIZE = %ld byte\n"
3194 "You can use -E and -U options or the QEMU_SET_ENV and\n"
3195 "QEMU_UNSET_ENV environment variables to set and unset\n"
3196 "environment variables for the target process.\n"
3197 "It is possible to provide several variables by separating them\n"
3198 "by commas in getsubopt(3) style. Additionally it is possible to\n"
3199 "provide the -E and -U options multiple times.\n"
3200 "The following lines are equivalent:\n"
3201 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
3202 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
3203 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
3204 "Note that if you provide several changes to a single variable\n"
3205 "the last change will stay in effect.\n");
3210 static int parse_args(int argc
, char **argv
)
3214 struct qemu_argument
*arginfo
;
3216 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3217 if (arginfo
->env
== NULL
) {
3221 r
= getenv(arginfo
->env
);
3223 arginfo
->handle_opt(r
);
3229 if (optind
>= argc
) {
3238 if (!strcmp(r
, "-")) {
3242 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3243 if (!strcmp(r
, arginfo
->argv
)) {
3244 if (arginfo
->has_arg
) {
3245 if (optind
>= argc
) {
3248 arginfo
->handle_opt(argv
[optind
]);
3251 arginfo
->handle_opt(NULL
);
3257 /* no option matched the current argv */
3258 if (arginfo
->handle_opt
== NULL
) {
3263 if (optind
>= argc
) {
3267 filename
= argv
[optind
];
3268 exec_path
= argv
[optind
];
3273 int main(int argc
, char **argv
, char **envp
)
3275 const char *log_file
= DEBUG_LOGFILE
;
3276 struct target_pt_regs regs1
, *regs
= ®s1
;
3277 struct image_info info1
, *info
= &info1
;
3278 struct linux_binprm bprm
;
3282 char **target_environ
, **wrk
;
3288 qemu_cache_utils_init(envp
);
3290 if ((envlist
= envlist_create()) == NULL
) {
3291 (void) fprintf(stderr
, "Unable to allocate envlist\n");
3295 /* add current environment into the list */
3296 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
3297 (void) envlist_setenv(envlist
, *wrk
);
3300 /* Read the stack limit from the kernel. If it's "unlimited",
3301 then we can do little else besides use the default. */
3304 if (getrlimit(RLIMIT_STACK
, &lim
) == 0
3305 && lim
.rlim_cur
!= RLIM_INFINITY
3306 && lim
.rlim_cur
== (target_long
)lim
.rlim_cur
) {
3307 guest_stack_size
= lim
.rlim_cur
;
3312 #if defined(cpudef_setup)
3313 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
3317 cpu_set_log_filename(log_file
);
3318 optind
= parse_args(argc
, argv
);
3321 memset(regs
, 0, sizeof(struct target_pt_regs
));
3323 /* Zero out image_info */
3324 memset(info
, 0, sizeof(struct image_info
));
3326 memset(&bprm
, 0, sizeof (bprm
));
3328 /* Scan interp_prefix dir for replacement files. */
3329 init_paths(interp_prefix
);
3331 if (cpu_model
== NULL
) {
3332 #if defined(TARGET_I386)
3333 #ifdef TARGET_X86_64
3334 cpu_model
= "qemu64";
3336 cpu_model
= "qemu32";
3338 #elif defined(TARGET_ARM)
3340 #elif defined(TARGET_UNICORE32)
3342 #elif defined(TARGET_M68K)
3344 #elif defined(TARGET_SPARC)
3345 #ifdef TARGET_SPARC64
3346 cpu_model
= "TI UltraSparc II";
3348 cpu_model
= "Fujitsu MB86904";
3350 #elif defined(TARGET_MIPS)
3351 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
3356 #elif defined(TARGET_PPC)
3358 cpu_model
= "970fx";
3367 cpu_exec_init_all();
3368 /* NOTE: we need to init the CPU at this stage to get
3369 qemu_host_page_size */
3370 env
= cpu_init(cpu_model
);
3372 fprintf(stderr
, "Unable to find CPU definition\n");
3375 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
3381 if (getenv("QEMU_STRACE")) {
3385 target_environ
= envlist_to_environ(envlist
, NULL
);
3386 envlist_free(envlist
);
3388 #if defined(CONFIG_USE_GUEST_BASE)
3390 * Now that page sizes are configured in cpu_init() we can do
3391 * proper page alignment for guest_base.
3393 guest_base
= HOST_PAGE_ALIGN(guest_base
);
3399 flags
= MAP_ANONYMOUS
| MAP_PRIVATE
| MAP_NORESERVE
;
3400 if (have_guest_base
) {
3403 p
= mmap((void *)guest_base
, reserved_va
, PROT_NONE
, flags
, -1, 0);
3404 if (p
== MAP_FAILED
) {
3405 fprintf(stderr
, "Unable to reserve guest address space\n");
3408 guest_base
= (unsigned long)p
;
3409 /* Make sure the address is properly aligned. */
3410 if (guest_base
& ~qemu_host_page_mask
) {
3411 munmap(p
, reserved_va
);
3412 p
= mmap((void *)guest_base
, reserved_va
+ qemu_host_page_size
,
3413 PROT_NONE
, flags
, -1, 0);
3414 if (p
== MAP_FAILED
) {
3415 fprintf(stderr
, "Unable to reserve guest address space\n");
3418 guest_base
= HOST_PAGE_ALIGN((unsigned long)p
);
3420 qemu_log("Reserved 0x%lx bytes of guest address space\n", reserved_va
);
3423 if (reserved_va
|| have_guest_base
) {
3424 if (!guest_validate_base(guest_base
)) {
3425 fprintf(stderr
, "Guest base/Reserved VA rejected by guest code\n");
3429 #endif /* CONFIG_USE_GUEST_BASE */
3432 * Read in mmap_min_addr kernel parameter. This value is used
3433 * When loading the ELF image to determine whether guest_base
3434 * is needed. It is also used in mmap_find_vma.
3439 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
3441 if (fscanf(fp
, "%lu", &tmp
) == 1) {
3442 mmap_min_addr
= tmp
;
3443 qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr
);
3450 * Prepare copy of argv vector for target.
3452 target_argc
= argc
- optind
;
3453 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
3454 if (target_argv
== NULL
) {
3455 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
3460 * If argv0 is specified (using '-0' switch) we replace
3461 * argv[0] pointer with the given one.
3464 if (argv0
!= NULL
) {
3465 target_argv
[i
++] = strdup(argv0
);
3467 for (; i
< target_argc
; i
++) {
3468 target_argv
[i
] = strdup(argv
[optind
+ i
]);
3470 target_argv
[target_argc
] = NULL
;
3472 ts
= g_malloc0 (sizeof(TaskState
));
3473 init_task_state(ts
);
3474 /* build Task State */
3480 ret
= loader_exec(filename
, target_argv
, target_environ
, regs
,
3483 printf("Error %d while loading %s\n", ret
, filename
);
3487 for (i
= 0; i
< target_argc
; i
++) {
3488 free(target_argv
[i
]);
3492 for (wrk
= target_environ
; *wrk
; wrk
++) {
3496 free(target_environ
);
3498 if (qemu_log_enabled()) {
3499 #if defined(CONFIG_USE_GUEST_BASE)
3500 qemu_log("guest_base 0x%lx\n", guest_base
);
3504 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
3505 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
3506 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
3508 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
3510 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
3511 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
3513 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
3514 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
3517 target_set_brk(info
->brk
);
3521 #if defined(CONFIG_USE_GUEST_BASE)
3522 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
3523 generating the prologue until now so that the prologue can take
3524 the real value of GUEST_BASE into account. */
3525 tcg_prologue_init(&tcg_ctx
);
3528 #if defined(TARGET_I386)
3529 cpu_x86_set_cpl(env
, 3);
3531 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
3532 env
->hflags
|= HF_PE_MASK
;
3533 if (env
->cpuid_features
& CPUID_SSE
) {
3534 env
->cr
[4] |= CR4_OSFXSR_MASK
;
3535 env
->hflags
|= HF_OSFXSR_MASK
;
3537 #ifndef TARGET_ABI32
3538 /* enable 64 bit mode if possible */
3539 if (!(env
->cpuid_ext2_features
& CPUID_EXT2_LM
)) {
3540 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
3543 env
->cr
[4] |= CR4_PAE_MASK
;
3544 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
3545 env
->hflags
|= HF_LMA_MASK
;
3548 /* flags setup : we activate the IRQs by default as in user mode */
3549 env
->eflags
|= IF_MASK
;
3551 /* linux register setup */
3552 #ifndef TARGET_ABI32
3553 env
->regs
[R_EAX
] = regs
->rax
;
3554 env
->regs
[R_EBX
] = regs
->rbx
;
3555 env
->regs
[R_ECX
] = regs
->rcx
;
3556 env
->regs
[R_EDX
] = regs
->rdx
;
3557 env
->regs
[R_ESI
] = regs
->rsi
;
3558 env
->regs
[R_EDI
] = regs
->rdi
;
3559 env
->regs
[R_EBP
] = regs
->rbp
;
3560 env
->regs
[R_ESP
] = regs
->rsp
;
3561 env
->eip
= regs
->rip
;
3563 env
->regs
[R_EAX
] = regs
->eax
;
3564 env
->regs
[R_EBX
] = regs
->ebx
;
3565 env
->regs
[R_ECX
] = regs
->ecx
;
3566 env
->regs
[R_EDX
] = regs
->edx
;
3567 env
->regs
[R_ESI
] = regs
->esi
;
3568 env
->regs
[R_EDI
] = regs
->edi
;
3569 env
->regs
[R_EBP
] = regs
->ebp
;
3570 env
->regs
[R_ESP
] = regs
->esp
;
3571 env
->eip
= regs
->eip
;
3574 /* linux interrupt setup */
3575 #ifndef TARGET_ABI32
3576 env
->idt
.limit
= 511;
3578 env
->idt
.limit
= 255;
3580 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
3581 PROT_READ
|PROT_WRITE
,
3582 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3583 idt_table
= g2h(env
->idt
.base
);
3606 /* linux segment setup */
3608 uint64_t *gdt_table
;
3609 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
3610 PROT_READ
|PROT_WRITE
,
3611 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3612 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
3613 gdt_table
= g2h(env
->gdt
.base
);
3615 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
3616 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
3617 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
3619 /* 64 bit code segment */
3620 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
3621 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
3623 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
3625 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
3626 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
3627 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
3629 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
3630 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
3632 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
3633 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
3634 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
3635 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
3636 /* This hack makes Wine work... */
3637 env
->segs
[R_FS
].selector
= 0;
3639 cpu_x86_load_seg(env
, R_DS
, 0);
3640 cpu_x86_load_seg(env
, R_ES
, 0);
3641 cpu_x86_load_seg(env
, R_FS
, 0);
3642 cpu_x86_load_seg(env
, R_GS
, 0);
3644 #elif defined(TARGET_ARM)
3647 cpsr_write(env
, regs
->uregs
[16], 0xffffffff);
3648 for(i
= 0; i
< 16; i
++) {
3649 env
->regs
[i
] = regs
->uregs
[i
];
3652 #elif defined(TARGET_UNICORE32)
3655 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
3656 for (i
= 0; i
< 32; i
++) {
3657 env
->regs
[i
] = regs
->uregs
[i
];
3660 #elif defined(TARGET_SPARC)
3664 env
->npc
= regs
->npc
;
3666 for(i
= 0; i
< 8; i
++)
3667 env
->gregs
[i
] = regs
->u_regs
[i
];
3668 for(i
= 0; i
< 8; i
++)
3669 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
3671 #elif defined(TARGET_PPC)
3675 #if defined(TARGET_PPC64)
3676 #if defined(TARGET_ABI32)
3677 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
3679 env
->msr
|= (target_ulong
)1 << MSR_SF
;
3682 env
->nip
= regs
->nip
;
3683 for(i
= 0; i
< 32; i
++) {
3684 env
->gpr
[i
] = regs
->gpr
[i
];
3687 #elif defined(TARGET_M68K)
3690 env
->dregs
[0] = regs
->d0
;
3691 env
->dregs
[1] = regs
->d1
;
3692 env
->dregs
[2] = regs
->d2
;
3693 env
->dregs
[3] = regs
->d3
;
3694 env
->dregs
[4] = regs
->d4
;
3695 env
->dregs
[5] = regs
->d5
;
3696 env
->dregs
[6] = regs
->d6
;
3697 env
->dregs
[7] = regs
->d7
;
3698 env
->aregs
[0] = regs
->a0
;
3699 env
->aregs
[1] = regs
->a1
;
3700 env
->aregs
[2] = regs
->a2
;
3701 env
->aregs
[3] = regs
->a3
;
3702 env
->aregs
[4] = regs
->a4
;
3703 env
->aregs
[5] = regs
->a5
;
3704 env
->aregs
[6] = regs
->a6
;
3705 env
->aregs
[7] = regs
->usp
;
3707 ts
->sim_syscalls
= 1;
3709 #elif defined(TARGET_MICROBLAZE)
3711 env
->regs
[0] = regs
->r0
;
3712 env
->regs
[1] = regs
->r1
;
3713 env
->regs
[2] = regs
->r2
;
3714 env
->regs
[3] = regs
->r3
;
3715 env
->regs
[4] = regs
->r4
;
3716 env
->regs
[5] = regs
->r5
;
3717 env
->regs
[6] = regs
->r6
;
3718 env
->regs
[7] = regs
->r7
;
3719 env
->regs
[8] = regs
->r8
;
3720 env
->regs
[9] = regs
->r9
;
3721 env
->regs
[10] = regs
->r10
;
3722 env
->regs
[11] = regs
->r11
;
3723 env
->regs
[12] = regs
->r12
;
3724 env
->regs
[13] = regs
->r13
;
3725 env
->regs
[14] = regs
->r14
;
3726 env
->regs
[15] = regs
->r15
;
3727 env
->regs
[16] = regs
->r16
;
3728 env
->regs
[17] = regs
->r17
;
3729 env
->regs
[18] = regs
->r18
;
3730 env
->regs
[19] = regs
->r19
;
3731 env
->regs
[20] = regs
->r20
;
3732 env
->regs
[21] = regs
->r21
;
3733 env
->regs
[22] = regs
->r22
;
3734 env
->regs
[23] = regs
->r23
;
3735 env
->regs
[24] = regs
->r24
;
3736 env
->regs
[25] = regs
->r25
;
3737 env
->regs
[26] = regs
->r26
;
3738 env
->regs
[27] = regs
->r27
;
3739 env
->regs
[28] = regs
->r28
;
3740 env
->regs
[29] = regs
->r29
;
3741 env
->regs
[30] = regs
->r30
;
3742 env
->regs
[31] = regs
->r31
;
3743 env
->sregs
[SR_PC
] = regs
->pc
;
3745 #elif defined(TARGET_MIPS)
3749 for(i
= 0; i
< 32; i
++) {
3750 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
3752 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
3753 if (regs
->cp0_epc
& 1) {
3754 env
->hflags
|= MIPS_HFLAG_M16
;
3757 #elif defined(TARGET_SH4)
3761 for(i
= 0; i
< 16; i
++) {
3762 env
->gregs
[i
] = regs
->regs
[i
];
3766 #elif defined(TARGET_ALPHA)
3770 for(i
= 0; i
< 28; i
++) {
3771 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
3773 env
->ir
[IR_SP
] = regs
->usp
;
3776 #elif defined(TARGET_CRIS)
3778 env
->regs
[0] = regs
->r0
;
3779 env
->regs
[1] = regs
->r1
;
3780 env
->regs
[2] = regs
->r2
;
3781 env
->regs
[3] = regs
->r3
;
3782 env
->regs
[4] = regs
->r4
;
3783 env
->regs
[5] = regs
->r5
;
3784 env
->regs
[6] = regs
->r6
;
3785 env
->regs
[7] = regs
->r7
;
3786 env
->regs
[8] = regs
->r8
;
3787 env
->regs
[9] = regs
->r9
;
3788 env
->regs
[10] = regs
->r10
;
3789 env
->regs
[11] = regs
->r11
;
3790 env
->regs
[12] = regs
->r12
;
3791 env
->regs
[13] = regs
->r13
;
3792 env
->regs
[14] = info
->start_stack
;
3793 env
->regs
[15] = regs
->acr
;
3794 env
->pc
= regs
->erp
;
3796 #elif defined(TARGET_S390X)
3799 for (i
= 0; i
< 16; i
++) {
3800 env
->regs
[i
] = regs
->gprs
[i
];
3802 env
->psw
.mask
= regs
->psw
.mask
;
3803 env
->psw
.addr
= regs
->psw
.addr
;
3806 #error unsupported target CPU
3809 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
3810 ts
->stack_base
= info
->start_stack
;
3811 ts
->heap_base
= info
->brk
;
3812 /* This will be filled in on the first SYS_HEAPINFO call. */
3817 if (gdbserver_start(gdbstub_port
) < 0) {
3818 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
3822 gdb_handlesig(env
, 0);