4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
33 #include <sys/types.h>
39 #include <sys/mount.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
46 #include <sys/socket.h>
50 #include <sys/times.h>
53 #include <sys/statfs.h>
55 #include <sys/sysinfo.h>
56 //#include <sys/user.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <qemu-common.h>
64 #define termios host_termios
65 #define winsize host_winsize
66 #define termio host_termio
67 #define sgttyb host_sgttyb /* same as target */
68 #define tchars host_tchars /* same as target */
69 #define ltchars host_ltchars /* same as target */
71 #include <linux/termios.h>
72 #include <linux/unistd.h>
73 #include <linux/utsname.h>
74 #include <linux/cdrom.h>
75 #include <linux/hdreg.h>
76 #include <linux/soundcard.h>
78 #include <linux/mtio.h>
79 #include "linux_loop.h"
82 #include "qemu-common.h"
85 #include <linux/futex.h>
86 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
87 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
89 /* XXX: Hardcode the above values. */
90 #define CLONE_NPTL_FLAGS2 0
95 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
96 || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
97 /* 16 bit uid wrappers emulation */
101 //#include <linux/msdos_fs.h>
102 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
103 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
114 #define _syscall0(type,name) \
115 static type name (void) \
117 return syscall(__NR_##name); \
120 #define _syscall1(type,name,type1,arg1) \
121 static type name (type1 arg1) \
123 return syscall(__NR_##name, arg1); \
126 #define _syscall2(type,name,type1,arg1,type2,arg2) \
127 static type name (type1 arg1,type2 arg2) \
129 return syscall(__NR_##name, arg1, arg2); \
132 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
133 static type name (type1 arg1,type2 arg2,type3 arg3) \
135 return syscall(__NR_##name, arg1, arg2, arg3); \
138 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
139 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
141 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
144 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
146 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
148 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
152 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
153 type5,arg5,type6,arg6) \
154 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
157 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
161 #define __NR_sys_uname __NR_uname
162 #define __NR_sys_faccessat __NR_faccessat
163 #define __NR_sys_fchmodat __NR_fchmodat
164 #define __NR_sys_fchownat __NR_fchownat
165 #define __NR_sys_fstatat64 __NR_fstatat64
166 #define __NR_sys_futimesat __NR_futimesat
167 #define __NR_sys_getcwd1 __NR_getcwd
168 #define __NR_sys_getdents __NR_getdents
169 #define __NR_sys_getdents64 __NR_getdents64
170 #define __NR_sys_getpriority __NR_getpriority
171 #define __NR_sys_linkat __NR_linkat
172 #define __NR_sys_mkdirat __NR_mkdirat
173 #define __NR_sys_mknodat __NR_mknodat
174 #define __NR_sys_newfstatat __NR_newfstatat
175 #define __NR_sys_openat __NR_openat
176 #define __NR_sys_readlinkat __NR_readlinkat
177 #define __NR_sys_renameat __NR_renameat
178 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
179 #define __NR_sys_symlinkat __NR_symlinkat
180 #define __NR_sys_syslog __NR_syslog
181 #define __NR_sys_tgkill __NR_tgkill
182 #define __NR_sys_tkill __NR_tkill
183 #define __NR_sys_unlinkat __NR_unlinkat
184 #define __NR_sys_utimensat __NR_utimensat
185 #define __NR_sys_futex __NR_futex
186 #define __NR_sys_inotify_init __NR_inotify_init
187 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
188 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
190 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
191 #define __NR__llseek __NR_lseek
195 _syscall0(int, gettid
)
197 /* This is a replacement for the host gettid() and must return a host
199 static int gettid(void) {
203 _syscall1(int,sys_uname
,struct new_utsname
*,buf
)
204 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
205 _syscall4(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
,int,flags
)
207 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
208 _syscall4(int,sys_fchmodat
,int,dirfd
,const char *,pathname
,
209 mode_t
,mode
,int,flags
)
211 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
212 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
213 uid_t
,owner
,gid_t
,group
,int,flags
)
215 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
216 defined(__NR_fstatat64)
217 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
218 struct stat
*,buf
,int,flags
)
220 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
221 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
222 const struct timeval
*,times
)
224 _syscall2(int,sys_getcwd1
,char *,buf
,size_t,size
)
225 #if TARGET_ABI_BITS == 32
226 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
228 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
229 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
231 _syscall2(int, sys_getpriority
, int, which
, int, who
);
232 #if !defined (__x86_64__)
233 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
234 loff_t
*, res
, uint
, wh
);
236 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
237 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
238 int,newdirfd
,const char *,newpath
,int,flags
)
240 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
241 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
243 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
244 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
245 mode_t
,mode
,dev_t
,dev
)
247 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
248 defined(__NR_newfstatat)
249 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
250 struct stat
*,buf
,int,flags
)
252 #if defined(TARGET_NR_openat) && defined(__NR_openat)
253 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
255 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
256 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
257 char *,buf
,size_t,bufsize
)
259 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
260 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
261 int,newdirfd
,const char *,newpath
)
263 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
264 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
265 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
266 int,newdirfd
,const char *,newpath
)
268 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
269 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
270 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
272 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
273 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
275 #ifdef __NR_exit_group
276 _syscall1(int,exit_group
,int,error_code
)
278 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
279 _syscall1(int,set_tid_address
,int *,tidptr
)
281 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
282 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
284 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
285 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
286 const struct timespec
*,tsp
,int,flags
)
288 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
289 _syscall0(int,sys_inotify_init
)
291 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
292 _syscall3(int,sys_inotify_add_watch
,int,fd
,const char *,pathname
,uint32_t,mask
)
294 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
295 _syscall2(int,sys_inotify_rm_watch
,int,fd
,uint32_t,wd
)
297 #if defined(USE_NPTL)
298 #if defined(TARGET_NR_futex) && defined(__NR_futex)
299 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
300 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
304 extern int personality(int);
305 extern int flock(int, int);
306 extern int setfsuid(int);
307 extern int setfsgid(int);
308 extern int setgroups(int, gid_t
*);
310 #define ERRNO_TABLE_SIZE 1200
312 /* target_to_host_errno_table[] is initialized from
313 * host_to_target_errno_table[] in syscall_init(). */
314 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
318 * This list is the union of errno values overridden in asm-<arch>/errno.h
319 * minus the errnos that are not actually generic to all archs.
321 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
322 [EIDRM
] = TARGET_EIDRM
,
323 [ECHRNG
] = TARGET_ECHRNG
,
324 [EL2NSYNC
] = TARGET_EL2NSYNC
,
325 [EL3HLT
] = TARGET_EL3HLT
,
326 [EL3RST
] = TARGET_EL3RST
,
327 [ELNRNG
] = TARGET_ELNRNG
,
328 [EUNATCH
] = TARGET_EUNATCH
,
329 [ENOCSI
] = TARGET_ENOCSI
,
330 [EL2HLT
] = TARGET_EL2HLT
,
331 [EDEADLK
] = TARGET_EDEADLK
,
332 [ENOLCK
] = TARGET_ENOLCK
,
333 [EBADE
] = TARGET_EBADE
,
334 [EBADR
] = TARGET_EBADR
,
335 [EXFULL
] = TARGET_EXFULL
,
336 [ENOANO
] = TARGET_ENOANO
,
337 [EBADRQC
] = TARGET_EBADRQC
,
338 [EBADSLT
] = TARGET_EBADSLT
,
339 [EBFONT
] = TARGET_EBFONT
,
340 [ENOSTR
] = TARGET_ENOSTR
,
341 [ENODATA
] = TARGET_ENODATA
,
342 [ETIME
] = TARGET_ETIME
,
343 [ENOSR
] = TARGET_ENOSR
,
344 [ENONET
] = TARGET_ENONET
,
345 [ENOPKG
] = TARGET_ENOPKG
,
346 [EREMOTE
] = TARGET_EREMOTE
,
347 [ENOLINK
] = TARGET_ENOLINK
,
348 [EADV
] = TARGET_EADV
,
349 [ESRMNT
] = TARGET_ESRMNT
,
350 [ECOMM
] = TARGET_ECOMM
,
351 [EPROTO
] = TARGET_EPROTO
,
352 [EDOTDOT
] = TARGET_EDOTDOT
,
353 [EMULTIHOP
] = TARGET_EMULTIHOP
,
354 [EBADMSG
] = TARGET_EBADMSG
,
355 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
356 [EOVERFLOW
] = TARGET_EOVERFLOW
,
357 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
358 [EBADFD
] = TARGET_EBADFD
,
359 [EREMCHG
] = TARGET_EREMCHG
,
360 [ELIBACC
] = TARGET_ELIBACC
,
361 [ELIBBAD
] = TARGET_ELIBBAD
,
362 [ELIBSCN
] = TARGET_ELIBSCN
,
363 [ELIBMAX
] = TARGET_ELIBMAX
,
364 [ELIBEXEC
] = TARGET_ELIBEXEC
,
365 [EILSEQ
] = TARGET_EILSEQ
,
366 [ENOSYS
] = TARGET_ENOSYS
,
367 [ELOOP
] = TARGET_ELOOP
,
368 [ERESTART
] = TARGET_ERESTART
,
369 [ESTRPIPE
] = TARGET_ESTRPIPE
,
370 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
371 [EUSERS
] = TARGET_EUSERS
,
372 [ENOTSOCK
] = TARGET_ENOTSOCK
,
373 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
374 [EMSGSIZE
] = TARGET_EMSGSIZE
,
375 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
376 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
377 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
378 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
379 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
380 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
381 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
382 [EADDRINUSE
] = TARGET_EADDRINUSE
,
383 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
384 [ENETDOWN
] = TARGET_ENETDOWN
,
385 [ENETUNREACH
] = TARGET_ENETUNREACH
,
386 [ENETRESET
] = TARGET_ENETRESET
,
387 [ECONNABORTED
] = TARGET_ECONNABORTED
,
388 [ECONNRESET
] = TARGET_ECONNRESET
,
389 [ENOBUFS
] = TARGET_ENOBUFS
,
390 [EISCONN
] = TARGET_EISCONN
,
391 [ENOTCONN
] = TARGET_ENOTCONN
,
392 [EUCLEAN
] = TARGET_EUCLEAN
,
393 [ENOTNAM
] = TARGET_ENOTNAM
,
394 [ENAVAIL
] = TARGET_ENAVAIL
,
395 [EISNAM
] = TARGET_EISNAM
,
396 [EREMOTEIO
] = TARGET_EREMOTEIO
,
397 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
398 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
399 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
400 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
401 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
402 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
403 [EALREADY
] = TARGET_EALREADY
,
404 [EINPROGRESS
] = TARGET_EINPROGRESS
,
405 [ESTALE
] = TARGET_ESTALE
,
406 [ECANCELED
] = TARGET_ECANCELED
,
407 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
408 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
410 [ENOKEY
] = TARGET_ENOKEY
,
413 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
416 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
419 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
422 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
424 #ifdef ENOTRECOVERABLE
425 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
429 static inline int host_to_target_errno(int err
)
431 if(host_to_target_errno_table
[err
])
432 return host_to_target_errno_table
[err
];
436 static inline int target_to_host_errno(int err
)
438 if (target_to_host_errno_table
[err
])
439 return target_to_host_errno_table
[err
];
443 static inline abi_long
get_errno(abi_long ret
)
446 return -host_to_target_errno(errno
);
451 static inline int is_error(abi_long ret
)
453 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/* Return the host strerror() text for a target-numbered errno. */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
461 static abi_ulong target_brk
;
462 static abi_ulong target_original_brk
;
464 void target_set_brk(abi_ulong new_brk
)
466 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
469 /* do_brk() must return target values and target errnos. */
470 abi_long
do_brk(abi_ulong new_brk
)
473 abi_long mapped_addr
;
478 if (new_brk
< target_original_brk
)
481 brk_page
= HOST_PAGE_ALIGN(target_brk
);
483 /* If the new brk is less than this, set it and we're done... */
484 if (new_brk
< brk_page
) {
485 target_brk
= new_brk
;
489 /* We need to allocate more memory after the brk... */
490 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
+ 1);
491 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
492 PROT_READ
|PROT_WRITE
,
493 MAP_ANON
|MAP_FIXED
|MAP_PRIVATE
, 0, 0));
495 if (!is_error(mapped_addr
))
496 target_brk
= new_brk
;
501 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
502 abi_ulong target_fds_addr
,
506 abi_ulong b
, *target_fds
;
508 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
509 if (!(target_fds
= lock_user(VERIFY_READ
,
511 sizeof(abi_ulong
) * nw
,
513 return -TARGET_EFAULT
;
517 for (i
= 0; i
< nw
; i
++) {
518 /* grab the abi_ulong */
519 __get_user(b
, &target_fds
[i
]);
520 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
521 /* check the bit inside the abi_ulong */
528 unlock_user(target_fds
, target_fds_addr
, 0);
533 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
539 abi_ulong
*target_fds
;
541 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
542 if (!(target_fds
= lock_user(VERIFY_WRITE
,
544 sizeof(abi_ulong
) * nw
,
546 return -TARGET_EFAULT
;
549 for (i
= 0; i
< nw
; i
++) {
551 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
552 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
555 __put_user(v
, &target_fds
[i
]);
558 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
563 #if defined(__alpha__)
569 static inline abi_long
host_to_target_clock_t(long ticks
)
571 #if HOST_HZ == TARGET_HZ
574 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
578 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
579 const struct rusage
*rusage
)
581 struct target_rusage
*target_rusage
;
583 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
584 return -TARGET_EFAULT
;
585 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
586 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
587 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
588 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
589 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
590 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
591 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
592 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
593 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
594 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
595 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
596 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
597 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
598 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
599 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
600 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
601 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
602 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
603 unlock_user_struct(target_rusage
, target_addr
, 1);
608 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
609 abi_ulong target_tv_addr
)
611 struct target_timeval
*target_tv
;
613 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
614 return -TARGET_EFAULT
;
616 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
617 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
619 unlock_user_struct(target_tv
, target_tv_addr
, 0);
624 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
625 const struct timeval
*tv
)
627 struct target_timeval
*target_tv
;
629 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
630 return -TARGET_EFAULT
;
632 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
633 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
635 unlock_user_struct(target_tv
, target_tv_addr
, 1);
640 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
641 abi_ulong target_mq_attr_addr
)
643 struct target_mq_attr
*target_mq_attr
;
645 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
646 target_mq_attr_addr
, 1))
647 return -TARGET_EFAULT
;
649 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
650 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
651 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
652 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
654 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
659 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
660 const struct mq_attr
*attr
)
662 struct target_mq_attr
*target_mq_attr
;
664 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
665 target_mq_attr_addr
, 0))
666 return -TARGET_EFAULT
;
668 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
669 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
670 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
671 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
673 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
678 /* do_select() must return target values and target errnos. */
679 static abi_long
do_select(int n
,
680 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
681 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
683 fd_set rfds
, wfds
, efds
;
684 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
685 struct timeval tv
, *tv_ptr
;
689 if (copy_from_user_fdset(&rfds
, rfd_addr
, n
))
690 return -TARGET_EFAULT
;
696 if (copy_from_user_fdset(&wfds
, wfd_addr
, n
))
697 return -TARGET_EFAULT
;
703 if (copy_from_user_fdset(&efds
, efd_addr
, n
))
704 return -TARGET_EFAULT
;
710 if (target_tv_addr
) {
711 if (copy_from_user_timeval(&tv
, target_tv_addr
))
712 return -TARGET_EFAULT
;
718 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
720 if (!is_error(ret
)) {
721 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
722 return -TARGET_EFAULT
;
723 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
724 return -TARGET_EFAULT
;
725 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
726 return -TARGET_EFAULT
;
728 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
729 return -TARGET_EFAULT
;
735 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
736 abi_ulong target_addr
,
739 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
740 sa_family_t sa_family
;
741 struct target_sockaddr
*target_saddr
;
743 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
745 return -TARGET_EFAULT
;
747 sa_family
= tswap16(target_saddr
->sa_family
);
749 /* Oops. The caller might send a incomplete sun_path; sun_path
750 * must be terminated by \0 (see the manual page), but
751 * unfortunately it is quite common to specify sockaddr_un
752 * length as "strlen(x->sun_path)" while it should be
753 * "strlen(...) + 1". We'll fix that here if needed.
754 * Linux kernel has a similar feature.
757 if (sa_family
== AF_UNIX
) {
758 if (len
< unix_maxlen
&& len
> 0) {
759 char *cp
= (char*)target_saddr
;
761 if ( cp
[len
-1] && !cp
[len
] )
764 if (len
> unix_maxlen
)
768 memcpy(addr
, target_saddr
, len
);
769 addr
->sa_family
= sa_family
;
770 unlock_user(target_saddr
, target_addr
, 0);
775 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
776 struct sockaddr
*addr
,
779 struct target_sockaddr
*target_saddr
;
781 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
783 return -TARGET_EFAULT
;
784 memcpy(target_saddr
, addr
, len
);
785 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
786 unlock_user(target_saddr
, target_addr
, len
);
791 /* ??? Should this also swap msgh->name? */
792 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
793 struct target_msghdr
*target_msgh
)
795 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
796 abi_long msg_controllen
;
797 abi_ulong target_cmsg_addr
;
798 struct target_cmsghdr
*target_cmsg
;
801 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
802 if (msg_controllen
< sizeof (struct target_cmsghdr
))
804 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
805 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
807 return -TARGET_EFAULT
;
809 while (cmsg
&& target_cmsg
) {
810 void *data
= CMSG_DATA(cmsg
);
811 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
813 int len
= tswapl(target_cmsg
->cmsg_len
)
814 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
816 space
+= CMSG_SPACE(len
);
817 if (space
> msgh
->msg_controllen
) {
818 space
-= CMSG_SPACE(len
);
819 gemu_log("Host cmsg overflow\n");
823 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
824 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
825 cmsg
->cmsg_len
= CMSG_LEN(len
);
827 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
828 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
829 memcpy(data
, target_data
, len
);
831 int *fd
= (int *)data
;
832 int *target_fd
= (int *)target_data
;
833 int i
, numfds
= len
/ sizeof(int);
835 for (i
= 0; i
< numfds
; i
++)
836 fd
[i
] = tswap32(target_fd
[i
]);
839 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
840 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
842 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
844 msgh
->msg_controllen
= space
;
848 /* ??? Should this also swap msgh->name? */
849 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
852 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
853 abi_long msg_controllen
;
854 abi_ulong target_cmsg_addr
;
855 struct target_cmsghdr
*target_cmsg
;
858 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
859 if (msg_controllen
< sizeof (struct target_cmsghdr
))
861 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
862 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
864 return -TARGET_EFAULT
;
866 while (cmsg
&& target_cmsg
) {
867 void *data
= CMSG_DATA(cmsg
);
868 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
870 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
872 space
+= TARGET_CMSG_SPACE(len
);
873 if (space
> msg_controllen
) {
874 space
-= TARGET_CMSG_SPACE(len
);
875 gemu_log("Target cmsg overflow\n");
879 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
880 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
881 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
883 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
884 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
885 memcpy(target_data
, data
, len
);
887 int *fd
= (int *)data
;
888 int *target_fd
= (int *)target_data
;
889 int i
, numfds
= len
/ sizeof(int);
891 for (i
= 0; i
< numfds
; i
++)
892 target_fd
[i
] = tswap32(fd
[i
]);
895 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
896 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
898 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
900 target_msgh
->msg_controllen
= tswapl(space
);
904 /* do_setsockopt() Must return target values and target errnos. */
905 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
906 abi_ulong optval_addr
, socklen_t optlen
)
913 /* TCP options all take an 'int' value. */
914 if (optlen
< sizeof(uint32_t))
915 return -TARGET_EINVAL
;
917 if (get_user_u32(val
, optval_addr
))
918 return -TARGET_EFAULT
;
919 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
926 case IP_ROUTER_ALERT
:
930 case IP_MTU_DISCOVER
:
936 case IP_MULTICAST_TTL
:
937 case IP_MULTICAST_LOOP
:
939 if (optlen
>= sizeof(uint32_t)) {
940 if (get_user_u32(val
, optval_addr
))
941 return -TARGET_EFAULT
;
942 } else if (optlen
>= 1) {
943 if (get_user_u8(val
, optval_addr
))
944 return -TARGET_EFAULT
;
946 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
952 case TARGET_SOL_SOCKET
:
954 /* Options with 'int' argument. */
955 case TARGET_SO_DEBUG
:
958 case TARGET_SO_REUSEADDR
:
959 optname
= SO_REUSEADDR
;
964 case TARGET_SO_ERROR
:
967 case TARGET_SO_DONTROUTE
:
968 optname
= SO_DONTROUTE
;
970 case TARGET_SO_BROADCAST
:
971 optname
= SO_BROADCAST
;
973 case TARGET_SO_SNDBUF
:
976 case TARGET_SO_RCVBUF
:
979 case TARGET_SO_KEEPALIVE
:
980 optname
= SO_KEEPALIVE
;
982 case TARGET_SO_OOBINLINE
:
983 optname
= SO_OOBINLINE
;
985 case TARGET_SO_NO_CHECK
:
986 optname
= SO_NO_CHECK
;
988 case TARGET_SO_PRIORITY
:
989 optname
= SO_PRIORITY
;
992 case TARGET_SO_BSDCOMPAT
:
993 optname
= SO_BSDCOMPAT
;
996 case TARGET_SO_PASSCRED
:
997 optname
= SO_PASSCRED
;
999 case TARGET_SO_TIMESTAMP
:
1000 optname
= SO_TIMESTAMP
;
1002 case TARGET_SO_RCVLOWAT
:
1003 optname
= SO_RCVLOWAT
;
1005 case TARGET_SO_RCVTIMEO
:
1006 optname
= SO_RCVTIMEO
;
1008 case TARGET_SO_SNDTIMEO
:
1009 optname
= SO_SNDTIMEO
;
1015 if (optlen
< sizeof(uint32_t))
1016 return -TARGET_EINVAL
;
1018 if (get_user_u32(val
, optval_addr
))
1019 return -TARGET_EFAULT
;
1020 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1024 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1025 ret
= -TARGET_ENOPROTOOPT
;
1030 /* do_getsockopt() Must return target values and target errnos. */
1031 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1032 abi_ulong optval_addr
, abi_ulong optlen
)
1039 case TARGET_SOL_SOCKET
:
1042 case TARGET_SO_LINGER
:
1043 case TARGET_SO_RCVTIMEO
:
1044 case TARGET_SO_SNDTIMEO
:
1045 case TARGET_SO_PEERCRED
:
1046 case TARGET_SO_PEERNAME
:
1047 /* These don't just return a single integer */
1054 /* TCP options all take an 'int' value. */
1056 if (get_user_u32(len
, optlen
))
1057 return -TARGET_EFAULT
;
1059 return -TARGET_EINVAL
;
1061 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1068 if (put_user_u32(val
, optval_addr
))
1069 return -TARGET_EFAULT
;
1071 if (put_user_u8(val
, optval_addr
))
1072 return -TARGET_EFAULT
;
1074 if (put_user_u32(len
, optlen
))
1075 return -TARGET_EFAULT
;
1082 case IP_ROUTER_ALERT
:
1086 case IP_MTU_DISCOVER
:
1092 case IP_MULTICAST_TTL
:
1093 case IP_MULTICAST_LOOP
:
1094 if (get_user_u32(len
, optlen
))
1095 return -TARGET_EFAULT
;
1097 return -TARGET_EINVAL
;
1099 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1102 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1104 if (put_user_u32(len
, optlen
)
1105 || put_user_u8(val
, optval_addr
))
1106 return -TARGET_EFAULT
;
1108 if (len
> sizeof(int))
1110 if (put_user_u32(len
, optlen
)
1111 || put_user_u32(val
, optval_addr
))
1112 return -TARGET_EFAULT
;
1116 ret
= -TARGET_ENOPROTOOPT
;
1122 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1124 ret
= -TARGET_EOPNOTSUPP
;
1131 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1132 * other lock functions have a return code of 0 for failure.
1134 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1135 int count
, int copy
)
1137 struct target_iovec
*target_vec
;
1141 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1143 return -TARGET_EFAULT
;
1144 for(i
= 0;i
< count
; i
++) {
1145 base
= tswapl(target_vec
[i
].iov_base
);
1146 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1147 if (vec
[i
].iov_len
!= 0) {
1148 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1149 /* Don't check lock_user return value. We must call writev even
1150 if a element has invalid base address. */
1152 /* zero length pointer is ignored */
1153 vec
[i
].iov_base
= NULL
;
1156 unlock_user (target_vec
, target_addr
, 0);
1160 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1161 int count
, int copy
)
1163 struct target_iovec
*target_vec
;
1167 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1169 return -TARGET_EFAULT
;
1170 for(i
= 0;i
< count
; i
++) {
1171 if (target_vec
[i
].iov_base
) {
1172 base
= tswapl(target_vec
[i
].iov_base
);
1173 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1176 unlock_user (target_vec
, target_addr
, 0);
1181 /* do_socket() Must return target values and target errnos. */
1182 static abi_long
do_socket(int domain
, int type
, int protocol
)
1184 #if defined(TARGET_MIPS)
1186 case TARGET_SOCK_DGRAM
:
1189 case TARGET_SOCK_STREAM
:
1192 case TARGET_SOCK_RAW
:
1195 case TARGET_SOCK_RDM
:
1198 case TARGET_SOCK_SEQPACKET
:
1199 type
= SOCK_SEQPACKET
;
1201 case TARGET_SOCK_PACKET
:
1206 if (domain
== PF_NETLINK
)
1207 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1208 return get_errno(socket(domain
, type
, protocol
));
1211 /* MAX_SOCK_ADDR from linux/net/socket.c */
1212 #define MAX_SOCK_ADDR 128
1214 /* do_bind() Must return target values and target errnos. */
1215 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1220 if (addrlen
< 0 || addrlen
> MAX_SOCK_ADDR
)
1221 return -TARGET_EINVAL
;
1223 addr
= alloca(addrlen
+1);
1225 target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1226 return get_errno(bind(sockfd
, addr
, addrlen
));
1229 /* do_connect() Must return target values and target errnos. */
1230 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1235 if (addrlen
< 0 || addrlen
> MAX_SOCK_ADDR
)
1236 return -TARGET_EINVAL
;
1238 addr
= alloca(addrlen
);
1240 target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1241 return get_errno(connect(sockfd
, addr
, addrlen
));
1244 /* do_sendrecvmsg() Must return target values and target errnos. */
1245 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1246 int flags
, int send
)
1249 struct target_msghdr
*msgp
;
1253 abi_ulong target_vec
;
1256 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1260 return -TARGET_EFAULT
;
1261 if (msgp
->msg_name
) {
1262 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1263 msg
.msg_name
= alloca(msg
.msg_namelen
);
1264 target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1267 msg
.msg_name
= NULL
;
1268 msg
.msg_namelen
= 0;
1270 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1271 msg
.msg_control
= alloca(msg
.msg_controllen
);
1272 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1274 count
= tswapl(msgp
->msg_iovlen
);
1275 vec
= alloca(count
* sizeof(struct iovec
));
1276 target_vec
= tswapl(msgp
->msg_iov
);
1277 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1278 msg
.msg_iovlen
= count
;
1282 ret
= target_to_host_cmsg(&msg
, msgp
);
1284 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1286 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1287 if (!is_error(ret
)) {
1289 ret
= host_to_target_cmsg(msgp
, &msg
);
1294 unlock_iovec(vec
, target_vec
, count
, !send
);
1295 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1299 /* do_accept() Must return target values and target errnos. */
1300 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1301 abi_ulong target_addrlen_addr
)
1307 if (get_user_u32(addrlen
, target_addrlen_addr
))
1308 return -TARGET_EFAULT
;
1310 if (addrlen
< 0 || addrlen
> MAX_SOCK_ADDR
)
1311 return -TARGET_EINVAL
;
1313 addr
= alloca(addrlen
);
1315 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1316 if (!is_error(ret
)) {
1317 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1318 if (put_user_u32(addrlen
, target_addrlen_addr
))
1319 ret
= -TARGET_EFAULT
;
1324 /* do_getpeername() Must return target values and target errnos. */
1325 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1326 abi_ulong target_addrlen_addr
)
1332 if (get_user_u32(addrlen
, target_addrlen_addr
))
1333 return -TARGET_EFAULT
;
1335 if (addrlen
< 0 || addrlen
> MAX_SOCK_ADDR
)
1336 return -TARGET_EINVAL
;
1338 addr
= alloca(addrlen
);
1340 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1341 if (!is_error(ret
)) {
1342 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1343 if (put_user_u32(addrlen
, target_addrlen_addr
))
1344 ret
= -TARGET_EFAULT
;
1349 /* do_getsockname() Must return target values and target errnos. */
1350 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1351 abi_ulong target_addrlen_addr
)
1357 if (target_addr
== 0)
1358 return get_errno(accept(fd
, NULL
, NULL
));
1360 if (get_user_u32(addrlen
, target_addrlen_addr
))
1361 return -TARGET_EFAULT
;
1363 if (addrlen
< 0 || addrlen
> MAX_SOCK_ADDR
)
1364 return -TARGET_EINVAL
;
1366 addr
= alloca(addrlen
);
1368 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1369 if (!is_error(ret
)) {
1370 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1371 if (put_user_u32(addrlen
, target_addrlen_addr
))
1372 ret
= -TARGET_EFAULT
;
1377 /* do_socketpair() Must return target values and target errnos. */
1378 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1379 abi_ulong target_tab_addr
)
1384 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1385 if (!is_error(ret
)) {
1386 if (put_user_s32(tab
[0], target_tab_addr
)
1387 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1388 ret
= -TARGET_EFAULT
;
1393 /* do_sendto() Must return target values and target errnos. */
1394 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1395 abi_ulong target_addr
, socklen_t addrlen
)
1401 if (addrlen
< 0 || addrlen
> MAX_SOCK_ADDR
)
1402 return -TARGET_EINVAL
;
1404 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1406 return -TARGET_EFAULT
;
1408 addr
= alloca(addrlen
);
1409 target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1410 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1412 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1414 unlock_user(host_msg
, msg
, 0);
1418 /* do_recvfrom() Must return target values and target errnos. */
1419 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1420 abi_ulong target_addr
,
1421 abi_ulong target_addrlen
)
1428 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1430 return -TARGET_EFAULT
;
1432 if (get_user_u32(addrlen
, target_addrlen
)) {
1433 ret
= -TARGET_EFAULT
;
1436 if (addrlen
< 0 || addrlen
> MAX_SOCK_ADDR
) {
1437 ret
= -TARGET_EINVAL
;
1440 addr
= alloca(addrlen
);
1441 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1443 addr
= NULL
; /* To keep compiler quiet. */
1444 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1446 if (!is_error(ret
)) {
1448 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1449 if (put_user_u32(addrlen
, target_addrlen
)) {
1450 ret
= -TARGET_EFAULT
;
1454 unlock_user(host_msg
, msg
, len
);
1457 unlock_user(host_msg
, msg
, 0);
1462 #ifdef TARGET_NR_socketcall
1463 /* do_socketcall() Must return target values and target errnos. */
1464 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1467 const int n
= sizeof(abi_ulong
);
1472 int domain
, type
, protocol
;
1474 if (get_user_s32(domain
, vptr
)
1475 || get_user_s32(type
, vptr
+ n
)
1476 || get_user_s32(protocol
, vptr
+ 2 * n
))
1477 return -TARGET_EFAULT
;
1479 ret
= do_socket(domain
, type
, protocol
);
1485 abi_ulong target_addr
;
1488 if (get_user_s32(sockfd
, vptr
)
1489 || get_user_ual(target_addr
, vptr
+ n
)
1490 || get_user_u32(addrlen
, vptr
+ 2 * n
))
1491 return -TARGET_EFAULT
;
1493 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1496 case SOCKOP_connect
:
1499 abi_ulong target_addr
;
1502 if (get_user_s32(sockfd
, vptr
)
1503 || get_user_ual(target_addr
, vptr
+ n
)
1504 || get_user_u32(addrlen
, vptr
+ 2 * n
))
1505 return -TARGET_EFAULT
;
1507 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1512 int sockfd
, backlog
;
1514 if (get_user_s32(sockfd
, vptr
)
1515 || get_user_s32(backlog
, vptr
+ n
))
1516 return -TARGET_EFAULT
;
1518 ret
= get_errno(listen(sockfd
, backlog
));
1524 abi_ulong target_addr
, target_addrlen
;
1526 if (get_user_s32(sockfd
, vptr
)
1527 || get_user_ual(target_addr
, vptr
+ n
)
1528 || get_user_u32(target_addrlen
, vptr
+ 2 * n
))
1529 return -TARGET_EFAULT
;
1531 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1534 case SOCKOP_getsockname
:
1537 abi_ulong target_addr
, target_addrlen
;
1539 if (get_user_s32(sockfd
, vptr
)
1540 || get_user_ual(target_addr
, vptr
+ n
)
1541 || get_user_u32(target_addrlen
, vptr
+ 2 * n
))
1542 return -TARGET_EFAULT
;
1544 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1547 case SOCKOP_getpeername
:
1550 abi_ulong target_addr
, target_addrlen
;
1552 if (get_user_s32(sockfd
, vptr
)
1553 || get_user_ual(target_addr
, vptr
+ n
)
1554 || get_user_u32(target_addrlen
, vptr
+ 2 * n
))
1555 return -TARGET_EFAULT
;
1557 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
1560 case SOCKOP_socketpair
:
1562 int domain
, type
, protocol
;
1565 if (get_user_s32(domain
, vptr
)
1566 || get_user_s32(type
, vptr
+ n
)
1567 || get_user_s32(protocol
, vptr
+ 2 * n
)
1568 || get_user_ual(tab
, vptr
+ 3 * n
))
1569 return -TARGET_EFAULT
;
1571 ret
= do_socketpair(domain
, type
, protocol
, tab
);
1581 if (get_user_s32(sockfd
, vptr
)
1582 || get_user_ual(msg
, vptr
+ n
)
1583 || get_user_ual(len
, vptr
+ 2 * n
)
1584 || get_user_s32(flags
, vptr
+ 3 * n
))
1585 return -TARGET_EFAULT
;
1587 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
1597 if (get_user_s32(sockfd
, vptr
)
1598 || get_user_ual(msg
, vptr
+ n
)
1599 || get_user_ual(len
, vptr
+ 2 * n
)
1600 || get_user_s32(flags
, vptr
+ 3 * n
))
1601 return -TARGET_EFAULT
;
1603 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
1615 if (get_user_s32(sockfd
, vptr
)
1616 || get_user_ual(msg
, vptr
+ n
)
1617 || get_user_ual(len
, vptr
+ 2 * n
)
1618 || get_user_s32(flags
, vptr
+ 3 * n
)
1619 || get_user_ual(addr
, vptr
+ 4 * n
)
1620 || get_user_u32(addrlen
, vptr
+ 5 * n
))
1621 return -TARGET_EFAULT
;
1623 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1626 case SOCKOP_recvfrom
:
1635 if (get_user_s32(sockfd
, vptr
)
1636 || get_user_ual(msg
, vptr
+ n
)
1637 || get_user_ual(len
, vptr
+ 2 * n
)
1638 || get_user_s32(flags
, vptr
+ 3 * n
)
1639 || get_user_ual(addr
, vptr
+ 4 * n
)
1640 || get_user_u32(addrlen
, vptr
+ 5 * n
))
1641 return -TARGET_EFAULT
;
1643 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1646 case SOCKOP_shutdown
:
1650 if (get_user_s32(sockfd
, vptr
)
1651 || get_user_s32(how
, vptr
+ n
))
1652 return -TARGET_EFAULT
;
1654 ret
= get_errno(shutdown(sockfd
, how
));
1657 case SOCKOP_sendmsg
:
1658 case SOCKOP_recvmsg
:
1661 abi_ulong target_msg
;
1664 if (get_user_s32(fd
, vptr
)
1665 || get_user_ual(target_msg
, vptr
+ n
)
1666 || get_user_s32(flags
, vptr
+ 2 * n
))
1667 return -TARGET_EFAULT
;
1669 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
1670 (num
== SOCKOP_sendmsg
));
1673 case SOCKOP_setsockopt
:
1681 if (get_user_s32(sockfd
, vptr
)
1682 || get_user_s32(level
, vptr
+ n
)
1683 || get_user_s32(optname
, vptr
+ 2 * n
)
1684 || get_user_ual(optval
, vptr
+ 3 * n
)
1685 || get_user_u32(optlen
, vptr
+ 4 * n
))
1686 return -TARGET_EFAULT
;
1688 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
1691 case SOCKOP_getsockopt
:
1699 if (get_user_s32(sockfd
, vptr
)
1700 || get_user_s32(level
, vptr
+ n
)
1701 || get_user_s32(optname
, vptr
+ 2 * n
)
1702 || get_user_ual(optval
, vptr
+ 3 * n
)
1703 || get_user_u32(optlen
, vptr
+ 4 * n
))
1704 return -TARGET_EFAULT
;
1706 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
1710 gemu_log("Unsupported socketcall: %d\n", num
);
1711 ret
= -TARGET_ENOSYS
;
1718 #ifdef TARGET_NR_ipc
1719 #define N_SHM_REGIONS 32
1721 static struct shm_region
{
1724 } shm_regions
[N_SHM_REGIONS
];
1727 struct target_ipc_perm
1734 unsigned short int mode
;
1735 unsigned short int __pad1
;
1736 unsigned short int __seq
;
1737 unsigned short int __pad2
;
1738 abi_ulong __unused1
;
1739 abi_ulong __unused2
;
1742 struct target_semid_ds
1744 struct target_ipc_perm sem_perm
;
1745 abi_ulong sem_otime
;
1746 abi_ulong __unused1
;
1747 abi_ulong sem_ctime
;
1748 abi_ulong __unused2
;
1749 abi_ulong sem_nsems
;
1750 abi_ulong __unused3
;
1751 abi_ulong __unused4
;
1754 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
1755 abi_ulong target_addr
)
1757 struct target_ipc_perm
*target_ip
;
1758 struct target_semid_ds
*target_sd
;
1760 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
1761 return -TARGET_EFAULT
;
1762 target_ip
=&(target_sd
->sem_perm
);
1763 host_ip
->__key
= tswapl(target_ip
->__key
);
1764 host_ip
->uid
= tswapl(target_ip
->uid
);
1765 host_ip
->gid
= tswapl(target_ip
->gid
);
1766 host_ip
->cuid
= tswapl(target_ip
->cuid
);
1767 host_ip
->cgid
= tswapl(target_ip
->cgid
);
1768 host_ip
->mode
= tswapl(target_ip
->mode
);
1769 unlock_user_struct(target_sd
, target_addr
, 0);
1773 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
1774 struct ipc_perm
*host_ip
)
1776 struct target_ipc_perm
*target_ip
;
1777 struct target_semid_ds
*target_sd
;
1779 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
1780 return -TARGET_EFAULT
;
1781 target_ip
= &(target_sd
->sem_perm
);
1782 target_ip
->__key
= tswapl(host_ip
->__key
);
1783 target_ip
->uid
= tswapl(host_ip
->uid
);
1784 target_ip
->gid
= tswapl(host_ip
->gid
);
1785 target_ip
->cuid
= tswapl(host_ip
->cuid
);
1786 target_ip
->cgid
= tswapl(host_ip
->cgid
);
1787 target_ip
->mode
= tswapl(host_ip
->mode
);
1788 unlock_user_struct(target_sd
, target_addr
, 1);
1792 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
1793 abi_ulong target_addr
)
1795 struct target_semid_ds
*target_sd
;
1797 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
1798 return -TARGET_EFAULT
;
1799 target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
);
1800 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
1801 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
1802 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
1803 unlock_user_struct(target_sd
, target_addr
, 0);
1807 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
1808 struct semid_ds
*host_sd
)
1810 struct target_semid_ds
*target_sd
;
1812 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
1813 return -TARGET_EFAULT
;
1814 host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
));
1815 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
1816 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
1817 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
1818 unlock_user_struct(target_sd
, target_addr
, 1);
1824 struct semid_ds
*buf
;
1825 unsigned short *array
;
1828 union target_semun
{
1831 unsigned short int *array
;
1834 static inline abi_long
target_to_host_semun(int cmd
,
1835 union semun
*host_su
,
1836 abi_ulong target_addr
,
1837 struct semid_ds
*ds
)
1839 union target_semun
*target_su
;
1844 if (!lock_user_struct(VERIFY_READ
, target_su
, target_addr
, 1))
1845 return -TARGET_EFAULT
;
1846 target_to_host_semid_ds(ds
,target_su
->buf
);
1848 unlock_user_struct(target_su
, target_addr
, 0);
1852 if (!lock_user_struct(VERIFY_READ
, target_su
, target_addr
, 1))
1853 return -TARGET_EFAULT
;
1854 host_su
->val
= tswapl(target_su
->val
);
1855 unlock_user_struct(target_su
, target_addr
, 0);
1859 if (!lock_user_struct(VERIFY_READ
, target_su
, target_addr
, 1))
1860 return -TARGET_EFAULT
;
1861 *host_su
->array
= tswap16(*target_su
->array
);
1862 unlock_user_struct(target_su
, target_addr
, 0);
1865 gemu_log("semun operation not fully supported: %d\n", (int)cmd
);
1870 static inline abi_long
host_to_target_semun(int cmd
,
1871 abi_ulong target_addr
,
1872 union semun
*host_su
,
1873 struct semid_ds
*ds
)
1875 union target_semun
*target_su
;
1880 if (lock_user_struct(VERIFY_WRITE
, target_su
, target_addr
, 0))
1881 return -TARGET_EFAULT
;
1882 host_to_target_semid_ds(target_su
->buf
,ds
);
1883 unlock_user_struct(target_su
, target_addr
, 1);
1887 if (lock_user_struct(VERIFY_WRITE
, target_su
, target_addr
, 0))
1888 return -TARGET_EFAULT
;
1889 target_su
->val
= tswapl(host_su
->val
);
1890 unlock_user_struct(target_su
, target_addr
, 1);
1894 if (lock_user_struct(VERIFY_WRITE
, target_su
, target_addr
, 0))
1895 return -TARGET_EFAULT
;
1896 *target_su
->array
= tswap16(*host_su
->array
);
1897 unlock_user_struct(target_su
, target_addr
, 1);
1900 gemu_log("semun operation not fully supported: %d\n", (int)cmd
);
1905 static inline abi_long
do_semctl(int first
, int second
, int third
,
1909 struct semid_ds dsarg
;
1910 int cmd
= third
&0xff;
1915 target_to_host_semun(cmd
,&arg
,ptr
,&dsarg
);
1916 ret
= get_errno(semctl(first
, second
, cmd
, arg
));
1917 host_to_target_semun(cmd
,ptr
,&arg
,&dsarg
);
1920 target_to_host_semun(cmd
,&arg
,ptr
,&dsarg
);
1921 ret
= get_errno(semctl(first
, second
, cmd
, arg
));
1922 host_to_target_semun(cmd
,ptr
,&arg
,&dsarg
);
1925 target_to_host_semun(cmd
,&arg
,ptr
,&dsarg
);
1926 ret
= get_errno(semctl(first
, second
, cmd
, arg
));
1927 host_to_target_semun(cmd
,ptr
,&arg
,&dsarg
);
1930 target_to_host_semun(cmd
,&arg
,ptr
,&dsarg
);
1931 ret
= get_errno(semctl(first
, second
, cmd
, arg
));
1932 host_to_target_semun(cmd
,ptr
,&arg
,&dsarg
);
1935 target_to_host_semun(cmd
,&arg
,ptr
,&dsarg
);
1936 ret
= get_errno(semctl(first
, second
, cmd
, arg
));
1937 host_to_target_semun(cmd
,ptr
,&arg
,&dsarg
);
1940 target_to_host_semun(cmd
,&arg
,ptr
,&dsarg
);
1941 ret
= get_errno(semctl(first
, second
, cmd
, arg
));
1942 host_to_target_semun(cmd
,ptr
,&arg
,&dsarg
);
1945 ret
= get_errno(semctl(first
, second
, cmd
, arg
));
1951 struct target_msqid_ds
1953 struct target_ipc_perm msg_perm
;
1954 abi_ulong msg_stime
;
1955 #if TARGET_ABI_BITS == 32
1956 abi_ulong __unused1
;
1958 abi_ulong msg_rtime
;
1959 #if TARGET_ABI_BITS == 32
1960 abi_ulong __unused2
;
1962 abi_ulong msg_ctime
;
1963 #if TARGET_ABI_BITS == 32
1964 abi_ulong __unused3
;
1966 abi_ulong __msg_cbytes
;
1968 abi_ulong msg_qbytes
;
1969 abi_ulong msg_lspid
;
1970 abi_ulong msg_lrpid
;
1971 abi_ulong __unused4
;
1972 abi_ulong __unused5
;
1975 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
1976 abi_ulong target_addr
)
1978 struct target_msqid_ds
*target_md
;
1980 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
1981 return -TARGET_EFAULT
;
1982 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
1983 return -TARGET_EFAULT
;
1984 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
1985 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
1986 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
1987 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
1988 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
1989 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
1990 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
1991 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
1992 unlock_user_struct(target_md
, target_addr
, 0);
1996 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
1997 struct msqid_ds
*host_md
)
1999 struct target_msqid_ds
*target_md
;
2001 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2002 return -TARGET_EFAULT
;
2003 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2004 return -TARGET_EFAULT
;
2005 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2006 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2007 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2008 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2009 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2010 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2011 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2012 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2013 unlock_user_struct(target_md
, target_addr
, 1);
2017 struct target_msginfo
{
2025 unsigned short int msgseg
;
2028 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2029 struct msginfo
*host_msginfo
)
2031 struct target_msginfo
*target_msginfo
;
2032 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2033 return -TARGET_EFAULT
;
2034 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2035 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2036 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2037 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2038 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2039 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2040 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2041 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2042 unlock_user_struct(target_msginfo
, target_addr
, 1);
2046 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2048 struct msqid_ds dsarg
;
2049 struct msginfo msginfo
;
2050 abi_long ret
= -TARGET_EINVAL
;
2058 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2059 return -TARGET_EFAULT
;
2060 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2061 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2062 return -TARGET_EFAULT
;
2065 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2069 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2070 if (host_to_target_msginfo(ptr
, &msginfo
))
2071 return -TARGET_EFAULT
;
2078 struct target_msgbuf
{
2083 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2084 unsigned int msgsz
, int msgflg
)
2086 struct target_msgbuf
*target_mb
;
2087 struct msgbuf
*host_mb
;
2090 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2091 return -TARGET_EFAULT
;
2092 host_mb
= malloc(msgsz
+sizeof(long));
2093 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2094 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2095 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2097 unlock_user_struct(target_mb
, msgp
, 0);
2102 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2103 unsigned int msgsz
, abi_long msgtyp
,
2106 struct target_msgbuf
*target_mb
;
2108 struct msgbuf
*host_mb
;
2111 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2112 return -TARGET_EFAULT
;
2114 host_mb
= malloc(msgsz
+sizeof(long));
2115 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2118 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2119 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2120 if (!target_mtext
) {
2121 ret
= -TARGET_EFAULT
;
2124 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2125 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2128 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2133 unlock_user_struct(target_mb
, msgp
, 1);
2137 #ifdef TARGET_NR_ipc
2138 /* ??? This only works with linear mappings. */
2139 /* do_ipc() must return target values and target errnos. */
2140 static abi_long
do_ipc(unsigned int call
, int first
,
2141 int second
, int third
,
2142 abi_long ptr
, abi_long fifth
)
2146 struct shmid_ds shm_info
;
2149 version
= call
>> 16;
2154 ret
= get_errno(semop(first
,(struct sembuf
*)g2h(ptr
), second
));
2158 ret
= get_errno(semget(first
, second
, third
));
2162 ret
= do_semctl(first
, second
, third
, ptr
);
2165 case IPCOP_semtimedop
:
2166 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
2167 ret
= -TARGET_ENOSYS
;
2171 ret
= get_errno(msgget(first
, second
));
2175 ret
= do_msgsnd(first
, ptr
, second
, third
);
2179 ret
= do_msgctl(first
, second
, ptr
);
2186 struct target_ipc_kludge
{
2191 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
2192 ret
= -TARGET_EFAULT
;
2196 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
2198 unlock_user_struct(tmp
, ptr
, 0);
2202 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
2210 /* SHM_* flags are the same on all linux platforms */
2211 host_addr
= shmat(first
, (void *)g2h(ptr
), second
);
2212 if (host_addr
== (void *)-1) {
2213 ret
= get_errno((long)host_addr
);
2216 raddr
= h2g((unsigned long)host_addr
);
2217 /* find out the length of the shared memory segment */
2219 ret
= get_errno(shmctl(first
, IPC_STAT
, &shm_info
));
2220 if (is_error(ret
)) {
2221 /* can't get length, bail out */
2225 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2226 PAGE_VALID
| PAGE_READ
|
2227 ((second
& SHM_RDONLY
)? 0: PAGE_WRITE
));
2228 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2229 if (shm_regions
[i
].start
== 0) {
2230 shm_regions
[i
].start
= raddr
;
2231 shm_regions
[i
].size
= shm_info
.shm_segsz
;
2235 if (put_user_ual(raddr
, third
))
2236 return -TARGET_EFAULT
;
2241 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2242 if (shm_regions
[i
].start
== ptr
) {
2243 shm_regions
[i
].start
= 0;
2244 page_set_flags(ptr
, shm_regions
[i
].size
, 0);
2248 ret
= get_errno(shmdt((void *)g2h(ptr
)));
2252 /* IPC_* flag values are the same on all linux platforms */
2253 ret
= get_errno(shmget(first
, second
, third
));
2256 /* IPC_* and SHM_* command values are the same on all linux platforms */
2262 ret
= get_errno(shmctl(first
, second
, NULL
));
2270 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
2271 ret
= -TARGET_ENOSYS
;
2278 /* kernel structure types definitions */
2281 #define STRUCT(name, list...) STRUCT_ ## name,
2282 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2284 #include "syscall_types.h"
2287 #undef STRUCT_SPECIAL
2289 #define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
2290 #define STRUCT_SPECIAL(name)
2291 #include "syscall_types.h"
2293 #undef STRUCT_SPECIAL
2295 typedef struct IOCTLEntry
{
2296 unsigned int target_cmd
;
2297 unsigned int host_cmd
;
2300 const argtype arg_type
[5];
2303 #define IOC_R 0x0001
2304 #define IOC_W 0x0002
2305 #define IOC_RW (IOC_R | IOC_W)
2307 #define MAX_STRUCT_SIZE 4096
2309 static IOCTLEntry ioctl_entries
[] = {
2310 #define IOCTL(cmd, access, types...) \
2311 { TARGET_ ## cmd, cmd, #cmd, access, { types } },
2316 /* ??? Implement proper locking for ioctls. */
2317 /* do_ioctl() Must return target values and target errnos. */
2318 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
2320 const IOCTLEntry
*ie
;
2321 const argtype
*arg_type
;
2323 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
2329 if (ie
->target_cmd
== 0) {
2330 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
2331 return -TARGET_ENOSYS
;
2333 if (ie
->target_cmd
== cmd
)
2337 arg_type
= ie
->arg_type
;
2339 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
2341 switch(arg_type
[0]) {
2344 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
2349 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
2353 target_size
= thunk_type_size(arg_type
, 0);
2354 switch(ie
->access
) {
2356 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2357 if (!is_error(ret
)) {
2358 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
2360 return -TARGET_EFAULT
;
2361 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
2362 unlock_user(argptr
, arg
, target_size
);
2366 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
2368 return -TARGET_EFAULT
;
2369 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
2370 unlock_user(argptr
, arg
, 0);
2371 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2375 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
2377 return -TARGET_EFAULT
;
2378 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
2379 unlock_user(argptr
, arg
, 0);
2380 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2381 if (!is_error(ret
)) {
2382 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
2384 return -TARGET_EFAULT
;
2385 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
2386 unlock_user(argptr
, arg
, target_size
);
2392 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2393 (long)cmd
, arg_type
[0]);
2394 ret
= -TARGET_ENOSYS
;
2400 static const bitmask_transtbl iflag_tbl
[] = {
2401 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
2402 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
2403 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
2404 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
2405 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
2406 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
2407 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
2408 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
2409 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
2410 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
2411 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
2412 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
2413 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
2414 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
2418 static const bitmask_transtbl oflag_tbl
[] = {
2419 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
2420 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
2421 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
2422 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
2423 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
2424 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
2425 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
2426 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
2427 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
2428 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
2429 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
2430 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
2431 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
2432 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
2433 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
2434 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
2435 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
2436 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
2437 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
2438 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
2439 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
2440 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
2441 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
2442 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
2446 static const bitmask_transtbl cflag_tbl
[] = {
2447 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
2448 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
2449 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
2450 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
2451 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
2452 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
2453 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
2454 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
2455 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
2456 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
2457 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
2458 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
2459 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
2460 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
2461 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
2462 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
2463 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
2464 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
2465 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
2466 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
2467 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
2468 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
2469 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
2470 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
2471 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
2472 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
2473 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
2474 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
2475 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
2476 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
2477 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
2481 static const bitmask_transtbl lflag_tbl
[] = {
2482 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
2483 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
2484 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
2485 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
2486 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
2487 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
2488 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
2489 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
2490 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
2491 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
2492 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
2493 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
2494 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
2495 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
2496 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
2500 static void target_to_host_termios (void *dst
, const void *src
)
2502 struct host_termios
*host
= dst
;
2503 const struct target_termios
*target
= src
;
2506 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
2508 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
2510 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
2512 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
2513 host
->c_line
= target
->c_line
;
2515 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
2516 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
2517 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
2518 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
2519 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
2520 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
2521 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
2522 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
2523 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
2524 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
2525 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
2526 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
2527 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
2528 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
2529 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
2530 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
2531 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
2534 static void host_to_target_termios (void *dst
, const void *src
)
2536 struct target_termios
*target
= dst
;
2537 const struct host_termios
*host
= src
;
2540 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
2542 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
2544 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
2546 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
2547 target
->c_line
= host
->c_line
;
2549 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
2550 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
2551 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
2552 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
2553 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
2554 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
2555 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
2556 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
2557 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
2558 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
2559 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
2560 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
2561 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
2562 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
2563 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
2564 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
2565 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
2568 static const StructEntry struct_termios_def
= {
2569 .convert
= { host_to_target_termios
, target_to_host_termios
},
2570 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
2571 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
2574 static bitmask_transtbl mmap_flags_tbl
[] = {
2575 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
2576 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
2577 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
2578 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
2579 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
2580 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
2581 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
2582 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
2586 static bitmask_transtbl fcntl_flags_tbl
[] = {
2587 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
2588 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
2589 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
2590 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
2591 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
2592 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
2593 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
2594 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
2595 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
2596 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
2597 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
2598 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
2599 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
2600 #if defined(O_DIRECT)
2601 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
2606 #if defined(TARGET_I386)
2608 /* NOTE: there is really one LDT for all the threads */
2609 static uint8_t *ldt_table
;
2611 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
2618 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
2619 if (size
> bytecount
)
2621 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
2623 return -TARGET_EFAULT
;
2624 /* ??? Should this by byteswapped? */
2625 memcpy(p
, ldt_table
, size
);
2626 unlock_user(p
, ptr
, size
);
2630 /* XXX: add locking support */
2631 static abi_long
write_ldt(CPUX86State
*env
,
2632 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
2634 struct target_modify_ldt_ldt_s ldt_info
;
2635 struct target_modify_ldt_ldt_s
*target_ldt_info
;
2636 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
2637 int seg_not_present
, useable
, lm
;
2638 uint32_t *lp
, entry_1
, entry_2
;
2640 if (bytecount
!= sizeof(ldt_info
))
2641 return -TARGET_EINVAL
;
2642 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
2643 return -TARGET_EFAULT
;
2644 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
2645 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
2646 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
2647 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
2648 unlock_user_struct(target_ldt_info
, ptr
, 0);
2650 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
2651 return -TARGET_EINVAL
;
2652 seg_32bit
= ldt_info
.flags
& 1;
2653 contents
= (ldt_info
.flags
>> 1) & 3;
2654 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
2655 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
2656 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
2657 useable
= (ldt_info
.flags
>> 6) & 1;
2661 lm
= (ldt_info
.flags
>> 7) & 1;
2663 if (contents
== 3) {
2665 return -TARGET_EINVAL
;
2666 if (seg_not_present
== 0)
2667 return -TARGET_EINVAL
;
2669 /* allocate the LDT */
2671 env
->ldt
.base
= target_mmap(0,
2672 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
2673 PROT_READ
|PROT_WRITE
,
2674 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
2675 if (env
->ldt
.base
== -1)
2676 return -TARGET_ENOMEM
;
2677 memset(g2h(env
->ldt
.base
), 0,
2678 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
2679 env
->ldt
.limit
= 0xffff;
2680 ldt_table
= g2h(env
->ldt
.base
);
2683 /* NOTE: same code as Linux kernel */
2684 /* Allow LDTs to be cleared by the user. */
2685 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
2688 read_exec_only
== 1 &&
2690 limit_in_pages
== 0 &&
2691 seg_not_present
== 1 &&
2699 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
2700 (ldt_info
.limit
& 0x0ffff);
2701 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
2702 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
2703 (ldt_info
.limit
& 0xf0000) |
2704 ((read_exec_only
^ 1) << 9) |
2706 ((seg_not_present
^ 1) << 15) |
2708 (limit_in_pages
<< 23) |
2712 entry_2
|= (useable
<< 20);
2714 /* Install the new entry ... */
2716 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
2717 lp
[0] = tswap32(entry_1
);
2718 lp
[1] = tswap32(entry_2
);
2722 /* specific and weird i386 syscalls */
2723 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
2724 unsigned long bytecount
)
2730 ret
= read_ldt(ptr
, bytecount
);
2733 ret
= write_ldt(env
, ptr
, bytecount
, 1);
2736 ret
= write_ldt(env
, ptr
, bytecount
, 0);
2739 ret
= -TARGET_ENOSYS
;
2745 #if defined(TARGET_I386) && defined(TARGET_ABI32)
2746 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
2748 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
2749 struct target_modify_ldt_ldt_s ldt_info
;
2750 struct target_modify_ldt_ldt_s
*target_ldt_info
;
2751 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
2752 int seg_not_present
, useable
, lm
;
2753 uint32_t *lp
, entry_1
, entry_2
;
2756 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
2757 if (!target_ldt_info
)
2758 return -TARGET_EFAULT
;
2759 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
2760 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
2761 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
2762 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
2763 if (ldt_info
.entry_number
== -1) {
2764 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
2765 if (gdt_table
[i
] == 0) {
2766 ldt_info
.entry_number
= i
;
2767 target_ldt_info
->entry_number
= tswap32(i
);
2772 unlock_user_struct(target_ldt_info
, ptr
, 1);
2774 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
2775 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
2776 return -TARGET_EINVAL
;
2777 seg_32bit
= ldt_info
.flags
& 1;
2778 contents
= (ldt_info
.flags
>> 1) & 3;
2779 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
2780 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
2781 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
2782 useable
= (ldt_info
.flags
>> 6) & 1;
2786 lm
= (ldt_info
.flags
>> 7) & 1;
2789 if (contents
== 3) {
2790 if (seg_not_present
== 0)
2791 return -TARGET_EINVAL
;
2794 /* NOTE: same code as Linux kernel */
2795 /* Allow LDTs to be cleared by the user. */
2796 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
2797 if ((contents
== 0 &&
2798 read_exec_only
== 1 &&
2800 limit_in_pages
== 0 &&
2801 seg_not_present
== 1 &&
2809 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
2810 (ldt_info
.limit
& 0x0ffff);
2811 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
2812 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
2813 (ldt_info
.limit
& 0xf0000) |
2814 ((read_exec_only
^ 1) << 9) |
2816 ((seg_not_present
^ 1) << 15) |
2818 (limit_in_pages
<< 23) |
2823 /* Install the new entry ... */
2825 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
2826 lp
[0] = tswap32(entry_1
);
2827 lp
[1] = tswap32(entry_2
);
2831 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
2833 struct target_modify_ldt_ldt_s
*target_ldt_info
;
2834 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
2835 uint32_t base_addr
, limit
, flags
;
2836 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
2837 int seg_not_present
, useable
, lm
;
2838 uint32_t *lp
, entry_1
, entry_2
;
2840 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
2841 if (!target_ldt_info
)
2842 return -TARGET_EFAULT
;
2843 idx
= tswap32(target_ldt_info
->entry_number
);
2844 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
2845 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
2846 unlock_user_struct(target_ldt_info
, ptr
, 1);
2847 return -TARGET_EINVAL
;
2849 lp
= (uint32_t *)(gdt_table
+ idx
);
2850 entry_1
= tswap32(lp
[0]);
2851 entry_2
= tswap32(lp
[1]);
2853 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
2854 contents
= (entry_2
>> 10) & 3;
2855 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
2856 seg_32bit
= (entry_2
>> 22) & 1;
2857 limit_in_pages
= (entry_2
>> 23) & 1;
2858 useable
= (entry_2
>> 20) & 1;
2862 lm
= (entry_2
>> 21) & 1;
2864 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
2865 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
2866 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
2867 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
2868 base_addr
= (entry_1
>> 16) |
2869 (entry_2
& 0xff000000) |
2870 ((entry_2
& 0xff) << 16);
2871 target_ldt_info
->base_addr
= tswapl(base_addr
);
2872 target_ldt_info
->limit
= tswap32(limit
);
2873 target_ldt_info
->flags
= tswap32(flags
);
2874 unlock_user_struct(target_ldt_info
, ptr
, 1);
2877 #endif /* TARGET_I386 && TARGET_ABI32 */
2879 #ifndef TARGET_ABI32
2880 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
2887 case TARGET_ARCH_SET_GS
:
2888 case TARGET_ARCH_SET_FS
:
2889 if (code
== TARGET_ARCH_SET_GS
)
2893 cpu_x86_load_seg(env
, idx
, 0);
2894 env
->segs
[idx
].base
= addr
;
2896 case TARGET_ARCH_GET_GS
:
2897 case TARGET_ARCH_GET_FS
:
2898 if (code
== TARGET_ARCH_GET_GS
)
2902 val
= env
->segs
[idx
].base
;
2903 if (put_user(val
, addr
, abi_ulong
))
2904 return -TARGET_EFAULT
;
2907 ret
= -TARGET_EINVAL
;
2914 #endif /* defined(TARGET_I386) */
2916 #if defined(USE_NPTL)
2918 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
2920 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
2923 pthread_mutex_t mutex
;
2924 pthread_cond_t cond
;
2927 abi_ulong child_tidptr
;
2928 abi_ulong parent_tidptr
;
2932 static void *clone_func(void *arg
)
2934 new_thread_info
*info
= arg
;
2939 info
->tid
= gettid();
2940 if (info
->child_tidptr
)
2941 put_user_u32(info
->tid
, info
->child_tidptr
);
2942 if (info
->parent_tidptr
)
2943 put_user_u32(info
->tid
, info
->parent_tidptr
);
2944 /* Enable signals. */
2945 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
2946 /* Signal to the parent that we're ready. */
2947 pthread_mutex_lock(&info
->mutex
);
2948 pthread_cond_broadcast(&info
->cond
);
2949 pthread_mutex_unlock(&info
->mutex
);
2950 /* Wait until the parent has finshed initializing the tls state. */
2951 pthread_mutex_lock(&clone_lock
);
2952 pthread_mutex_unlock(&clone_lock
);
2958 /* this stack is the equivalent of the kernel stack associated with a
2960 #define NEW_STACK_SIZE 8192
2962 static int clone_func(void *arg
)
2964 CPUState
*env
= arg
;
2971 /* do_fork() Must return host values and target errnos (unlike most
2972 do_*() functions). */
2973 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
2974 abi_ulong parent_tidptr
, target_ulong newtls
,
2975 abi_ulong child_tidptr
)
2981 #if defined(USE_NPTL)
2982 unsigned int nptl_flags
;
2986 /* Emulate vfork() with fork() */
2987 if (flags
& CLONE_VFORK
)
2988 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
2990 if (flags
& CLONE_VM
) {
2991 #if defined(USE_NPTL)
2992 new_thread_info info
;
2993 pthread_attr_t attr
;
2995 ts
= qemu_mallocz(sizeof(TaskState
) + NEW_STACK_SIZE
);
2996 init_task_state(ts
);
2997 new_stack
= ts
->stack
;
2998 /* we create a new CPU instance. */
2999 new_env
= cpu_copy(env
);
3000 /* Init regs that differ from the parent. */
3001 cpu_clone_regs(new_env
, newsp
);
3002 new_env
->opaque
= ts
;
3003 #if defined(USE_NPTL)
3005 flags
&= ~CLONE_NPTL_FLAGS2
;
3007 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3008 ts
->child_tidptr
= child_tidptr
;
3011 if (nptl_flags
& CLONE_SETTLS
)
3012 cpu_set_tls (new_env
, newtls
);
3014 /* Grab a mutex so that thread setup appears atomic. */
3015 pthread_mutex_lock(&clone_lock
);
3017 memset(&info
, 0, sizeof(info
));
3018 pthread_mutex_init(&info
.mutex
, NULL
);
3019 pthread_mutex_lock(&info
.mutex
);
3020 pthread_cond_init(&info
.cond
, NULL
);
3022 if (nptl_flags
& CLONE_CHILD_SETTID
)
3023 info
.child_tidptr
= child_tidptr
;
3024 if (nptl_flags
& CLONE_PARENT_SETTID
)
3025 info
.parent_tidptr
= parent_tidptr
;
3027 ret
= pthread_attr_init(&attr
);
3028 ret
= pthread_attr_setstack(&attr
, new_stack
, NEW_STACK_SIZE
);
3029 /* It is not safe to deliver signals until the child has finished
3030 initializing, so temporarily block all signals. */
3031 sigfillset(&sigmask
);
3032 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3034 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3035 /* TODO: Free new CPU state if thread creation failed. */
3037 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3038 pthread_attr_destroy(&attr
);
3040 /* Wait for the child to initialize. */
3041 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3043 if (flags
& CLONE_PARENT_SETTID
)
3044 put_user_u32(ret
, parent_tidptr
);
3048 pthread_mutex_unlock(&info
.mutex
);
3049 pthread_cond_destroy(&info
.cond
);
3050 pthread_mutex_destroy(&info
.mutex
);
3051 pthread_mutex_unlock(&clone_lock
);
3053 if (flags
& CLONE_NPTL_FLAGS2
)
3055 /* This is probably going to die very quickly, but do it anyway. */
3057 ret
= __clone2(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3059 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3063 /* if no CLONE_VM, we consider it is a fork */
3064 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3069 /* Child Process. */
3070 cpu_clone_regs(env
, newsp
);
3072 #if defined(USE_NPTL)
3073 /* There is a race condition here. The parent process could
3074 theoretically read the TID in the child process before the child
3075 tid is set. This would require using either ptrace
3076 (not implemented) or having *_tidptr to point at a shared memory
3077 mapping. We can't repeat the spinlock hack used above because
3078 the child process gets its own copy of the lock. */
3079 if (flags
& CLONE_CHILD_SETTID
)
3080 put_user_u32(gettid(), child_tidptr
);
3081 if (flags
& CLONE_PARENT_SETTID
)
3082 put_user_u32(gettid(), parent_tidptr
);
3083 ts
= (TaskState
*)env
->opaque
;
3084 if (flags
& CLONE_SETTLS
)
3085 cpu_set_tls (env
, newtls
);
3086 if (flags
& CLONE_CHILD_CLEARTID
)
3087 ts
->child_tidptr
= child_tidptr
;
3096 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
3099 struct target_flock
*target_fl
;
3100 struct flock64 fl64
;
3101 struct target_flock64
*target_fl64
;
3105 case TARGET_F_GETLK
:
3106 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3107 return -TARGET_EFAULT
;
3108 fl
.l_type
= tswap16(target_fl
->l_type
);
3109 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3110 fl
.l_start
= tswapl(target_fl
->l_start
);
3111 fl
.l_len
= tswapl(target_fl
->l_len
);
3112 fl
.l_pid
= tswapl(target_fl
->l_pid
);
3113 unlock_user_struct(target_fl
, arg
, 0);
3114 ret
= get_errno(fcntl(fd
, cmd
, &fl
));
3116 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
3117 return -TARGET_EFAULT
;
3118 target_fl
->l_type
= tswap16(fl
.l_type
);
3119 target_fl
->l_whence
= tswap16(fl
.l_whence
);
3120 target_fl
->l_start
= tswapl(fl
.l_start
);
3121 target_fl
->l_len
= tswapl(fl
.l_len
);
3122 target_fl
->l_pid
= tswapl(fl
.l_pid
);
3123 unlock_user_struct(target_fl
, arg
, 1);
3127 case TARGET_F_SETLK
:
3128 case TARGET_F_SETLKW
:
3129 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3130 return -TARGET_EFAULT
;
3131 fl
.l_type
= tswap16(target_fl
->l_type
);
3132 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3133 fl
.l_start
= tswapl(target_fl
->l_start
);
3134 fl
.l_len
= tswapl(target_fl
->l_len
);
3135 fl
.l_pid
= tswapl(target_fl
->l_pid
);
3136 unlock_user_struct(target_fl
, arg
, 0);
3137 ret
= get_errno(fcntl(fd
, cmd
, &fl
));
3140 case TARGET_F_GETLK64
:
3141 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3142 return -TARGET_EFAULT
;
3143 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3144 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3145 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3146 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3147 fl64
.l_pid
= tswap16(target_fl64
->l_pid
);
3148 unlock_user_struct(target_fl64
, arg
, 0);
3149 ret
= get_errno(fcntl(fd
, cmd
>> 1, &fl64
));
3151 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
3152 return -TARGET_EFAULT
;
3153 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
3154 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
3155 target_fl64
->l_start
= tswapl(fl64
.l_start
);
3156 target_fl64
->l_len
= tswapl(fl64
.l_len
);
3157 target_fl64
->l_pid
= tswapl(fl64
.l_pid
);
3158 unlock_user_struct(target_fl64
, arg
, 1);
3161 case TARGET_F_SETLK64
:
3162 case TARGET_F_SETLKW64
:
3163 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3164 return -TARGET_EFAULT
;
3165 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3166 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3167 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3168 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3169 fl64
.l_pid
= tswap16(target_fl64
->l_pid
);
3170 unlock_user_struct(target_fl64
, arg
, 0);
3171 ret
= get_errno(fcntl(fd
, cmd
>> 1, &fl64
));
3175 ret
= get_errno(fcntl(fd
, cmd
, arg
));
3177 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
3182 ret
= get_errno(fcntl(fd
, cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
3186 ret
= get_errno(fcntl(fd
, cmd
, arg
));
3194 static inline int high2lowuid(int uid
)
3202 static inline int high2lowgid(int gid
)
3210 static inline int low2highuid(int uid
)
3212 if ((int16_t)uid
== -1)
3218 static inline int low2highgid(int gid
)
3220 if ((int16_t)gid
== -1)
3226 #endif /* USE_UID16 */
3228 void syscall_init(void)
3231 const argtype
*arg_type
;
3235 #define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3236 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3237 #include "syscall_types.h"
3239 #undef STRUCT_SPECIAL
3241 /* we patch the ioctl size if necessary. We rely on the fact that
3242 no ioctl has all the bits at '1' in the size field */
3244 while (ie
->target_cmd
!= 0) {
3245 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
3246 TARGET_IOC_SIZEMASK
) {
3247 arg_type
= ie
->arg_type
;
3248 if (arg_type
[0] != TYPE_PTR
) {
3249 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
3254 size
= thunk_type_size(arg_type
, 0);
3255 ie
->target_cmd
= (ie
->target_cmd
&
3256 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
3257 (size
<< TARGET_IOC_SIZESHIFT
);
3260 /* Build target_to_host_errno_table[] table from
3261 * host_to_target_errno_table[]. */
3262 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
3263 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
3265 /* automatic consistency check if same arch */
3266 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3267 (defined(__x86_64__) && defined(TARGET_X86_64))
3268 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
3269 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3270 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit syscall argument
 * words; which word holds the high half depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already arrives in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
3293 #ifdef TARGET_NR_truncate64
3294 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
3300 if (((CPUARMState
*)cpu_env
)->eabi
)
3306 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
3310 #ifdef TARGET_NR_ftruncate64
3311 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
3317 if (((CPUARMState
*)cpu_env
)->eabi
)
3323 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
3327 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
3328 abi_ulong target_addr
)
3330 struct target_timespec
*target_ts
;
3332 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
3333 return -TARGET_EFAULT
;
3334 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
3335 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
3336 unlock_user_struct(target_ts
, target_addr
, 0);
3340 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
3341 struct timespec
*host_ts
)
3343 struct target_timespec
*target_ts
;
3345 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
3346 return -TARGET_EFAULT
;
3347 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
3348 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
3349 unlock_user_struct(target_ts
, target_addr
, 1);
3353 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
3354 static inline abi_long
host_to_target_stat64(void *cpu_env
,
3355 abi_ulong target_addr
,
3356 struct stat
*host_st
)
3359 if (((CPUARMState
*)cpu_env
)->eabi
) {
3360 struct target_eabi_stat64
*target_st
;
3362 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
3363 return -TARGET_EFAULT
;
3364 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
3365 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
3366 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
3367 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3368 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
3370 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
3371 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
3372 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
3373 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
3374 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
3375 __put_user(host_st
->st_size
, &target_st
->st_size
);
3376 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
3377 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
3378 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
3379 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
3380 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
3381 unlock_user_struct(target_st
, target_addr
, 1);
3385 #if TARGET_LONG_BITS == 64
3386 struct target_stat
*target_st
;
3388 struct target_stat64
*target_st
;
3391 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
3392 return -TARGET_EFAULT
;
3393 memset(target_st
, 0, sizeof(*target_st
));
3394 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
3395 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
3396 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3397 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
3399 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
3400 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
3401 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
3402 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
3403 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
3404 /* XXX: better use of kernel struct */
3405 __put_user(host_st
->st_size
, &target_st
->st_size
);
3406 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
3407 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
3408 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
3409 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
3410 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
3411 unlock_user_struct(target_st
, target_addr
, 1);
3418 #if defined(USE_NPTL)
3419 /* ??? Using host futex calls even when target atomic operations
3420 are not really atomic probably breaks things. However implementing
3421 futexes locally would make futexes shared between multiple processes
3422 tricky. However they're probably useless because guest atomic
3423 operations won't work either. */
3424 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
3425 target_ulong uaddr2
, int val3
)
3427 struct timespec ts
, *pts
;
3429 /* ??? We assume FUTEX_* constants are the same on both host
3435 target_to_host_timespec(pts
, timeout
);
3439 return get_errno(sys_futex(g2h(uaddr
), FUTEX_WAIT
, tswap32(val
),
3442 return get_errno(sys_futex(g2h(uaddr
), FUTEX_WAKE
, val
, NULL
, NULL
, 0));
3444 return get_errno(sys_futex(g2h(uaddr
), FUTEX_FD
, val
, NULL
, NULL
, 0));
3446 return get_errno(sys_futex(g2h(uaddr
), FUTEX_REQUEUE
, val
,
3447 NULL
, g2h(uaddr2
), 0));
3448 case FUTEX_CMP_REQUEUE
:
3449 return get_errno(sys_futex(g2h(uaddr
), FUTEX_CMP_REQUEUE
, val
,
3450 NULL
, g2h(uaddr2
), tswap32(val3
)));
3452 return -TARGET_ENOSYS
;
3457 int get_osversion(void)
3459 static int osversion
;
3460 struct new_utsname buf
;
3465 if (qemu_uname_release
&& *qemu_uname_release
) {
3466 s
= qemu_uname_release
;
3468 if (sys_uname(&buf
))
3473 for (i
= 0; i
< 3; i
++) {
3475 while (*s
>= '0' && *s
<= '9') {
3480 tmp
= (tmp
<< 8) + n
;
3488 /* do_syscall() should always have a single exit point at the end so
3489 that actions, such as logging of syscall results, can be performed.
3490 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
3491 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
3492 abi_long arg2
, abi_long arg3
, abi_long arg4
,
3493 abi_long arg5
, abi_long arg6
)
3501 gemu_log("syscall %d", num
);
3504 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
3507 case TARGET_NR_exit
:
3509 /* In old applications this may be used to implement _exit(2).
3510 However in threaded applictions it is used for thread termination,
3511 and _exit_group is used for application termination.
3512 Do thread termination if we have more then one thread. */
3513 /* FIXME: This probably breaks if a signal arrives. We should probably
3514 be disabling signals. */
3515 if (first_cpu
->next_cpu
) {
3522 while (p
&& p
!= (CPUState
*)cpu_env
) {
3523 lastp
= &p
->next_cpu
;
3526 /* If we didn't find the CPU for this thread then something is
3530 /* Remove the CPU from the list. */
3531 *lastp
= p
->next_cpu
;
3533 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
3534 if (ts
->child_tidptr
) {
3535 put_user_u32(0, ts
->child_tidptr
);
3536 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
3539 /* TODO: Free CPU state. */
3546 gdb_exit(cpu_env
, arg1
);
3548 ret
= 0; /* avoid warning */
3550 case TARGET_NR_read
:
3554 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
3556 ret
= get_errno(read(arg1
, p
, arg3
));
3557 unlock_user(p
, arg2
, ret
);
3560 case TARGET_NR_write
:
3561 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
3563 ret
= get_errno(write(arg1
, p
, arg3
));
3564 unlock_user(p
, arg2
, 0);
3566 case TARGET_NR_open
:
3567 if (!(p
= lock_user_string(arg1
)))
3569 ret
= get_errno(open(path(p
),
3570 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
3572 unlock_user(p
, arg1
, 0);
3574 #if defined(TARGET_NR_openat) && defined(__NR_openat)
3575 case TARGET_NR_openat
:
3576 if (!(p
= lock_user_string(arg2
)))
3578 ret
= get_errno(sys_openat(arg1
,
3580 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
3582 unlock_user(p
, arg2
, 0);
3585 case TARGET_NR_close
:
3586 ret
= get_errno(close(arg1
));
3591 case TARGET_NR_fork
:
3592 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
3594 #ifdef TARGET_NR_waitpid
3595 case TARGET_NR_waitpid
:
3598 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
3599 if (!is_error(ret
) && arg2
3600 && put_user_s32(status
, arg2
))
3605 #ifdef TARGET_NR_waitid
3606 case TARGET_NR_waitid
:
3610 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
3611 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
3612 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
3614 host_to_target_siginfo(p
, &info
);
3615 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
3620 #ifdef TARGET_NR_creat /* not on alpha */
3621 case TARGET_NR_creat
:
3622 if (!(p
= lock_user_string(arg1
)))
3624 ret
= get_errno(creat(p
, arg2
));
3625 unlock_user(p
, arg1
, 0);
3628 case TARGET_NR_link
:
3631 p
= lock_user_string(arg1
);
3632 p2
= lock_user_string(arg2
);
3634 ret
= -TARGET_EFAULT
;
3636 ret
= get_errno(link(p
, p2
));
3637 unlock_user(p2
, arg2
, 0);
3638 unlock_user(p
, arg1
, 0);
3641 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
3642 case TARGET_NR_linkat
:
3647 p
= lock_user_string(arg2
);
3648 p2
= lock_user_string(arg4
);
3650 ret
= -TARGET_EFAULT
;
3652 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
3653 unlock_user(p
, arg2
, 0);
3654 unlock_user(p2
, arg4
, 0);
3658 case TARGET_NR_unlink
:
3659 if (!(p
= lock_user_string(arg1
)))
3661 ret
= get_errno(unlink(p
));
3662 unlock_user(p
, arg1
, 0);
3664 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
3665 case TARGET_NR_unlinkat
:
3666 if (!(p
= lock_user_string(arg2
)))
3668 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
3669 unlock_user(p
, arg2
, 0);
3672 case TARGET_NR_execve
:
3674 char **argp
, **envp
;
3677 abi_ulong guest_argp
;
3678 abi_ulong guest_envp
;
3684 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
3685 if (get_user_ual(addr
, gp
))
3693 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
3694 if (get_user_ual(addr
, gp
))
3701 argp
= alloca((argc
+ 1) * sizeof(void *));
3702 envp
= alloca((envc
+ 1) * sizeof(void *));
3704 for (gp
= guest_argp
, q
= argp
; gp
;
3705 gp
+= sizeof(abi_ulong
), q
++) {
3706 if (get_user_ual(addr
, gp
))
3710 if (!(*q
= lock_user_string(addr
)))
3715 for (gp
= guest_envp
, q
= envp
; gp
;
3716 gp
+= sizeof(abi_ulong
), q
++) {
3717 if (get_user_ual(addr
, gp
))
3721 if (!(*q
= lock_user_string(addr
)))
3726 if (!(p
= lock_user_string(arg1
)))
3728 ret
= get_errno(execve(p
, argp
, envp
));
3729 unlock_user(p
, arg1
, 0);
3734 ret
= -TARGET_EFAULT
;
3737 for (gp
= guest_argp
, q
= argp
; *q
;
3738 gp
+= sizeof(abi_ulong
), q
++) {
3739 if (get_user_ual(addr
, gp
)
3742 unlock_user(*q
, addr
, 0);
3744 for (gp
= guest_envp
, q
= envp
; *q
;
3745 gp
+= sizeof(abi_ulong
), q
++) {
3746 if (get_user_ual(addr
, gp
)
3749 unlock_user(*q
, addr
, 0);
3753 case TARGET_NR_chdir
:
3754 if (!(p
= lock_user_string(arg1
)))
3756 ret
= get_errno(chdir(p
));
3757 unlock_user(p
, arg1
, 0);
3759 #ifdef TARGET_NR_time
3760 case TARGET_NR_time
:
3763 ret
= get_errno(time(&host_time
));
3766 && put_user_sal(host_time
, arg1
))
3771 case TARGET_NR_mknod
:
3772 if (!(p
= lock_user_string(arg1
)))
3774 ret
= get_errno(mknod(p
, arg2
, arg3
));
3775 unlock_user(p
, arg1
, 0);
3777 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
3778 case TARGET_NR_mknodat
:
3779 if (!(p
= lock_user_string(arg2
)))
3781 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
3782 unlock_user(p
, arg2
, 0);
3785 case TARGET_NR_chmod
:
3786 if (!(p
= lock_user_string(arg1
)))
3788 ret
= get_errno(chmod(p
, arg2
));
3789 unlock_user(p
, arg1
, 0);
3791 #ifdef TARGET_NR_break
3792 case TARGET_NR_break
:
3795 #ifdef TARGET_NR_oldstat
3796 case TARGET_NR_oldstat
:
3799 case TARGET_NR_lseek
:
3800 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
3802 #ifdef TARGET_NR_getxpid
3803 case TARGET_NR_getxpid
:
3805 case TARGET_NR_getpid
:
3807 ret
= get_errno(getpid());
3809 case TARGET_NR_mount
:
3811 /* need to look at the data field */
3813 p
= lock_user_string(arg1
);
3814 p2
= lock_user_string(arg2
);
3815 p3
= lock_user_string(arg3
);
3816 if (!p
|| !p2
|| !p3
)
3817 ret
= -TARGET_EFAULT
;
3819 /* FIXME - arg5 should be locked, but it isn't clear how to
3820 * do that since it's not guaranteed to be a NULL-terminated
3823 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
3824 unlock_user(p
, arg1
, 0);
3825 unlock_user(p2
, arg2
, 0);
3826 unlock_user(p3
, arg3
, 0);
3829 #ifdef TARGET_NR_umount
3830 case TARGET_NR_umount
:
3831 if (!(p
= lock_user_string(arg1
)))
3833 ret
= get_errno(umount(p
));
3834 unlock_user(p
, arg1
, 0);
3837 #ifdef TARGET_NR_stime /* not on alpha */
3838 case TARGET_NR_stime
:
3841 if (get_user_sal(host_time
, arg1
))
3843 ret
= get_errno(stime(&host_time
));
3847 case TARGET_NR_ptrace
:
3849 #ifdef TARGET_NR_alarm /* not on alpha */
3850 case TARGET_NR_alarm
:
3854 #ifdef TARGET_NR_oldfstat
3855 case TARGET_NR_oldfstat
:
3858 #ifdef TARGET_NR_pause /* not on alpha */
3859 case TARGET_NR_pause
:
3860 ret
= get_errno(pause());
3863 #ifdef TARGET_NR_utime
3864 case TARGET_NR_utime
:
3866 struct utimbuf tbuf
, *host_tbuf
;
3867 struct target_utimbuf
*target_tbuf
;
3869 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
3871 tbuf
.actime
= tswapl(target_tbuf
->actime
);
3872 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
3873 unlock_user_struct(target_tbuf
, arg2
, 0);
3878 if (!(p
= lock_user_string(arg1
)))
3880 ret
= get_errno(utime(p
, host_tbuf
));
3881 unlock_user(p
, arg1
, 0);
3885 case TARGET_NR_utimes
:
3887 struct timeval
*tvp
, tv
[2];
3889 if (copy_from_user_timeval(&tv
[0], arg2
)
3890 || copy_from_user_timeval(&tv
[1],
3891 arg2
+ sizeof(struct target_timeval
)))
3897 if (!(p
= lock_user_string(arg1
)))
3899 ret
= get_errno(utimes(p
, tvp
));
3900 unlock_user(p
, arg1
, 0);
3903 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
3904 case TARGET_NR_futimesat
:
3906 struct timeval
*tvp
, tv
[2];
3908 if (copy_from_user_timeval(&tv
[0], arg3
)
3909 || copy_from_user_timeval(&tv
[1],
3910 arg3
+ sizeof(struct target_timeval
)))
3916 if (!(p
= lock_user_string(arg2
)))
3918 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
3919 unlock_user(p
, arg2
, 0);
3923 #ifdef TARGET_NR_stty
3924 case TARGET_NR_stty
:
3927 #ifdef TARGET_NR_gtty
3928 case TARGET_NR_gtty
:
3931 case TARGET_NR_access
:
3932 if (!(p
= lock_user_string(arg1
)))
3934 ret
= get_errno(access(p
, arg2
));
3935 unlock_user(p
, arg1
, 0);
3937 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
3938 case TARGET_NR_faccessat
:
3939 if (!(p
= lock_user_string(arg2
)))
3941 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
, arg4
));
3942 unlock_user(p
, arg2
, 0);
3945 #ifdef TARGET_NR_nice /* not on alpha */
3946 case TARGET_NR_nice
:
3947 ret
= get_errno(nice(arg1
));
3950 #ifdef TARGET_NR_ftime
3951 case TARGET_NR_ftime
:
3954 case TARGET_NR_sync
:
3958 case TARGET_NR_kill
:
3959 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
3961 case TARGET_NR_rename
:
3964 p
= lock_user_string(arg1
);
3965 p2
= lock_user_string(arg2
);
3967 ret
= -TARGET_EFAULT
;
3969 ret
= get_errno(rename(p
, p2
));
3970 unlock_user(p2
, arg2
, 0);
3971 unlock_user(p
, arg1
, 0);
3974 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
3975 case TARGET_NR_renameat
:
3978 p
= lock_user_string(arg2
);
3979 p2
= lock_user_string(arg4
);
3981 ret
= -TARGET_EFAULT
;
3983 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
3984 unlock_user(p2
, arg4
, 0);
3985 unlock_user(p
, arg2
, 0);
3989 case TARGET_NR_mkdir
:
3990 if (!(p
= lock_user_string(arg1
)))
3992 ret
= get_errno(mkdir(p
, arg2
));
3993 unlock_user(p
, arg1
, 0);
3995 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
3996 case TARGET_NR_mkdirat
:
3997 if (!(p
= lock_user_string(arg2
)))
3999 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
4000 unlock_user(p
, arg2
, 0);
4003 case TARGET_NR_rmdir
:
4004 if (!(p
= lock_user_string(arg1
)))
4006 ret
= get_errno(rmdir(p
));
4007 unlock_user(p
, arg1
, 0);
4010 ret
= get_errno(dup(arg1
));
4012 case TARGET_NR_pipe
:
4015 ret
= get_errno(pipe(host_pipe
));
4016 if (!is_error(ret
)) {
4017 #if defined(TARGET_MIPS)
4018 CPUMIPSState
*env
= (CPUMIPSState
*)cpu_env
;
4019 env
->active_tc
.gpr
[3] = host_pipe
[1];
4021 #elif defined(TARGET_SH4)
4022 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
4025 if (put_user_s32(host_pipe
[0], arg1
)
4026 || put_user_s32(host_pipe
[1], arg1
+ sizeof(host_pipe
[0])))
4032 case TARGET_NR_times
:
4034 struct target_tms
*tmsp
;
4036 ret
= get_errno(times(&tms
));
4038 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
4041 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
4042 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
4043 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
4044 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
4047 ret
= host_to_target_clock_t(ret
);
4050 #ifdef TARGET_NR_prof
4051 case TARGET_NR_prof
:
4054 #ifdef TARGET_NR_signal
4055 case TARGET_NR_signal
:
4058 case TARGET_NR_acct
:
4060 ret
= get_errno(acct(NULL
));
4062 if (!(p
= lock_user_string(arg1
)))
4064 ret
= get_errno(acct(path(p
)));
4065 unlock_user(p
, arg1
, 0);
4068 #ifdef TARGET_NR_umount2 /* not on alpha */
4069 case TARGET_NR_umount2
:
4070 if (!(p
= lock_user_string(arg1
)))
4072 ret
= get_errno(umount2(p
, arg2
));
4073 unlock_user(p
, arg1
, 0);
4076 #ifdef TARGET_NR_lock
4077 case TARGET_NR_lock
:
4080 case TARGET_NR_ioctl
:
4081 ret
= do_ioctl(arg1
, arg2
, arg3
);
4083 case TARGET_NR_fcntl
:
4084 ret
= do_fcntl(arg1
, arg2
, arg3
);
4086 #ifdef TARGET_NR_mpx
4090 case TARGET_NR_setpgid
:
4091 ret
= get_errno(setpgid(arg1
, arg2
));
4093 #ifdef TARGET_NR_ulimit
4094 case TARGET_NR_ulimit
:
4097 #ifdef TARGET_NR_oldolduname
4098 case TARGET_NR_oldolduname
:
4101 case TARGET_NR_umask
:
4102 ret
= get_errno(umask(arg1
));
4104 case TARGET_NR_chroot
:
4105 if (!(p
= lock_user_string(arg1
)))
4107 ret
= get_errno(chroot(p
));
4108 unlock_user(p
, arg1
, 0);
4110 case TARGET_NR_ustat
:
4112 case TARGET_NR_dup2
:
4113 ret
= get_errno(dup2(arg1
, arg2
));
4115 #ifdef TARGET_NR_getppid /* not on alpha */
4116 case TARGET_NR_getppid
:
4117 ret
= get_errno(getppid());
4120 case TARGET_NR_getpgrp
:
4121 ret
= get_errno(getpgrp());
4123 case TARGET_NR_setsid
:
4124 ret
= get_errno(setsid());
4126 #ifdef TARGET_NR_sigaction
4127 case TARGET_NR_sigaction
:
4129 #if !defined(TARGET_MIPS)
4130 struct target_old_sigaction
*old_act
;
4131 struct target_sigaction act
, oact
, *pact
;
4133 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4135 act
._sa_handler
= old_act
->_sa_handler
;
4136 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4137 act
.sa_flags
= old_act
->sa_flags
;
4138 act
.sa_restorer
= old_act
->sa_restorer
;
4139 unlock_user_struct(old_act
, arg2
, 0);
4144 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4145 if (!is_error(ret
) && arg3
) {
4146 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4148 old_act
->_sa_handler
= oact
._sa_handler
;
4149 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4150 old_act
->sa_flags
= oact
.sa_flags
;
4151 old_act
->sa_restorer
= oact
.sa_restorer
;
4152 unlock_user_struct(old_act
, arg3
, 1);
4155 struct target_sigaction act
, oact
, *pact
, *old_act
;
4158 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4160 act
._sa_handler
= old_act
->_sa_handler
;
4161 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
4162 act
.sa_flags
= old_act
->sa_flags
;
4163 unlock_user_struct(old_act
, arg2
, 0);
4169 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4171 if (!is_error(ret
) && arg3
) {
4172 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4174 old_act
->_sa_handler
= oact
._sa_handler
;
4175 old_act
->sa_flags
= oact
.sa_flags
;
4176 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
4177 old_act
->sa_mask
.sig
[1] = 0;
4178 old_act
->sa_mask
.sig
[2] = 0;
4179 old_act
->sa_mask
.sig
[3] = 0;
4180 unlock_user_struct(old_act
, arg3
, 1);
4186 case TARGET_NR_rt_sigaction
:
4188 struct target_sigaction
*act
;
4189 struct target_sigaction
*oact
;
4192 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
4197 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
4198 ret
= -TARGET_EFAULT
;
4199 goto rt_sigaction_fail
;
4203 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
4206 unlock_user_struct(act
, arg2
, 0);
4208 unlock_user_struct(oact
, arg3
, 1);
4211 #ifdef TARGET_NR_sgetmask /* not on alpha */
4212 case TARGET_NR_sgetmask
:
4215 abi_ulong target_set
;
4216 sigprocmask(0, NULL
, &cur_set
);
4217 host_to_target_old_sigset(&target_set
, &cur_set
);
4222 #ifdef TARGET_NR_ssetmask /* not on alpha */
4223 case TARGET_NR_ssetmask
:
4225 sigset_t set
, oset
, cur_set
;
4226 abi_ulong target_set
= arg1
;
4227 sigprocmask(0, NULL
, &cur_set
);
4228 target_to_host_old_sigset(&set
, &target_set
);
4229 sigorset(&set
, &set
, &cur_set
);
4230 sigprocmask(SIG_SETMASK
, &set
, &oset
);
4231 host_to_target_old_sigset(&target_set
, &oset
);
4236 #ifdef TARGET_NR_sigprocmask
4237 case TARGET_NR_sigprocmask
:
4240 sigset_t set
, oldset
, *set_ptr
;
4244 case TARGET_SIG_BLOCK
:
4247 case TARGET_SIG_UNBLOCK
:
4250 case TARGET_SIG_SETMASK
:
4254 ret
= -TARGET_EINVAL
;
4257 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4259 target_to_host_old_sigset(&set
, p
);
4260 unlock_user(p
, arg2
, 0);
4266 ret
= get_errno(sigprocmask(arg1
, set_ptr
, &oldset
));
4267 if (!is_error(ret
) && arg3
) {
4268 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4270 host_to_target_old_sigset(p
, &oldset
);
4271 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4276 case TARGET_NR_rt_sigprocmask
:
4279 sigset_t set
, oldset
, *set_ptr
;
4283 case TARGET_SIG_BLOCK
:
4286 case TARGET_SIG_UNBLOCK
:
4289 case TARGET_SIG_SETMASK
:
4293 ret
= -TARGET_EINVAL
;
4296 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4298 target_to_host_sigset(&set
, p
);
4299 unlock_user(p
, arg2
, 0);
4305 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
4306 if (!is_error(ret
) && arg3
) {
4307 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4309 host_to_target_sigset(p
, &oldset
);
4310 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4314 #ifdef TARGET_NR_sigpending
4315 case TARGET_NR_sigpending
:
4318 ret
= get_errno(sigpending(&set
));
4319 if (!is_error(ret
)) {
4320 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4322 host_to_target_old_sigset(p
, &set
);
4323 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4328 case TARGET_NR_rt_sigpending
:
4331 ret
= get_errno(sigpending(&set
));
4332 if (!is_error(ret
)) {
4333 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4335 host_to_target_sigset(p
, &set
);
4336 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4340 #ifdef TARGET_NR_sigsuspend
4341 case TARGET_NR_sigsuspend
:
4344 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4346 target_to_host_old_sigset(&set
, p
);
4347 unlock_user(p
, arg1
, 0);
4348 ret
= get_errno(sigsuspend(&set
));
4352 case TARGET_NR_rt_sigsuspend
:
4355 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4357 target_to_host_sigset(&set
, p
);
4358 unlock_user(p
, arg1
, 0);
4359 ret
= get_errno(sigsuspend(&set
));
4362 case TARGET_NR_rt_sigtimedwait
:
4365 struct timespec uts
, *puts
;
4368 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4370 target_to_host_sigset(&set
, p
);
4371 unlock_user(p
, arg1
, 0);
4374 target_to_host_timespec(puts
, arg3
);
4378 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
4379 if (!is_error(ret
) && arg2
) {
4380 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
4382 host_to_target_siginfo(p
, &uinfo
);
4383 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
4387 case TARGET_NR_rt_sigqueueinfo
:
4390 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
4392 target_to_host_siginfo(&uinfo
, p
);
4393 unlock_user(p
, arg1
, 0);
4394 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
4397 #ifdef TARGET_NR_sigreturn
4398 case TARGET_NR_sigreturn
:
4399 /* NOTE: ret is eax, so no transcoding must be done */
4400 ret
= do_sigreturn(cpu_env
);
4403 case TARGET_NR_rt_sigreturn
:
4404 /* NOTE: ret is eax, so no transcoding must be done */
4405 ret
= do_rt_sigreturn(cpu_env
);
4407 case TARGET_NR_sethostname
:
4408 if (!(p
= lock_user_string(arg1
)))
4410 ret
= get_errno(sethostname(p
, arg2
));
4411 unlock_user(p
, arg1
, 0);
4413 case TARGET_NR_setrlimit
:
4415 /* XXX: convert resource ? */
4416 int resource
= arg1
;
4417 struct target_rlimit
*target_rlim
;
4419 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
4421 rlim
.rlim_cur
= tswapl(target_rlim
->rlim_cur
);
4422 rlim
.rlim_max
= tswapl(target_rlim
->rlim_max
);
4423 unlock_user_struct(target_rlim
, arg2
, 0);
4424 ret
= get_errno(setrlimit(resource
, &rlim
));
4427 case TARGET_NR_getrlimit
:
4429 /* XXX: convert resource ? */
4430 int resource
= arg1
;
4431 struct target_rlimit
*target_rlim
;
4434 ret
= get_errno(getrlimit(resource
, &rlim
));
4435 if (!is_error(ret
)) {
4436 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
4438 rlim
.rlim_cur
= tswapl(target_rlim
->rlim_cur
);
4439 rlim
.rlim_max
= tswapl(target_rlim
->rlim_max
);
4440 unlock_user_struct(target_rlim
, arg2
, 1);
4444 case TARGET_NR_getrusage
:
4446 struct rusage rusage
;
4447 ret
= get_errno(getrusage(arg1
, &rusage
));
4448 if (!is_error(ret
)) {
4449 host_to_target_rusage(arg2
, &rusage
);
4453 case TARGET_NR_gettimeofday
:
4456 ret
= get_errno(gettimeofday(&tv
, NULL
));
4457 if (!is_error(ret
)) {
4458 if (copy_to_user_timeval(arg1
, &tv
))
4463 case TARGET_NR_settimeofday
:
4466 if (copy_from_user_timeval(&tv
, arg1
))
4468 ret
= get_errno(settimeofday(&tv
, NULL
));
4471 #ifdef TARGET_NR_select
4472 case TARGET_NR_select
:
4474 struct target_sel_arg_struct
*sel
;
4475 abi_ulong inp
, outp
, exp
, tvp
;
4478 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
4480 nsel
= tswapl(sel
->n
);
4481 inp
= tswapl(sel
->inp
);
4482 outp
= tswapl(sel
->outp
);
4483 exp
= tswapl(sel
->exp
);
4484 tvp
= tswapl(sel
->tvp
);
4485 unlock_user_struct(sel
, arg1
, 0);
4486 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
4490 case TARGET_NR_symlink
:
4493 p
= lock_user_string(arg1
);
4494 p2
= lock_user_string(arg2
);
4496 ret
= -TARGET_EFAULT
;
4498 ret
= get_errno(symlink(p
, p2
));
4499 unlock_user(p2
, arg2
, 0);
4500 unlock_user(p
, arg1
, 0);
4503 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
4504 case TARGET_NR_symlinkat
:
4507 p
= lock_user_string(arg1
);
4508 p2
= lock_user_string(arg3
);
4510 ret
= -TARGET_EFAULT
;
4512 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
4513 unlock_user(p2
, arg3
, 0);
4514 unlock_user(p
, arg1
, 0);
4518 #ifdef TARGET_NR_oldlstat
4519 case TARGET_NR_oldlstat
:
4522 case TARGET_NR_readlink
:
4525 p
= lock_user_string(arg1
);
4526 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
4528 ret
= -TARGET_EFAULT
;
4530 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
4531 char real
[PATH_MAX
];
4532 temp
= realpath(exec_path
,real
);
4533 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
4534 snprintf((char *)p2
, arg3
, "%s", real
);
4537 ret
= get_errno(readlink(path(p
), p2
, arg3
));
4539 unlock_user(p2
, arg2
, ret
);
4540 unlock_user(p
, arg1
, 0);
4543 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
4544 case TARGET_NR_readlinkat
:
4547 p
= lock_user_string(arg2
);
4548 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
4550 ret
= -TARGET_EFAULT
;
4552 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
4553 unlock_user(p2
, arg3
, ret
);
4554 unlock_user(p
, arg2
, 0);
4558 #ifdef TARGET_NR_uselib
4559 case TARGET_NR_uselib
:
4562 #ifdef TARGET_NR_swapon
4563 case TARGET_NR_swapon
:
4564 if (!(p
= lock_user_string(arg1
)))
4566 ret
= get_errno(swapon(p
, arg2
));
4567 unlock_user(p
, arg1
, 0);
4570 case TARGET_NR_reboot
:
4572 #ifdef TARGET_NR_readdir
4573 case TARGET_NR_readdir
:
4576 #ifdef TARGET_NR_mmap
4577 case TARGET_NR_mmap
:
4578 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
4581 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
4582 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
4590 unlock_user(v
, arg1
, 0);
4591 ret
= get_errno(target_mmap(v1
, v2
, v3
,
4592 target_to_host_bitmask(v4
, mmap_flags_tbl
),
4596 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
4597 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
4603 #ifdef TARGET_NR_mmap2
4604 case TARGET_NR_mmap2
:
4606 #define MMAP_SHIFT 12
4608 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
4609 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
4611 arg6
<< MMAP_SHIFT
));
4614 case TARGET_NR_munmap
:
4615 ret
= get_errno(target_munmap(arg1
, arg2
));
4617 case TARGET_NR_mprotect
:
4618 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
4620 #ifdef TARGET_NR_mremap
4621 case TARGET_NR_mremap
:
4622 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
4625 /* ??? msync/mlock/munlock are broken for softmmu. */
4626 #ifdef TARGET_NR_msync
4627 case TARGET_NR_msync
:
4628 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
4631 #ifdef TARGET_NR_mlock
4632 case TARGET_NR_mlock
:
4633 ret
= get_errno(mlock(g2h(arg1
), arg2
));
4636 #ifdef TARGET_NR_munlock
4637 case TARGET_NR_munlock
:
4638 ret
= get_errno(munlock(g2h(arg1
), arg2
));
4641 #ifdef TARGET_NR_mlockall
4642 case TARGET_NR_mlockall
:
4643 ret
= get_errno(mlockall(arg1
));
4646 #ifdef TARGET_NR_munlockall
4647 case TARGET_NR_munlockall
:
4648 ret
= get_errno(munlockall());
4651 case TARGET_NR_truncate
:
4652 if (!(p
= lock_user_string(arg1
)))
4654 ret
= get_errno(truncate(p
, arg2
));
4655 unlock_user(p
, arg1
, 0);
4657 case TARGET_NR_ftruncate
:
4658 ret
= get_errno(ftruncate(arg1
, arg2
));
4660 case TARGET_NR_fchmod
:
4661 ret
= get_errno(fchmod(arg1
, arg2
));
4663 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
4664 case TARGET_NR_fchmodat
:
4665 if (!(p
= lock_user_string(arg2
)))
4667 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
, arg4
));
4668 unlock_user(p
, arg2
, 0);
4671 case TARGET_NR_getpriority
:
4672 /* libc does special remapping of the return value of
4673 * sys_getpriority() so it's just easiest to call
4674 * sys_getpriority() directly rather than through libc. */
4675 ret
= sys_getpriority(arg1
, arg2
);
4677 case TARGET_NR_setpriority
:
4678 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
4680 #ifdef TARGET_NR_profil
4681 case TARGET_NR_profil
:
4684 case TARGET_NR_statfs
:
4685 if (!(p
= lock_user_string(arg1
)))
4687 ret
= get_errno(statfs(path(p
), &stfs
));
4688 unlock_user(p
, arg1
, 0);
4690 if (!is_error(ret
)) {
4691 struct target_statfs
*target_stfs
;
4693 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
4695 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
4696 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
4697 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
4698 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
4699 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
4700 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
4701 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
4702 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
4703 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
4704 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
4705 unlock_user_struct(target_stfs
, arg2
, 1);
4708 case TARGET_NR_fstatfs
:
4709 ret
= get_errno(fstatfs(arg1
, &stfs
));
4710 goto convert_statfs
;
4711 #ifdef TARGET_NR_statfs64
4712 case TARGET_NR_statfs64
:
4713 if (!(p
= lock_user_string(arg1
)))
4715 ret
= get_errno(statfs(path(p
), &stfs
));
4716 unlock_user(p
, arg1
, 0);
4718 if (!is_error(ret
)) {
4719 struct target_statfs64
*target_stfs
;
4721 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
4723 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
4724 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
4725 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
4726 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
4727 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
4728 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
4729 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
4730 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
4731 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
4732 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
4733 unlock_user_struct(target_stfs
, arg3
, 1);
4736 case TARGET_NR_fstatfs64
:
4737 ret
= get_errno(fstatfs(arg1
, &stfs
));
4738 goto convert_statfs64
;
4740 #ifdef TARGET_NR_ioperm
4741 case TARGET_NR_ioperm
:
4744 #ifdef TARGET_NR_socketcall
4745 case TARGET_NR_socketcall
:
4746 ret
= do_socketcall(arg1
, arg2
);
4749 #ifdef TARGET_NR_accept
4750 case TARGET_NR_accept
:
4751 ret
= do_accept(arg1
, arg2
, arg3
);
4754 #ifdef TARGET_NR_bind
4755 case TARGET_NR_bind
:
4756 ret
= do_bind(arg1
, arg2
, arg3
);
4759 #ifdef TARGET_NR_connect
4760 case TARGET_NR_connect
:
4761 ret
= do_connect(arg1
, arg2
, arg3
);
4764 #ifdef TARGET_NR_getpeername
4765 case TARGET_NR_getpeername
:
4766 ret
= do_getpeername(arg1
, arg2
, arg3
);
4769 #ifdef TARGET_NR_getsockname
4770 case TARGET_NR_getsockname
:
4771 ret
= do_getsockname(arg1
, arg2
, arg3
);
4774 #ifdef TARGET_NR_getsockopt
4775 case TARGET_NR_getsockopt
:
4776 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
4779 #ifdef TARGET_NR_listen
4780 case TARGET_NR_listen
:
4781 ret
= get_errno(listen(arg1
, arg2
));
4784 #ifdef TARGET_NR_recv
4785 case TARGET_NR_recv
:
4786 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
4789 #ifdef TARGET_NR_recvfrom
4790 case TARGET_NR_recvfrom
:
4791 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4794 #ifdef TARGET_NR_recvmsg
4795 case TARGET_NR_recvmsg
:
4796 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
4799 #ifdef TARGET_NR_send
4800 case TARGET_NR_send
:
4801 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
4804 #ifdef TARGET_NR_sendmsg
4805 case TARGET_NR_sendmsg
:
4806 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
4809 #ifdef TARGET_NR_sendto
4810 case TARGET_NR_sendto
:
4811 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4814 #ifdef TARGET_NR_shutdown
4815 case TARGET_NR_shutdown
:
4816 ret
= get_errno(shutdown(arg1
, arg2
));
4819 #ifdef TARGET_NR_socket
4820 case TARGET_NR_socket
:
4821 ret
= do_socket(arg1
, arg2
, arg3
);
4824 #ifdef TARGET_NR_socketpair
4825 case TARGET_NR_socketpair
:
4826 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
4829 #ifdef TARGET_NR_setsockopt
4830 case TARGET_NR_setsockopt
:
4831 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
4835 case TARGET_NR_syslog
:
4836 if (!(p
= lock_user_string(arg2
)))
4838 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
4839 unlock_user(p
, arg2
, 0);
4842 case TARGET_NR_setitimer
:
4844 struct itimerval value
, ovalue
, *pvalue
;
4848 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
4849 || copy_from_user_timeval(&pvalue
->it_value
,
4850 arg2
+ sizeof(struct target_timeval
)))
4855 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
4856 if (!is_error(ret
) && arg3
) {
4857 if (copy_to_user_timeval(arg3
,
4858 &ovalue
.it_interval
)
4859 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
4865 case TARGET_NR_getitimer
:
4867 struct itimerval value
;
4869 ret
= get_errno(getitimer(arg1
, &value
));
4870 if (!is_error(ret
) && arg2
) {
4871 if (copy_to_user_timeval(arg2
,
4873 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
4879 case TARGET_NR_stat
:
4880 if (!(p
= lock_user_string(arg1
)))
4882 ret
= get_errno(stat(path(p
), &st
));
4883 unlock_user(p
, arg1
, 0);
4885 case TARGET_NR_lstat
:
4886 if (!(p
= lock_user_string(arg1
)))
4888 ret
= get_errno(lstat(path(p
), &st
));
4889 unlock_user(p
, arg1
, 0);
4891 case TARGET_NR_fstat
:
4893 ret
= get_errno(fstat(arg1
, &st
));
4895 if (!is_error(ret
)) {
4896 struct target_stat
*target_st
;
4898 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
4900 __put_user(st
.st_dev
, &target_st
->st_dev
);
4901 __put_user(st
.st_ino
, &target_st
->st_ino
);
4902 __put_user(st
.st_mode
, &target_st
->st_mode
);
4903 __put_user(st
.st_uid
, &target_st
->st_uid
);
4904 __put_user(st
.st_gid
, &target_st
->st_gid
);
4905 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
4906 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
4907 __put_user(st
.st_size
, &target_st
->st_size
);
4908 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
4909 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
4910 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
4911 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
4912 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
4913 unlock_user_struct(target_st
, arg2
, 1);
4917 #ifdef TARGET_NR_olduname
4918 case TARGET_NR_olduname
:
4921 #ifdef TARGET_NR_iopl
4922 case TARGET_NR_iopl
:
4925 case TARGET_NR_vhangup
:
4926 ret
= get_errno(vhangup());
4928 #ifdef TARGET_NR_idle
4929 case TARGET_NR_idle
:
4932 #ifdef TARGET_NR_syscall
4933 case TARGET_NR_syscall
:
4934 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
4937 case TARGET_NR_wait4
:
4940 abi_long status_ptr
= arg2
;
4941 struct rusage rusage
, *rusage_ptr
;
4942 abi_ulong target_rusage
= arg4
;
4944 rusage_ptr
= &rusage
;
4947 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
4948 if (!is_error(ret
)) {
4950 if (put_user_s32(status
, status_ptr
))
4954 host_to_target_rusage(target_rusage
, &rusage
);
4958 #ifdef TARGET_NR_swapoff
4959 case TARGET_NR_swapoff
:
4960 if (!(p
= lock_user_string(arg1
)))
4962 ret
= get_errno(swapoff(p
));
4963 unlock_user(p
, arg1
, 0);
4966 case TARGET_NR_sysinfo
:
4968 struct target_sysinfo
*target_value
;
4969 struct sysinfo value
;
4970 ret
= get_errno(sysinfo(&value
));
4971 if (!is_error(ret
) && arg1
)
4973 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
4975 __put_user(value
.uptime
, &target_value
->uptime
);
4976 __put_user(value
.loads
[0], &target_value
->loads
[0]);
4977 __put_user(value
.loads
[1], &target_value
->loads
[1]);
4978 __put_user(value
.loads
[2], &target_value
->loads
[2]);
4979 __put_user(value
.totalram
, &target_value
->totalram
);
4980 __put_user(value
.freeram
, &target_value
->freeram
);
4981 __put_user(value
.sharedram
, &target_value
->sharedram
);
4982 __put_user(value
.bufferram
, &target_value
->bufferram
);
4983 __put_user(value
.totalswap
, &target_value
->totalswap
);
4984 __put_user(value
.freeswap
, &target_value
->freeswap
);
4985 __put_user(value
.procs
, &target_value
->procs
);
4986 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
4987 __put_user(value
.freehigh
, &target_value
->freehigh
);
4988 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
4989 unlock_user_struct(target_value
, arg1
, 1);
4993 #ifdef TARGET_NR_ipc
4995 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4999 #ifdef TARGET_NR_msgctl
5000 case TARGET_NR_msgctl
:
5001 ret
= do_msgctl(arg1
, arg2
, arg3
);
5004 #ifdef TARGET_NR_msgget
5005 case TARGET_NR_msgget
:
5006 ret
= get_errno(msgget(arg1
, arg2
));
5009 #ifdef TARGET_NR_msgrcv
5010 case TARGET_NR_msgrcv
:
5011 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
5014 #ifdef TARGET_NR_msgsnd
5015 case TARGET_NR_msgsnd
:
5016 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
5019 case TARGET_NR_fsync
:
5020 ret
= get_errno(fsync(arg1
));
5022 case TARGET_NR_clone
:
5023 #if defined(TARGET_SH4)
5024 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
5025 #elif defined(TARGET_CRIS)
5026 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
5028 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
5031 #ifdef __NR_exit_group
5032 /* new thread calls */
5033 case TARGET_NR_exit_group
:
5037 gdb_exit(cpu_env
, arg1
);
5038 ret
= get_errno(exit_group(arg1
));
5041 case TARGET_NR_setdomainname
:
5042 if (!(p
= lock_user_string(arg1
)))
5044 ret
= get_errno(setdomainname(p
, arg2
));
5045 unlock_user(p
, arg1
, 0);
5047 case TARGET_NR_uname
:
5048 /* no need to transcode because we use the linux syscall */
5050 struct new_utsname
* buf
;
5052 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
5054 ret
= get_errno(sys_uname(buf
));
5055 if (!is_error(ret
)) {
5056 /* Overwrite the native machine name with whatever is being
5058 strcpy (buf
->machine
, UNAME_MACHINE
);
5059 /* Allow the user to override the reported release. */
5060 if (qemu_uname_release
&& *qemu_uname_release
)
5061 strcpy (buf
->release
, qemu_uname_release
);
5063 unlock_user_struct(buf
, arg1
, 1);
5067 case TARGET_NR_modify_ldt
:
5068 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
5070 #if !defined(TARGET_X86_64)
5071 case TARGET_NR_vm86old
:
5073 case TARGET_NR_vm86
:
5074 ret
= do_vm86(cpu_env
, arg1
, arg2
);
5078 case TARGET_NR_adjtimex
:
5080 #ifdef TARGET_NR_create_module
5081 case TARGET_NR_create_module
:
5083 case TARGET_NR_init_module
:
5084 case TARGET_NR_delete_module
:
5085 #ifdef TARGET_NR_get_kernel_syms
5086 case TARGET_NR_get_kernel_syms
:
5089 case TARGET_NR_quotactl
:
5091 case TARGET_NR_getpgid
:
5092 ret
= get_errno(getpgid(arg1
));
5094 case TARGET_NR_fchdir
:
5095 ret
= get_errno(fchdir(arg1
));
5097 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5098 case TARGET_NR_bdflush
:
5101 #ifdef TARGET_NR_sysfs
5102 case TARGET_NR_sysfs
:
5105 case TARGET_NR_personality
:
5106 ret
= get_errno(personality(arg1
));
5108 #ifdef TARGET_NR_afs_syscall
5109 case TARGET_NR_afs_syscall
:
5112 #ifdef TARGET_NR__llseek /* Not on alpha */
5113 case TARGET_NR__llseek
:
5115 #if defined (__x86_64__)
5116 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
5117 if (put_user_s64(ret
, arg4
))
5121 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
5122 if (put_user_s64(res
, arg4
))
5128 case TARGET_NR_getdents
:
5129 #if TARGET_ABI_BITS != 32
5131 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5133 struct target_dirent
*target_dirp
;
5134 struct linux_dirent
*dirp
;
5135 abi_long count
= arg3
;
5137 dirp
= malloc(count
);
5139 ret
= -TARGET_ENOMEM
;
5143 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5144 if (!is_error(ret
)) {
5145 struct linux_dirent
*de
;
5146 struct target_dirent
*tde
;
5148 int reclen
, treclen
;
5149 int count1
, tnamelen
;
5153 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5157 reclen
= de
->d_reclen
;
5158 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
5159 tde
->d_reclen
= tswap16(treclen
);
5160 tde
->d_ino
= tswapl(de
->d_ino
);
5161 tde
->d_off
= tswapl(de
->d_off
);
5162 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
5165 /* XXX: may not be correct */
5166 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
5167 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5169 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
5173 unlock_user(target_dirp
, arg2
, ret
);
5179 struct linux_dirent
*dirp
;
5180 abi_long count
= arg3
;
5182 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5184 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5185 if (!is_error(ret
)) {
5186 struct linux_dirent
*de
;
5191 reclen
= de
->d_reclen
;
5194 de
->d_reclen
= tswap16(reclen
);
5195 tswapls(&de
->d_ino
);
5196 tswapls(&de
->d_off
);
5197 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5201 unlock_user(dirp
, arg2
, ret
);
5205 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5206 case TARGET_NR_getdents64
:
5208 struct linux_dirent64
*dirp
;
5209 abi_long count
= arg3
;
5210 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5212 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
5213 if (!is_error(ret
)) {
5214 struct linux_dirent64
*de
;
5219 reclen
= de
->d_reclen
;
5222 de
->d_reclen
= tswap16(reclen
);
5223 tswap64s((uint64_t *)&de
->d_ino
);
5224 tswap64s((uint64_t *)&de
->d_off
);
5225 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
5229 unlock_user(dirp
, arg2
, ret
);
5232 #endif /* TARGET_NR_getdents64 */
5233 #ifdef TARGET_NR__newselect
5234 case TARGET_NR__newselect
:
5235 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
5238 #ifdef TARGET_NR_poll
5239 case TARGET_NR_poll
:
5241 struct target_pollfd
*target_pfd
;
5242 unsigned int nfds
= arg2
;
5247 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
5250 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
5251 for(i
= 0; i
< nfds
; i
++) {
5252 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
5253 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
5255 ret
= get_errno(poll(pfd
, nfds
, timeout
));
5256 if (!is_error(ret
)) {
5257 for(i
= 0; i
< nfds
; i
++) {
5258 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
5260 ret
+= nfds
* (sizeof(struct target_pollfd
)
5261 - sizeof(struct pollfd
));
5263 unlock_user(target_pfd
, arg1
, ret
);
5267 case TARGET_NR_flock
:
5268 /* NOTE: the flock constant seems to be the same for every
5270 ret
= get_errno(flock(arg1
, arg2
));
5272 case TARGET_NR_readv
:
5277 vec
= alloca(count
* sizeof(struct iovec
));
5278 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
5280 ret
= get_errno(readv(arg1
, vec
, count
));
5281 unlock_iovec(vec
, arg2
, count
, 1);
5284 case TARGET_NR_writev
:
5289 vec
= alloca(count
* sizeof(struct iovec
));
5290 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
5292 ret
= get_errno(writev(arg1
, vec
, count
));
5293 unlock_iovec(vec
, arg2
, count
, 0);
5296 case TARGET_NR_getsid
:
5297 ret
= get_errno(getsid(arg1
));
5299 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5300 case TARGET_NR_fdatasync
:
5301 ret
= get_errno(fdatasync(arg1
));
5304 case TARGET_NR__sysctl
:
5305 /* We don't implement this, but ENOTDIR is always a safe
5307 ret
= -TARGET_ENOTDIR
;
5309 case TARGET_NR_sched_setparam
:
5311 struct sched_param
*target_schp
;
5312 struct sched_param schp
;
5314 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
5316 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5317 unlock_user_struct(target_schp
, arg2
, 0);
5318 ret
= get_errno(sched_setparam(arg1
, &schp
));
5321 case TARGET_NR_sched_getparam
:
5323 struct sched_param
*target_schp
;
5324 struct sched_param schp
;
5325 ret
= get_errno(sched_getparam(arg1
, &schp
));
5326 if (!is_error(ret
)) {
5327 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
5329 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
5330 unlock_user_struct(target_schp
, arg2
, 1);
5334 case TARGET_NR_sched_setscheduler
:
5336 struct sched_param
*target_schp
;
5337 struct sched_param schp
;
5338 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
5340 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5341 unlock_user_struct(target_schp
, arg3
, 0);
5342 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
5345 case TARGET_NR_sched_getscheduler
:
5346 ret
= get_errno(sched_getscheduler(arg1
));
5348 case TARGET_NR_sched_yield
:
5349 ret
= get_errno(sched_yield());
5351 case TARGET_NR_sched_get_priority_max
:
5352 ret
= get_errno(sched_get_priority_max(arg1
));
5354 case TARGET_NR_sched_get_priority_min
:
5355 ret
= get_errno(sched_get_priority_min(arg1
));
5357 case TARGET_NR_sched_rr_get_interval
:
5360 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
5361 if (!is_error(ret
)) {
5362 host_to_target_timespec(arg2
, &ts
);
5366 case TARGET_NR_nanosleep
:
5368 struct timespec req
, rem
;
5369 target_to_host_timespec(&req
, arg1
);
5370 ret
= get_errno(nanosleep(&req
, &rem
));
5371 if (is_error(ret
) && arg2
) {
5372 host_to_target_timespec(arg2
, &rem
);
5376 #ifdef TARGET_NR_query_module
5377 case TARGET_NR_query_module
:
5380 #ifdef TARGET_NR_nfsservctl
5381 case TARGET_NR_nfsservctl
:
5384 case TARGET_NR_prctl
:
5387 case PR_GET_PDEATHSIG
:
5390 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
5391 if (!is_error(ret
) && arg2
5392 && put_user_ual(deathsig
, arg2
))
5397 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
5401 #ifdef TARGET_NR_arch_prctl
5402 case TARGET_NR_arch_prctl
:
5403 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
5404 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
5410 #ifdef TARGET_NR_pread
5411 case TARGET_NR_pread
:
5413 if (((CPUARMState
*)cpu_env
)->eabi
)
5416 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5418 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
5419 unlock_user(p
, arg2
, ret
);
5421 case TARGET_NR_pwrite
:
5423 if (((CPUARMState
*)cpu_env
)->eabi
)
5426 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5428 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
5429 unlock_user(p
, arg2
, 0);
5432 #ifdef TARGET_NR_pread64
5433 case TARGET_NR_pread64
:
5434 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5436 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
5437 unlock_user(p
, arg2
, ret
);
5439 case TARGET_NR_pwrite64
:
5440 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5442 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
5443 unlock_user(p
, arg2
, 0);
5446 case TARGET_NR_getcwd
:
5447 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
5449 ret
= get_errno(sys_getcwd1(p
, arg2
));
5450 unlock_user(p
, arg1
, ret
);
5452 case TARGET_NR_capget
:
5454 case TARGET_NR_capset
:
5456 case TARGET_NR_sigaltstack
:
5457 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
5458 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
5459 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
5464 case TARGET_NR_sendfile
:
5466 #ifdef TARGET_NR_getpmsg
5467 case TARGET_NR_getpmsg
:
5470 #ifdef TARGET_NR_putpmsg
5471 case TARGET_NR_putpmsg
:
5474 #ifdef TARGET_NR_vfork
5475 case TARGET_NR_vfork
:
5476 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
5480 #ifdef TARGET_NR_ugetrlimit
5481 case TARGET_NR_ugetrlimit
:
5484 ret
= get_errno(getrlimit(arg1
, &rlim
));
5485 if (!is_error(ret
)) {
5486 struct target_rlimit
*target_rlim
;
5487 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5489 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
5490 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
5491 unlock_user_struct(target_rlim
, arg2
, 1);
5496 #ifdef TARGET_NR_truncate64
5497 case TARGET_NR_truncate64
:
5498 if (!(p
= lock_user_string(arg1
)))
5500 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
5501 unlock_user(p
, arg1
, 0);
5504 #ifdef TARGET_NR_ftruncate64
5505 case TARGET_NR_ftruncate64
:
5506 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
5509 #ifdef TARGET_NR_stat64
5510 case TARGET_NR_stat64
:
5511 if (!(p
= lock_user_string(arg1
)))
5513 ret
= get_errno(stat(path(p
), &st
));
5514 unlock_user(p
, arg1
, 0);
5516 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
5519 #ifdef TARGET_NR_lstat64
5520 case TARGET_NR_lstat64
:
5521 if (!(p
= lock_user_string(arg1
)))
5523 ret
= get_errno(lstat(path(p
), &st
));
5524 unlock_user(p
, arg1
, 0);
5526 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
5529 #ifdef TARGET_NR_fstat64
5530 case TARGET_NR_fstat64
:
5531 ret
= get_errno(fstat(arg1
, &st
));
5533 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
5536 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
5537 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
5538 #ifdef TARGET_NR_fstatat64
5539 case TARGET_NR_fstatat64
:
5541 #ifdef TARGET_NR_newfstatat
5542 case TARGET_NR_newfstatat
:
5544 if (!(p
= lock_user_string(arg2
)))
5546 #ifdef __NR_fstatat64
5547 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
5549 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
5552 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
5556 case TARGET_NR_lchown
:
5557 if (!(p
= lock_user_string(arg1
)))
5559 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
5560 unlock_user(p
, arg1
, 0);
5562 case TARGET_NR_getuid
:
5563 ret
= get_errno(high2lowuid(getuid()));
5565 case TARGET_NR_getgid
:
5566 ret
= get_errno(high2lowgid(getgid()));
5568 case TARGET_NR_geteuid
:
5569 ret
= get_errno(high2lowuid(geteuid()));
5571 case TARGET_NR_getegid
:
5572 ret
= get_errno(high2lowgid(getegid()));
5574 case TARGET_NR_setreuid
:
5575 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
5577 case TARGET_NR_setregid
:
5578 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
5580 case TARGET_NR_getgroups
:
5582 int gidsetsize
= arg1
;
5583 uint16_t *target_grouplist
;
5587 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
5588 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
5589 if (gidsetsize
== 0)
5591 if (!is_error(ret
)) {
5592 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
5593 if (!target_grouplist
)
5595 for(i
= 0;i
< ret
; i
++)
5596 target_grouplist
[i
] = tswap16(grouplist
[i
]);
5597 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
5601 case TARGET_NR_setgroups
:
5603 int gidsetsize
= arg1
;
5604 uint16_t *target_grouplist
;
5608 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
5609 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
5610 if (!target_grouplist
) {
5611 ret
= -TARGET_EFAULT
;
5614 for(i
= 0;i
< gidsetsize
; i
++)
5615 grouplist
[i
] = tswap16(target_grouplist
[i
]);
5616 unlock_user(target_grouplist
, arg2
, 0);
5617 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
5620 case TARGET_NR_fchown
:
5621 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
5623 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
5624 case TARGET_NR_fchownat
:
5625 if (!(p
= lock_user_string(arg2
)))
5627 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
5628 unlock_user(p
, arg2
, 0);
5631 #ifdef TARGET_NR_setresuid
5632 case TARGET_NR_setresuid
:
5633 ret
= get_errno(setresuid(low2highuid(arg1
),
5635 low2highuid(arg3
)));
5638 #ifdef TARGET_NR_getresuid
5639 case TARGET_NR_getresuid
:
5641 uid_t ruid
, euid
, suid
;
5642 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
5643 if (!is_error(ret
)) {
5644 if (put_user_u16(high2lowuid(ruid
), arg1
)
5645 || put_user_u16(high2lowuid(euid
), arg2
)
5646 || put_user_u16(high2lowuid(suid
), arg3
))
5652 #ifdef TARGET_NR_getresgid
5653 case TARGET_NR_setresgid
:
5654 ret
= get_errno(setresgid(low2highgid(arg1
),
5656 low2highgid(arg3
)));
5659 #ifdef TARGET_NR_getresgid
5660 case TARGET_NR_getresgid
:
5662 gid_t rgid
, egid
, sgid
;
5663 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
5664 if (!is_error(ret
)) {
5665 if (put_user_u16(high2lowgid(rgid
), arg1
)
5666 || put_user_u16(high2lowgid(egid
), arg2
)
5667 || put_user_u16(high2lowgid(sgid
), arg3
))
5673 case TARGET_NR_chown
:
5674 if (!(p
= lock_user_string(arg1
)))
5676 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
5677 unlock_user(p
, arg1
, 0);
5679 case TARGET_NR_setuid
:
5680 ret
= get_errno(setuid(low2highuid(arg1
)));
5682 case TARGET_NR_setgid
:
5683 ret
= get_errno(setgid(low2highgid(arg1
)));
5685 case TARGET_NR_setfsuid
:
5686 ret
= get_errno(setfsuid(arg1
));
5688 case TARGET_NR_setfsgid
:
5689 ret
= get_errno(setfsgid(arg1
));
5691 #endif /* USE_UID16 */
5693 #ifdef TARGET_NR_lchown32
5694 case TARGET_NR_lchown32
:
5695 if (!(p
= lock_user_string(arg1
)))
5697 ret
= get_errno(lchown(p
, arg2
, arg3
));
5698 unlock_user(p
, arg1
, 0);
5701 #ifdef TARGET_NR_getuid32
5702 case TARGET_NR_getuid32
:
5703 ret
= get_errno(getuid());
5707 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
5708 /* Alpha specific */
5709 case TARGET_NR_getxuid
:
5713 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
5715 ret
= get_errno(getuid());
5718 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
5719 /* Alpha specific */
5720 case TARGET_NR_getxgid
:
5724 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
5726 ret
= get_errno(getgid());
5730 #ifdef TARGET_NR_getgid32
5731 case TARGET_NR_getgid32
:
5732 ret
= get_errno(getgid());
5735 #ifdef TARGET_NR_geteuid32
5736 case TARGET_NR_geteuid32
:
5737 ret
= get_errno(geteuid());
5740 #ifdef TARGET_NR_getegid32
5741 case TARGET_NR_getegid32
:
5742 ret
= get_errno(getegid());
5745 #ifdef TARGET_NR_setreuid32
5746 case TARGET_NR_setreuid32
:
5747 ret
= get_errno(setreuid(arg1
, arg2
));
5750 #ifdef TARGET_NR_setregid32
5751 case TARGET_NR_setregid32
:
5752 ret
= get_errno(setregid(arg1
, arg2
));
5755 #ifdef TARGET_NR_getgroups32
5756 case TARGET_NR_getgroups32
:
5758 int gidsetsize
= arg1
;
5759 uint32_t *target_grouplist
;
5763 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
5764 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
5765 if (gidsetsize
== 0)
5767 if (!is_error(ret
)) {
5768 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
5769 if (!target_grouplist
) {
5770 ret
= -TARGET_EFAULT
;
5773 for(i
= 0;i
< ret
; i
++)
5774 target_grouplist
[i
] = tswap32(grouplist
[i
]);
5775 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
5780 #ifdef TARGET_NR_setgroups32
5781 case TARGET_NR_setgroups32
:
5783 int gidsetsize
= arg1
;
5784 uint32_t *target_grouplist
;
5788 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
5789 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
5790 if (!target_grouplist
) {
5791 ret
= -TARGET_EFAULT
;
5794 for(i
= 0;i
< gidsetsize
; i
++)
5795 grouplist
[i
] = tswap32(target_grouplist
[i
]);
5796 unlock_user(target_grouplist
, arg2
, 0);
5797 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
5801 #ifdef TARGET_NR_fchown32
5802 case TARGET_NR_fchown32
:
5803 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
5806 #ifdef TARGET_NR_setresuid32
5807 case TARGET_NR_setresuid32
:
5808 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
5811 #ifdef TARGET_NR_getresuid32
5812 case TARGET_NR_getresuid32
:
5814 uid_t ruid
, euid
, suid
;
5815 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
5816 if (!is_error(ret
)) {
5817 if (put_user_u32(ruid
, arg1
)
5818 || put_user_u32(euid
, arg2
)
5819 || put_user_u32(suid
, arg3
))
5825 #ifdef TARGET_NR_setresgid32
5826 case TARGET_NR_setresgid32
:
5827 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
5830 #ifdef TARGET_NR_getresgid32
5831 case TARGET_NR_getresgid32
:
5833 gid_t rgid
, egid
, sgid
;
5834 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
5835 if (!is_error(ret
)) {
5836 if (put_user_u32(rgid
, arg1
)
5837 || put_user_u32(egid
, arg2
)
5838 || put_user_u32(sgid
, arg3
))
5844 #ifdef TARGET_NR_chown32
5845 case TARGET_NR_chown32
:
5846 if (!(p
= lock_user_string(arg1
)))
5848 ret
= get_errno(chown(p
, arg2
, arg3
));
5849 unlock_user(p
, arg1
, 0);
5852 #ifdef TARGET_NR_setuid32
5853 case TARGET_NR_setuid32
:
5854 ret
= get_errno(setuid(arg1
));
5857 #ifdef TARGET_NR_setgid32
5858 case TARGET_NR_setgid32
:
5859 ret
= get_errno(setgid(arg1
));
5862 #ifdef TARGET_NR_setfsuid32
5863 case TARGET_NR_setfsuid32
:
5864 ret
= get_errno(setfsuid(arg1
));
5867 #ifdef TARGET_NR_setfsgid32
5868 case TARGET_NR_setfsgid32
:
5869 ret
= get_errno(setfsgid(arg1
));
5873 case TARGET_NR_pivot_root
:
5875 #ifdef TARGET_NR_mincore
5876 case TARGET_NR_mincore
:
5879 ret
= -TARGET_EFAULT
;
5880 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
5882 if (!(p
= lock_user_string(arg3
)))
5884 ret
= get_errno(mincore(a
, arg2
, p
));
5885 unlock_user(p
, arg3
, ret
);
5887 unlock_user(a
, arg1
, 0);
5891 #ifdef TARGET_NR_arm_fadvise64_64
5892 case TARGET_NR_arm_fadvise64_64
:
5895 * arm_fadvise64_64 looks like fadvise64_64 but
5896 * with different argument order
5904 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
5905 #ifdef TARGET_NR_fadvise64_64
5906 case TARGET_NR_fadvise64_64
:
5908 /* This is a hint, so ignoring and returning success is ok. */
5912 #ifdef TARGET_NR_madvise
5913 case TARGET_NR_madvise
:
5914 /* A straight passthrough may not be safe because qemu sometimes
5915 turns private file-backed mappings into anonymous mappings.
5916 This will break MADV_DONTNEED.
5917 This is a hint, so ignoring and returning success is ok. */
5921 #if TARGET_ABI_BITS == 32
5922 case TARGET_NR_fcntl64
:
5926 struct target_flock64
*target_fl
;
5928 struct target_eabi_flock64
*target_efl
;
5932 case TARGET_F_GETLK64
:
5935 case TARGET_F_SETLK64
:
5938 case TARGET_F_SETLKW64
:
5947 case TARGET_F_GETLK64
:
5949 if (((CPUARMState
*)cpu_env
)->eabi
) {
5950 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
5952 fl
.l_type
= tswap16(target_efl
->l_type
);
5953 fl
.l_whence
= tswap16(target_efl
->l_whence
);
5954 fl
.l_start
= tswap64(target_efl
->l_start
);
5955 fl
.l_len
= tswap64(target_efl
->l_len
);
5956 fl
.l_pid
= tswapl(target_efl
->l_pid
);
5957 unlock_user_struct(target_efl
, arg3
, 0);
5961 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
5963 fl
.l_type
= tswap16(target_fl
->l_type
);
5964 fl
.l_whence
= tswap16(target_fl
->l_whence
);
5965 fl
.l_start
= tswap64(target_fl
->l_start
);
5966 fl
.l_len
= tswap64(target_fl
->l_len
);
5967 fl
.l_pid
= tswapl(target_fl
->l_pid
);
5968 unlock_user_struct(target_fl
, arg3
, 0);
5970 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
5973 if (((CPUARMState
*)cpu_env
)->eabi
) {
5974 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
5976 target_efl
->l_type
= tswap16(fl
.l_type
);
5977 target_efl
->l_whence
= tswap16(fl
.l_whence
);
5978 target_efl
->l_start
= tswap64(fl
.l_start
);
5979 target_efl
->l_len
= tswap64(fl
.l_len
);
5980 target_efl
->l_pid
= tswapl(fl
.l_pid
);
5981 unlock_user_struct(target_efl
, arg3
, 1);
5985 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
5987 target_fl
->l_type
= tswap16(fl
.l_type
);
5988 target_fl
->l_whence
= tswap16(fl
.l_whence
);
5989 target_fl
->l_start
= tswap64(fl
.l_start
);
5990 target_fl
->l_len
= tswap64(fl
.l_len
);
5991 target_fl
->l_pid
= tswapl(fl
.l_pid
);
5992 unlock_user_struct(target_fl
, arg3
, 1);
5997 case TARGET_F_SETLK64
:
5998 case TARGET_F_SETLKW64
:
6000 if (((CPUARMState
*)cpu_env
)->eabi
) {
6001 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6003 fl
.l_type
= tswap16(target_efl
->l_type
);
6004 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6005 fl
.l_start
= tswap64(target_efl
->l_start
);
6006 fl
.l_len
= tswap64(target_efl
->l_len
);
6007 fl
.l_pid
= tswapl(target_efl
->l_pid
);
6008 unlock_user_struct(target_efl
, arg3
, 0);
6012 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6014 fl
.l_type
= tswap16(target_fl
->l_type
);
6015 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6016 fl
.l_start
= tswap64(target_fl
->l_start
);
6017 fl
.l_len
= tswap64(target_fl
->l_len
);
6018 fl
.l_pid
= tswapl(target_fl
->l_pid
);
6019 unlock_user_struct(target_fl
, arg3
, 0);
6021 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6024 ret
= do_fcntl(arg1
, cmd
, arg3
);
6030 #ifdef TARGET_NR_cacheflush
6031 case TARGET_NR_cacheflush
:
6032 /* self-modifying code is handled automatically, so nothing needed */
6036 #ifdef TARGET_NR_security
6037 case TARGET_NR_security
:
6040 #ifdef TARGET_NR_getpagesize
6041 case TARGET_NR_getpagesize
:
6042 ret
= TARGET_PAGE_SIZE
;
6045 case TARGET_NR_gettid
:
6046 ret
= get_errno(gettid());
6048 #ifdef TARGET_NR_readahead
6049 case TARGET_NR_readahead
:
6050 #if TARGET_ABI_BITS == 32
6052 if (((CPUARMState
*)cpu_env
)->eabi
)
6059 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
6061 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
6065 #ifdef TARGET_NR_setxattr
6066 case TARGET_NR_setxattr
:
6067 case TARGET_NR_lsetxattr
:
6068 case TARGET_NR_fsetxattr
:
6069 case TARGET_NR_getxattr
:
6070 case TARGET_NR_lgetxattr
:
6071 case TARGET_NR_fgetxattr
:
6072 case TARGET_NR_listxattr
:
6073 case TARGET_NR_llistxattr
:
6074 case TARGET_NR_flistxattr
:
6075 case TARGET_NR_removexattr
:
6076 case TARGET_NR_lremovexattr
:
6077 case TARGET_NR_fremovexattr
:
6078 goto unimplemented_nowarn
;
6080 #ifdef TARGET_NR_set_thread_area
6081 case TARGET_NR_set_thread_area
:
6082 #if defined(TARGET_MIPS)
6083 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
6086 #elif defined(TARGET_CRIS)
6088 ret
= -TARGET_EINVAL
;
6090 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
6094 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6095 ret
= do_set_thread_area(cpu_env
, arg1
);
6098 goto unimplemented_nowarn
;
6101 #ifdef TARGET_NR_get_thread_area
6102 case TARGET_NR_get_thread_area
:
6103 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6104 ret
= do_get_thread_area(cpu_env
, arg1
);
6106 goto unimplemented_nowarn
;
6109 #ifdef TARGET_NR_getdomainname
6110 case TARGET_NR_getdomainname
:
6111 goto unimplemented_nowarn
;
6114 #ifdef TARGET_NR_clock_gettime
6115 case TARGET_NR_clock_gettime
:
6118 ret
= get_errno(clock_gettime(arg1
, &ts
));
6119 if (!is_error(ret
)) {
6120 host_to_target_timespec(arg2
, &ts
);
6125 #ifdef TARGET_NR_clock_getres
6126 case TARGET_NR_clock_getres
:
6129 ret
= get_errno(clock_getres(arg1
, &ts
));
6130 if (!is_error(ret
)) {
6131 host_to_target_timespec(arg2
, &ts
);
6136 #ifdef TARGET_NR_clock_nanosleep
6137 case TARGET_NR_clock_nanosleep
:
6140 target_to_host_timespec(&ts
, arg3
);
6141 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
6143 host_to_target_timespec(arg4
, &ts
);
6148 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6149 case TARGET_NR_set_tid_address
:
6150 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
6154 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6155 case TARGET_NR_tkill
:
6156 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
6160 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6161 case TARGET_NR_tgkill
:
6162 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
6163 target_to_host_signal(arg3
)));
6167 #ifdef TARGET_NR_set_robust_list
6168 case TARGET_NR_set_robust_list
:
6169 goto unimplemented_nowarn
;
6172 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6173 case TARGET_NR_utimensat
:
6175 struct timespec ts
[2];
6176 target_to_host_timespec(ts
, arg3
);
6177 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
6179 ret
= get_errno(sys_utimensat(arg1
, NULL
, ts
, arg4
));
6181 if (!(p
= lock_user_string(arg2
))) {
6182 ret
= -TARGET_EFAULT
;
6185 ret
= get_errno(sys_utimensat(arg1
, path(p
), ts
, arg4
));
6186 unlock_user(p
, arg2
, 0);
6191 #if defined(USE_NPTL)
6192 case TARGET_NR_futex
:
6193 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6196 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
6197 case TARGET_NR_inotify_init
:
6198 ret
= get_errno(sys_inotify_init());
6201 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
6202 case TARGET_NR_inotify_add_watch
:
6203 p
= lock_user_string(arg2
);
6204 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
6205 unlock_user(p
, arg2
, 0);
6208 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
6209 case TARGET_NR_inotify_rm_watch
:
6210 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
6214 #ifdef TARGET_NR_mq_open
6215 case TARGET_NR_mq_open
:
6217 struct mq_attr posix_mq_attr
;
6219 p
= lock_user_string(arg1
- 1);
6221 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
6222 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
6223 unlock_user (p
, arg1
, 0);
6227 case TARGET_NR_mq_unlink
:
6228 p
= lock_user_string(arg1
- 1);
6229 ret
= get_errno(mq_unlink(p
));
6230 unlock_user (p
, arg1
, 0);
6233 case TARGET_NR_mq_timedsend
:
6237 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
6239 target_to_host_timespec(&ts
, arg5
);
6240 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
6241 host_to_target_timespec(arg5
, &ts
);
6244 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
6245 unlock_user (p
, arg2
, arg3
);
6249 case TARGET_NR_mq_timedreceive
:
6254 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
6256 target_to_host_timespec(&ts
, arg5
);
6257 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
6258 host_to_target_timespec(arg5
, &ts
);
6261 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
6262 unlock_user (p
, arg2
, arg3
);
6264 put_user_u32(prio
, arg4
);
6268 /* Not implemented for now... */
6269 /* case TARGET_NR_mq_notify: */
6272 case TARGET_NR_mq_getsetattr
:
6274 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
6277 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
6278 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
6281 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
6282 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
6291 gemu_log("qemu: Unsupported syscall: %d\n", num
);
6292 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6293 unimplemented_nowarn
:
6295 ret
= -TARGET_ENOSYS
;
6300 gemu_log(" = %ld\n", ret
);
6303 print_syscall_ret(num
, ret
);
6306 ret
= -TARGET_EFAULT
;