linux-user: Implement sendfile and sendfile64
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <signal.h>
47 #include <sched.h>
48 #ifdef __ia64__
49 int __clone2(int (*fn)(void *), void *child_stack_base,
50 size_t stack_size, int flags, void *arg, ...);
51 #endif
52 #include <sys/socket.h>
53 #include <sys/un.h>
54 #include <sys/uio.h>
55 #include <sys/poll.h>
56 #include <sys/times.h>
57 #include <sys/shm.h>
58 #include <sys/sem.h>
59 #include <sys/statfs.h>
60 #include <utime.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
69 #ifdef TARGET_GPROF
70 #include <sys/gmon.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include "linux_loop.h"
109 #include "cpu-uname.h"
110
111 #include "qemu.h"
112
113 #if defined(CONFIG_USE_NPTL)
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
116 #else
117 /* XXX: Hardcode the above values. */
118 #define CLONE_NPTL_FLAGS2 0
119 #endif
120
121 //#define DEBUG
122
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
126
127
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
135
136 #define _syscall0(type,name) \
137 static type name (void) \
138 { \
139 return syscall(__NR_##name); \
140 }
141
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
144 { \
145 return syscall(__NR_##name, arg1); \
146 }
147
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
150 { \
151 return syscall(__NR_##name, arg1, arg2); \
152 }
153
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
156 { \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
158 }
159
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
162 { \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
164 }
165
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
169 { \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
171 }
172
173
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
178 { \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
180 }
181
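/*
 * For illustration: a _syscallN() invocation such as the sys_getdents
 * wrapper declared further down expands to a thin static wrapper around
 * the raw syscall(2) entry point, e.g.
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * becomes, roughly:
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * where __NR_sys_getdents is mapped back to the host's __NR_getdents by the
 * #define block below.  (Sketch only; the exact expansion follows from the
 * macro definitions above.)
 */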
182
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_faccessat __NR_faccessat
185 #define __NR_sys_fchmodat __NR_fchmodat
186 #define __NR_sys_fchownat __NR_fchownat
187 #define __NR_sys_fstatat64 __NR_fstatat64
188 #define __NR_sys_futimesat __NR_futimesat
189 #define __NR_sys_getcwd1 __NR_getcwd
190 #define __NR_sys_getdents __NR_getdents
191 #define __NR_sys_getdents64 __NR_getdents64
192 #define __NR_sys_getpriority __NR_getpriority
193 #define __NR_sys_linkat __NR_linkat
194 #define __NR_sys_mkdirat __NR_mkdirat
195 #define __NR_sys_mknodat __NR_mknodat
196 #define __NR_sys_newfstatat __NR_newfstatat
197 #define __NR_sys_openat __NR_openat
198 #define __NR_sys_readlinkat __NR_readlinkat
199 #define __NR_sys_renameat __NR_renameat
200 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
201 #define __NR_sys_symlinkat __NR_symlinkat
202 #define __NR_sys_syslog __NR_syslog
203 #define __NR_sys_tgkill __NR_tgkill
204 #define __NR_sys_tkill __NR_tkill
205 #define __NR_sys_unlinkat __NR_unlinkat
206 #define __NR_sys_utimensat __NR_utimensat
207 #define __NR_sys_futex __NR_futex
208 #define __NR_sys_inotify_init __NR_inotify_init
209 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
210 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
211
212 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
213 defined(__s390x__)
214 #define __NR__llseek __NR_lseek
215 #endif
216
217 #ifdef __NR_gettid
218 _syscall0(int, gettid)
219 #else
220 /* This is a replacement for the host gettid() and must return a host
221 errno. */
222 static int gettid(void) {
223 return -ENOSYS;
224 }
225 #endif
226 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
227 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
228 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
229 #endif
230 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
231 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
232 loff_t *, res, uint, wh);
233 #endif
234 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
235 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
236 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
237 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
238 #endif
239 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
240 _syscall2(int,sys_tkill,int,tid,int,sig)
241 #endif
242 #ifdef __NR_exit_group
243 _syscall1(int,exit_group,int,error_code)
244 #endif
245 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
246 _syscall1(int,set_tid_address,int *,tidptr)
247 #endif
248 #if defined(CONFIG_USE_NPTL)
249 #if defined(TARGET_NR_futex) && defined(__NR_futex)
250 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
251 const struct timespec *,timeout,int *,uaddr2,int,val3)
252 #endif
253 #endif
254 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
255 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
256 unsigned long *, user_mask_ptr);
257 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
258 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
259 unsigned long *, user_mask_ptr);
260 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
261 void *, arg);
262
263 static bitmask_transtbl fcntl_flags_tbl[] = {
264 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
265 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
266 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
267 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
268 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
269 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
270 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
271 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
272 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
273 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
274 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
275 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
276 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
279 #endif
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
282 #endif
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
285 #endif
286 #if defined(O_PATH)
287 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
288 #endif
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
292 #endif
293 { 0, 0, 0, 0 }
294 };
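/*
 * Each fcntl_flags_tbl entry reads as { target_mask, target_bits, host_mask,
 * host_bits }: when the guest flag word matches target_bits under
 * target_mask, the corresponding host bits are set, and vice versa for the
 * reverse direction.  Presumably the table is consumed by the generic
 * target_to_host_bitmask()/host_to_target_bitmask() helpers defined
 * elsewhere; as an illustration only, a guest open() passing
 * TARGET_O_CREAT|TARGET_O_NONBLOCK would be translated entry by entry into
 * the host's O_CREAT|O_NONBLOCK.
 */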
295
296 #define COPY_UTSNAME_FIELD(dest, src) \
297 do { \
298 /* __NEW_UTS_LEN doesn't include terminating null */ \
299 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
300 (dest)[__NEW_UTS_LEN] = '\0'; \
301 } while (0)
302
303 static int sys_uname(struct new_utsname *buf)
304 {
305 struct utsname uts_buf;
306
307 if (uname(&uts_buf) < 0)
308 return (-1);
309
310 /*
311  * Just in case these have some differences, we
312  * translate utsname to new_utsname (which is the
313  * struct the Linux kernel uses).
314 */
315
316 memset(buf, 0, sizeof(*buf));
317 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
318 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
319 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
320 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
321 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
322 #ifdef _GNU_SOURCE
323 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
324 #endif
325 return (0);
326
327 #undef COPY_UTSNAME_FIELD
328 }
329
330 static int sys_getcwd1(char *buf, size_t size)
331 {
332 if (getcwd(buf, size) == NULL) {
333 /* getcwd() sets errno */
334 return (-1);
335 }
336 return strlen(buf)+1;
337 }
338
339 #ifdef CONFIG_ATFILE
340 /*
341  * The host system seems to have the *at() syscall stubs available.
342  * We now enable them one by one as required by the target's syscall_nr.h.
343 */
344
345 #ifdef TARGET_NR_faccessat
346 static int sys_faccessat(int dirfd, const char *pathname, int mode)
347 {
348 return (faccessat(dirfd, pathname, mode, 0));
349 }
350 #endif
351 #ifdef TARGET_NR_fchmodat
352 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
353 {
354 return (fchmodat(dirfd, pathname, mode, 0));
355 }
356 #endif
357 #if defined(TARGET_NR_fchownat)
358 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
359 gid_t group, int flags)
360 {
361 return (fchownat(dirfd, pathname, owner, group, flags));
362 }
363 #endif
364 #ifdef __NR_fstatat64
365 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
366 int flags)
367 {
368 return (fstatat(dirfd, pathname, buf, flags));
369 }
370 #endif
371 #ifdef __NR_newfstatat
372 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
373 int flags)
374 {
375 return (fstatat(dirfd, pathname, buf, flags));
376 }
377 #endif
378 #ifdef TARGET_NR_futimesat
379 static int sys_futimesat(int dirfd, const char *pathname,
380 const struct timeval times[2])
381 {
382 return (futimesat(dirfd, pathname, times));
383 }
384 #endif
385 #ifdef TARGET_NR_linkat
386 static int sys_linkat(int olddirfd, const char *oldpath,
387 int newdirfd, const char *newpath, int flags)
388 {
389 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
390 }
391 #endif
392 #ifdef TARGET_NR_mkdirat
393 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
394 {
395 return (mkdirat(dirfd, pathname, mode));
396 }
397 #endif
398 #ifdef TARGET_NR_mknodat
399 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
400 dev_t dev)
401 {
402 return (mknodat(dirfd, pathname, mode, dev));
403 }
404 #endif
405 #ifdef TARGET_NR_openat
406 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
407 {
408 /*
409  * open(2) takes an extra 'mode' parameter when called with
410  * the O_CREAT flag.
411 */
412 if ((flags & O_CREAT) != 0) {
413 return (openat(dirfd, pathname, flags, mode));
414 }
415 return (openat(dirfd, pathname, flags));
416 }
417 #endif
418 #ifdef TARGET_NR_readlinkat
419 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
420 {
421 return (readlinkat(dirfd, pathname, buf, bufsiz));
422 }
423 #endif
424 #ifdef TARGET_NR_renameat
425 static int sys_renameat(int olddirfd, const char *oldpath,
426 int newdirfd, const char *newpath)
427 {
428 return (renameat(olddirfd, oldpath, newdirfd, newpath));
429 }
430 #endif
431 #ifdef TARGET_NR_symlinkat
432 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
433 {
434 return (symlinkat(oldpath, newdirfd, newpath));
435 }
436 #endif
437 #ifdef TARGET_NR_unlinkat
438 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
439 {
440 return (unlinkat(dirfd, pathname, flags));
441 }
442 #endif
443 #else /* !CONFIG_ATFILE */
444
445 /*
446 * Try direct syscalls instead
447 */
448 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
449 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
450 #endif
451 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
452 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
453 #endif
454 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
455 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
456 uid_t,owner,gid_t,group,int,flags)
457 #endif
458 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
459 defined(__NR_fstatat64)
460 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
461 struct stat *,buf,int,flags)
462 #endif
463 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
464 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
465 const struct timeval *,times)
466 #endif
467 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
468 defined(__NR_newfstatat)
469 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
470 struct stat *,buf,int,flags)
471 #endif
472 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
473 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
474 int,newdirfd,const char *,newpath,int,flags)
475 #endif
476 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
477 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
478 #endif
479 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
480 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
481 mode_t,mode,dev_t,dev)
482 #endif
483 #if defined(TARGET_NR_openat) && defined(__NR_openat)
484 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
485 #endif
486 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
487 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
488 char *,buf,size_t,bufsize)
489 #endif
490 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
491 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
492 int,newdirfd,const char *,newpath)
493 #endif
494 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
495 _syscall3(int,sys_symlinkat,const char *,oldpath,
496 int,newdirfd,const char *,newpath)
497 #endif
498 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
499 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
500 #endif
501
502 #endif /* CONFIG_ATFILE */
503
504 #ifdef CONFIG_UTIMENSAT
505 static int sys_utimensat(int dirfd, const char *pathname,
506 const struct timespec times[2], int flags)
507 {
508 if (pathname == NULL)
509 return futimens(dirfd, times);
510 else
511 return utimensat(dirfd, pathname, times, flags);
512 }
513 #else
514 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
515 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
516 const struct timespec *,tsp,int,flags)
517 #endif
518 #endif /* CONFIG_UTIMENSAT */
519
520 #ifdef CONFIG_INOTIFY
521 #include <sys/inotify.h>
522
523 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
524 static int sys_inotify_init(void)
525 {
526 return (inotify_init());
527 }
528 #endif
529 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
530 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
531 {
532 return (inotify_add_watch(fd, pathname, mask));
533 }
534 #endif
535 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
536 static int sys_inotify_rm_watch(int fd, int32_t wd)
537 {
538 return (inotify_rm_watch(fd, wd));
539 }
540 #endif
541 #ifdef CONFIG_INOTIFY1
542 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
543 static int sys_inotify_init1(int flags)
544 {
545 return (inotify_init1(flags));
546 }
547 #endif
548 #endif
549 #else
550 /* Userspace can usually survive runtime without inotify */
551 #undef TARGET_NR_inotify_init
552 #undef TARGET_NR_inotify_init1
553 #undef TARGET_NR_inotify_add_watch
554 #undef TARGET_NR_inotify_rm_watch
555 #endif /* CONFIG_INOTIFY */
556
557 #if defined(TARGET_NR_ppoll)
558 #ifndef __NR_ppoll
559 # define __NR_ppoll -1
560 #endif
561 #define __NR_sys_ppoll __NR_ppoll
562 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
563 struct timespec *, timeout, const __sigset_t *, sigmask,
564 size_t, sigsetsize)
565 #endif
566
567 #if defined(TARGET_NR_pselect6)
568 #ifndef __NR_pselect6
569 # define __NR_pselect6 -1
570 #endif
571 #define __NR_sys_pselect6 __NR_pselect6
572 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
573 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
574 #endif
575
576 #if defined(TARGET_NR_prlimit64)
577 #ifndef __NR_prlimit64
578 # define __NR_prlimit64 -1
579 #endif
580 #define __NR_sys_prlimit64 __NR_prlimit64
581 /* The glibc rlimit structure may not match the one used by the underlying syscall */
582 struct host_rlimit64 {
583 uint64_t rlim_cur;
584 uint64_t rlim_max;
585 };
586 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
587 const struct host_rlimit64 *, new_limit,
588 struct host_rlimit64 *, old_limit)
589 #endif
590
591 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
592 #ifdef TARGET_ARM
593 static inline int regpairs_aligned(void *cpu_env) {
594 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
595 }
596 #elif defined(TARGET_MIPS)
597 static inline int regpairs_aligned(void *cpu_env) { return 1; }
598 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
599 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
600  * of registers, which translates to the same as ARM/MIPS because we start with
601  * r3 as arg1 */
602 static inline int regpairs_aligned(void *cpu_env) { return 1; }
603 #else
604 static inline int regpairs_aligned(void *cpu_env) { return 0; }
605 #endif
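/*
 * Illustrative note (not tied to a specific syscall in this excerpt): when
 * regpairs_aligned() returns 1, a 64-bit syscall argument is expected to
 * start in an even-numbered argument slot, so the guest ABI inserts a
 * padding register.  E.g. for a call like pread64(fd, buf, count, offset)
 * on ARM EABI the 64-bit offset occupies an aligned register pair and the
 * slot before it is skipped; the emulation is then expected to skip that
 * dummy argument before reassembling the 64-bit value.
 */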
606
607 #define ERRNO_TABLE_SIZE 1200
608
609 /* target_to_host_errno_table[] is initialized from
610 * host_to_target_errno_table[] in syscall_init(). */
611 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
612 };
613
614 /*
615 * This list is the union of errno values overridden in asm-<arch>/errno.h
616 * minus the errnos that are not actually generic to all archs.
617 */
618 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
619 [EIDRM] = TARGET_EIDRM,
620 [ECHRNG] = TARGET_ECHRNG,
621 [EL2NSYNC] = TARGET_EL2NSYNC,
622 [EL3HLT] = TARGET_EL3HLT,
623 [EL3RST] = TARGET_EL3RST,
624 [ELNRNG] = TARGET_ELNRNG,
625 [EUNATCH] = TARGET_EUNATCH,
626 [ENOCSI] = TARGET_ENOCSI,
627 [EL2HLT] = TARGET_EL2HLT,
628 [EDEADLK] = TARGET_EDEADLK,
629 [ENOLCK] = TARGET_ENOLCK,
630 [EBADE] = TARGET_EBADE,
631 [EBADR] = TARGET_EBADR,
632 [EXFULL] = TARGET_EXFULL,
633 [ENOANO] = TARGET_ENOANO,
634 [EBADRQC] = TARGET_EBADRQC,
635 [EBADSLT] = TARGET_EBADSLT,
636 [EBFONT] = TARGET_EBFONT,
637 [ENOSTR] = TARGET_ENOSTR,
638 [ENODATA] = TARGET_ENODATA,
639 [ETIME] = TARGET_ETIME,
640 [ENOSR] = TARGET_ENOSR,
641 [ENONET] = TARGET_ENONET,
642 [ENOPKG] = TARGET_ENOPKG,
643 [EREMOTE] = TARGET_EREMOTE,
644 [ENOLINK] = TARGET_ENOLINK,
645 [EADV] = TARGET_EADV,
646 [ESRMNT] = TARGET_ESRMNT,
647 [ECOMM] = TARGET_ECOMM,
648 [EPROTO] = TARGET_EPROTO,
649 [EDOTDOT] = TARGET_EDOTDOT,
650 [EMULTIHOP] = TARGET_EMULTIHOP,
651 [EBADMSG] = TARGET_EBADMSG,
652 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
653 [EOVERFLOW] = TARGET_EOVERFLOW,
654 [ENOTUNIQ] = TARGET_ENOTUNIQ,
655 [EBADFD] = TARGET_EBADFD,
656 [EREMCHG] = TARGET_EREMCHG,
657 [ELIBACC] = TARGET_ELIBACC,
658 [ELIBBAD] = TARGET_ELIBBAD,
659 [ELIBSCN] = TARGET_ELIBSCN,
660 [ELIBMAX] = TARGET_ELIBMAX,
661 [ELIBEXEC] = TARGET_ELIBEXEC,
662 [EILSEQ] = TARGET_EILSEQ,
663 [ENOSYS] = TARGET_ENOSYS,
664 [ELOOP] = TARGET_ELOOP,
665 [ERESTART] = TARGET_ERESTART,
666 [ESTRPIPE] = TARGET_ESTRPIPE,
667 [ENOTEMPTY] = TARGET_ENOTEMPTY,
668 [EUSERS] = TARGET_EUSERS,
669 [ENOTSOCK] = TARGET_ENOTSOCK,
670 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
671 [EMSGSIZE] = TARGET_EMSGSIZE,
672 [EPROTOTYPE] = TARGET_EPROTOTYPE,
673 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
674 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
675 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
676 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
677 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
678 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
679 [EADDRINUSE] = TARGET_EADDRINUSE,
680 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
681 [ENETDOWN] = TARGET_ENETDOWN,
682 [ENETUNREACH] = TARGET_ENETUNREACH,
683 [ENETRESET] = TARGET_ENETRESET,
684 [ECONNABORTED] = TARGET_ECONNABORTED,
685 [ECONNRESET] = TARGET_ECONNRESET,
686 [ENOBUFS] = TARGET_ENOBUFS,
687 [EISCONN] = TARGET_EISCONN,
688 [ENOTCONN] = TARGET_ENOTCONN,
689 [EUCLEAN] = TARGET_EUCLEAN,
690 [ENOTNAM] = TARGET_ENOTNAM,
691 [ENAVAIL] = TARGET_ENAVAIL,
692 [EISNAM] = TARGET_EISNAM,
693 [EREMOTEIO] = TARGET_EREMOTEIO,
694 [ESHUTDOWN] = TARGET_ESHUTDOWN,
695 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
696 [ETIMEDOUT] = TARGET_ETIMEDOUT,
697 [ECONNREFUSED] = TARGET_ECONNREFUSED,
698 [EHOSTDOWN] = TARGET_EHOSTDOWN,
699 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
700 [EALREADY] = TARGET_EALREADY,
701 [EINPROGRESS] = TARGET_EINPROGRESS,
702 [ESTALE] = TARGET_ESTALE,
703 [ECANCELED] = TARGET_ECANCELED,
704 [ENOMEDIUM] = TARGET_ENOMEDIUM,
705 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
706 #ifdef ENOKEY
707 [ENOKEY] = TARGET_ENOKEY,
708 #endif
709 #ifdef EKEYEXPIRED
710 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
711 #endif
712 #ifdef EKEYREVOKED
713 [EKEYREVOKED] = TARGET_EKEYREVOKED,
714 #endif
715 #ifdef EKEYREJECTED
716 [EKEYREJECTED] = TARGET_EKEYREJECTED,
717 #endif
718 #ifdef EOWNERDEAD
719 [EOWNERDEAD] = TARGET_EOWNERDEAD,
720 #endif
721 #ifdef ENOTRECOVERABLE
722 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
723 #endif
724 };
725
726 static inline int host_to_target_errno(int err)
727 {
728 if(host_to_target_errno_table[err])
729 return host_to_target_errno_table[err];
730 return err;
731 }
732
733 static inline int target_to_host_errno(int err)
734 {
735 if (target_to_host_errno_table[err])
736 return target_to_host_errno_table[err];
737 return err;
738 }
739
740 static inline abi_long get_errno(abi_long ret)
741 {
742 if (ret == -1)
743 return -host_to_target_errno(errno);
744 else
745 return ret;
746 }
747
748 static inline int is_error(abi_long ret)
749 {
750 return (abi_ulong)ret >= (abi_ulong)(-4096);
751 }
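/*
 * Typical usage of the two helpers above (sketch only): host syscalls report
 * failure by returning -1 and setting errno, so wrappers do
 *
 *     ret = get_errno(some_host_syscall(...));
 *     if (is_error(ret)) {
 *         return ret;    // already a negative target errno
 *     }
 *
 * is_error() follows the kernel-style convention that the topmost ~4096
 * values of the unsigned range encode negative errnos rather than valid
 * results.
 */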
752
753 char *target_strerror(int err)
754 {
755 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
756 return NULL;
757 }
758 return strerror(target_to_host_errno(err));
759 }
760
761 static abi_ulong target_brk;
762 static abi_ulong target_original_brk;
763 static abi_ulong brk_page;
764
765 void target_set_brk(abi_ulong new_brk)
766 {
767 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
768 brk_page = HOST_PAGE_ALIGN(target_brk);
769 }
770
771 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
772 #define DEBUGF_BRK(message, args...)
773
774 /* do_brk() must return target values and target errnos. */
775 abi_long do_brk(abi_ulong new_brk)
776 {
777 abi_long mapped_addr;
778 int new_alloc_size;
779
780 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
781
782 if (!new_brk) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
784 return target_brk;
785 }
786 if (new_brk < target_original_brk) {
787 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
788 target_brk);
789 return target_brk;
790 }
791
792 /* If the new brk is less than the highest page reserved to the
793 * target heap allocation, set it and we're almost done... */
794 if (new_brk <= brk_page) {
795 /* Heap contents are initialized to zero, as for anonymous
796 * mapped pages. */
797 if (new_brk > target_brk) {
798 memset(g2h(target_brk), 0, new_brk - target_brk);
799 }
800 target_brk = new_brk;
801 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
802 return target_brk;
803 }
804
805 /* We need to allocate more memory after the brk... Note that
806 * we don't use MAP_FIXED because that will map over the top of
807 * any existing mapping (like the one with the host libc or qemu
808 * itself); instead we treat "mapped but at wrong address" as
809 * a failure and unmap again.
810 */
811 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
812 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
813 PROT_READ|PROT_WRITE,
814 MAP_ANON|MAP_PRIVATE, 0, 0));
815
816 if (mapped_addr == brk_page) {
817 /* Heap contents are initialized to zero, as for anonymous
818 * mapped pages. Technically the new pages are already
819 * initialized to zero since they *are* anonymous mapped
820 * pages, however we have to take care with the contents that
821 * come from the remaining part of the previous page: it may
822  * contain garbage data due to a previous heap usage (grown
823  * then shrunk). */
824 memset(g2h(target_brk), 0, brk_page - target_brk);
825
826 target_brk = new_brk;
827 brk_page = HOST_PAGE_ALIGN(target_brk);
828 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
829 target_brk);
830 return target_brk;
831 } else if (mapped_addr != -1) {
832 /* Mapped but at wrong address, meaning there wasn't actually
833 * enough space for this brk.
834 */
835 target_munmap(mapped_addr, new_alloc_size);
836 mapped_addr = -1;
837 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
838 }
839 else {
840 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
841 }
842
843 #if defined(TARGET_ALPHA)
844 /* We (partially) emulate OSF/1 on Alpha, which requires we
845 return a proper errno, not an unchanged brk value. */
846 return -TARGET_ENOMEM;
847 #endif
848 /* For everything else, return the previous break. */
849 return target_brk;
850 }
851
852 static inline abi_long copy_from_user_fdset(fd_set *fds,
853 abi_ulong target_fds_addr,
854 int n)
855 {
856 int i, nw, j, k;
857 abi_ulong b, *target_fds;
858
859 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
860 if (!(target_fds = lock_user(VERIFY_READ,
861 target_fds_addr,
862 sizeof(abi_ulong) * nw,
863 1)))
864 return -TARGET_EFAULT;
865
866 FD_ZERO(fds);
867 k = 0;
868 for (i = 0; i < nw; i++) {
869 /* grab the abi_ulong */
870 __get_user(b, &target_fds[i]);
871 for (j = 0; j < TARGET_ABI_BITS; j++) {
872 /* check the bit inside the abi_ulong */
873 if ((b >> j) & 1)
874 FD_SET(k, fds);
875 k++;
876 }
877 }
878
879 unlock_user(target_fds, target_fds_addr, 0);
880
881 return 0;
882 }
883
884 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
885 abi_ulong target_fds_addr,
886 int n)
887 {
888 if (target_fds_addr) {
889 if (copy_from_user_fdset(fds, target_fds_addr, n))
890 return -TARGET_EFAULT;
891 *fds_ptr = fds;
892 } else {
893 *fds_ptr = NULL;
894 }
895 return 0;
896 }
897
898 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
899 const fd_set *fds,
900 int n)
901 {
902 int i, nw, j, k;
903 abi_long v;
904 abi_ulong *target_fds;
905
906 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
907 if (!(target_fds = lock_user(VERIFY_WRITE,
908 target_fds_addr,
909 sizeof(abi_ulong) * nw,
910 0)))
911 return -TARGET_EFAULT;
912
913 k = 0;
914 for (i = 0; i < nw; i++) {
915 v = 0;
916 for (j = 0; j < TARGET_ABI_BITS; j++) {
917         v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
918 k++;
919 }
920 __put_user(v, &target_fds[i]);
921 }
922
923 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
924
925 return 0;
926 }
927
928 #if defined(__alpha__)
929 #define HOST_HZ 1024
930 #else
931 #define HOST_HZ 100
932 #endif
933
934 static inline abi_long host_to_target_clock_t(long ticks)
935 {
936 #if HOST_HZ == TARGET_HZ
937 return ticks;
938 #else
939 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
940 #endif
941 }
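/*
 * Worked example: with HOST_HZ == 1024 (Alpha host) and a target using
 * TARGET_HZ == 100, 2048 host clock ticks are reported to the guest as
 * (2048 * 100) / 1024 = 200 ticks; where both rates are 100 the value
 * passes through unchanged.
 */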
942
943 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
944 const struct rusage *rusage)
945 {
946 struct target_rusage *target_rusage;
947
948 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
949 return -TARGET_EFAULT;
950 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
951 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
952 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
953 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
954 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
955 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
956 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
957 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
958 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
959 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
960 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
961 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
962 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
963 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
964 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
965 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
966 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
967 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
968 unlock_user_struct(target_rusage, target_addr, 1);
969
970 return 0;
971 }
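/*
 * The function above shows the usual pattern for copying a structure out to
 * guest memory: lock_user_struct(VERIFY_WRITE, ...) maps and validates the
 * guest address, each field is stored with the byte-swapping tswapal()
 * accessor, and unlock_user_struct(..., 1) releases the mapping with the
 * final argument indicating that the data must be written back to the
 * guest.  (Description of the convention used throughout this file, not of
 * any additional checking.)
 */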
972
973 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
974 {
975 abi_ulong target_rlim_swap;
976 rlim_t result;
977
978 target_rlim_swap = tswapal(target_rlim);
979 if (target_rlim_swap == TARGET_RLIM_INFINITY)
980 return RLIM_INFINITY;
981
982 result = target_rlim_swap;
983 if (target_rlim_swap != (rlim_t)result)
984 return RLIM_INFINITY;
985
986 return result;
987 }
988
989 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
990 {
991 abi_ulong target_rlim_swap;
992 abi_ulong result;
993
994 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
995 target_rlim_swap = TARGET_RLIM_INFINITY;
996 else
997 target_rlim_swap = rlim;
998 result = tswapal(target_rlim_swap);
999
1000 return result;
1001 }
1002
1003 static inline int target_to_host_resource(int code)
1004 {
1005 switch (code) {
1006 case TARGET_RLIMIT_AS:
1007 return RLIMIT_AS;
1008 case TARGET_RLIMIT_CORE:
1009 return RLIMIT_CORE;
1010 case TARGET_RLIMIT_CPU:
1011 return RLIMIT_CPU;
1012 case TARGET_RLIMIT_DATA:
1013 return RLIMIT_DATA;
1014 case TARGET_RLIMIT_FSIZE:
1015 return RLIMIT_FSIZE;
1016 case TARGET_RLIMIT_LOCKS:
1017 return RLIMIT_LOCKS;
1018 case TARGET_RLIMIT_MEMLOCK:
1019 return RLIMIT_MEMLOCK;
1020 case TARGET_RLIMIT_MSGQUEUE:
1021 return RLIMIT_MSGQUEUE;
1022 case TARGET_RLIMIT_NICE:
1023 return RLIMIT_NICE;
1024 case TARGET_RLIMIT_NOFILE:
1025 return RLIMIT_NOFILE;
1026 case TARGET_RLIMIT_NPROC:
1027 return RLIMIT_NPROC;
1028 case TARGET_RLIMIT_RSS:
1029 return RLIMIT_RSS;
1030 case TARGET_RLIMIT_RTPRIO:
1031 return RLIMIT_RTPRIO;
1032 case TARGET_RLIMIT_SIGPENDING:
1033 return RLIMIT_SIGPENDING;
1034 case TARGET_RLIMIT_STACK:
1035 return RLIMIT_STACK;
1036 default:
1037 return code;
1038 }
1039 }
1040
1041 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1042 abi_ulong target_tv_addr)
1043 {
1044 struct target_timeval *target_tv;
1045
1046 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1047 return -TARGET_EFAULT;
1048
1049 __get_user(tv->tv_sec, &target_tv->tv_sec);
1050 __get_user(tv->tv_usec, &target_tv->tv_usec);
1051
1052 unlock_user_struct(target_tv, target_tv_addr, 0);
1053
1054 return 0;
1055 }
1056
1057 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1058 const struct timeval *tv)
1059 {
1060 struct target_timeval *target_tv;
1061
1062 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1063 return -TARGET_EFAULT;
1064
1065 __put_user(tv->tv_sec, &target_tv->tv_sec);
1066 __put_user(tv->tv_usec, &target_tv->tv_usec);
1067
1068 unlock_user_struct(target_tv, target_tv_addr, 1);
1069
1070 return 0;
1071 }
1072
1073 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1074 #include <mqueue.h>
1075
1076 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1077 abi_ulong target_mq_attr_addr)
1078 {
1079 struct target_mq_attr *target_mq_attr;
1080
1081 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1082 target_mq_attr_addr, 1))
1083 return -TARGET_EFAULT;
1084
1085 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1086 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1087 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1088 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1089
1090 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1091
1092 return 0;
1093 }
1094
1095 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1096 const struct mq_attr *attr)
1097 {
1098 struct target_mq_attr *target_mq_attr;
1099
1100 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1101 target_mq_attr_addr, 0))
1102 return -TARGET_EFAULT;
1103
1104 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1105 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1106 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1107 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1108
1109 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1110
1111 return 0;
1112 }
1113 #endif
1114
1115 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1116 /* do_select() must return target values and target errnos. */
1117 static abi_long do_select(int n,
1118 abi_ulong rfd_addr, abi_ulong wfd_addr,
1119 abi_ulong efd_addr, abi_ulong target_tv_addr)
1120 {
1121 fd_set rfds, wfds, efds;
1122 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1123 struct timeval tv, *tv_ptr;
1124 abi_long ret;
1125
1126 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1127 if (ret) {
1128 return ret;
1129 }
1130 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1131 if (ret) {
1132 return ret;
1133 }
1134 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1135 if (ret) {
1136 return ret;
1137 }
1138
1139 if (target_tv_addr) {
1140 if (copy_from_user_timeval(&tv, target_tv_addr))
1141 return -TARGET_EFAULT;
1142 tv_ptr = &tv;
1143 } else {
1144 tv_ptr = NULL;
1145 }
1146
1147 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1148
1149 if (!is_error(ret)) {
1150 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1151 return -TARGET_EFAULT;
1152 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1153 return -TARGET_EFAULT;
1154 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1155 return -TARGET_EFAULT;
1156
1157 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1158 return -TARGET_EFAULT;
1159 }
1160
1161 return ret;
1162 }
1163 #endif
1164
1165 static abi_long do_pipe2(int host_pipe[], int flags)
1166 {
1167 #ifdef CONFIG_PIPE2
1168 return pipe2(host_pipe, flags);
1169 #else
1170 return -ENOSYS;
1171 #endif
1172 }
1173
1174 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1175 int flags, int is_pipe2)
1176 {
1177 int host_pipe[2];
1178 abi_long ret;
1179 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1180
1181 if (is_error(ret))
1182 return get_errno(ret);
1183
1184 /* Several targets have special calling conventions for the original
1185 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1186 if (!is_pipe2) {
1187 #if defined(TARGET_ALPHA)
1188 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1189 return host_pipe[0];
1190 #elif defined(TARGET_MIPS)
1191 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1192 return host_pipe[0];
1193 #elif defined(TARGET_SH4)
1194 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1195 return host_pipe[0];
1196 #endif
1197 }
1198
1199 if (put_user_s32(host_pipe[0], pipedes)
1200 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1201 return -TARGET_EFAULT;
1202 return get_errno(ret);
1203 }
1204
1205 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1206 abi_ulong target_addr,
1207 socklen_t len)
1208 {
1209 struct target_ip_mreqn *target_smreqn;
1210
1211 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1212 if (!target_smreqn)
1213 return -TARGET_EFAULT;
1214 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1215 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1216 if (len == sizeof(struct target_ip_mreqn))
1217 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1218 unlock_user(target_smreqn, target_addr, 0);
1219
1220 return 0;
1221 }
1222
1223 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1224 abi_ulong target_addr,
1225 socklen_t len)
1226 {
1227 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1228 sa_family_t sa_family;
1229 struct target_sockaddr *target_saddr;
1230
1231 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1232 if (!target_saddr)
1233 return -TARGET_EFAULT;
1234
1235 sa_family = tswap16(target_saddr->sa_family);
1236
1237     /* Oops. The caller might send an incomplete sun_path; sun_path
1238      * must be terminated by \0 (see the manual page), but
1239      * unfortunately it is quite common to specify sockaddr_un
1240      * length as "strlen(x->sun_path)" while it should be
1241      * "strlen(...) + 1". We'll fix that here if needed.
1242      * The Linux kernel has a similar workaround.
1243 */
1244
1245 if (sa_family == AF_UNIX) {
1246 if (len < unix_maxlen && len > 0) {
1247 char *cp = (char*)target_saddr;
1248
1249 if ( cp[len-1] && !cp[len] )
1250 len++;
1251 }
1252 if (len > unix_maxlen)
1253 len = unix_maxlen;
1254 }
1255
1256 memcpy(addr, target_saddr, len);
1257 addr->sa_family = sa_family;
1258 unlock_user(target_saddr, target_addr, 0);
1259
1260 return 0;
1261 }
1262
1263 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1264 struct sockaddr *addr,
1265 socklen_t len)
1266 {
1267 struct target_sockaddr *target_saddr;
1268
1269 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1270 if (!target_saddr)
1271 return -TARGET_EFAULT;
1272 memcpy(target_saddr, addr, len);
1273 target_saddr->sa_family = tswap16(addr->sa_family);
1274 unlock_user(target_saddr, target_addr, len);
1275
1276 return 0;
1277 }
1278
1279 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1280 struct target_msghdr *target_msgh)
1281 {
1282 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1283 abi_long msg_controllen;
1284 abi_ulong target_cmsg_addr;
1285 struct target_cmsghdr *target_cmsg;
1286 socklen_t space = 0;
1287
1288 msg_controllen = tswapal(target_msgh->msg_controllen);
1289 if (msg_controllen < sizeof (struct target_cmsghdr))
1290 goto the_end;
1291 target_cmsg_addr = tswapal(target_msgh->msg_control);
1292 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1293 if (!target_cmsg)
1294 return -TARGET_EFAULT;
1295
1296 while (cmsg && target_cmsg) {
1297 void *data = CMSG_DATA(cmsg);
1298 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1299
1300 int len = tswapal(target_cmsg->cmsg_len)
1301 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1302
1303 space += CMSG_SPACE(len);
1304 if (space > msgh->msg_controllen) {
1305 space -= CMSG_SPACE(len);
1306 gemu_log("Host cmsg overflow\n");
1307 break;
1308 }
1309
1310 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1311 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1312 cmsg->cmsg_len = CMSG_LEN(len);
1313
1314 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1315 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1316 memcpy(data, target_data, len);
1317 } else {
1318 int *fd = (int *)data;
1319 int *target_fd = (int *)target_data;
1320 int i, numfds = len / sizeof(int);
1321
1322 for (i = 0; i < numfds; i++)
1323 fd[i] = tswap32(target_fd[i]);
1324 }
1325
1326 cmsg = CMSG_NXTHDR(msgh, cmsg);
1327 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1328 }
1329 unlock_user(target_cmsg, target_cmsg_addr, 0);
1330 the_end:
1331 msgh->msg_controllen = space;
1332 return 0;
1333 }
1334
1335 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1336 struct msghdr *msgh)
1337 {
1338 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1339 abi_long msg_controllen;
1340 abi_ulong target_cmsg_addr;
1341 struct target_cmsghdr *target_cmsg;
1342 socklen_t space = 0;
1343
1344 msg_controllen = tswapal(target_msgh->msg_controllen);
1345 if (msg_controllen < sizeof (struct target_cmsghdr))
1346 goto the_end;
1347 target_cmsg_addr = tswapal(target_msgh->msg_control);
1348 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1349 if (!target_cmsg)
1350 return -TARGET_EFAULT;
1351
1352 while (cmsg && target_cmsg) {
1353 void *data = CMSG_DATA(cmsg);
1354 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1355
1356 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1357
1358 space += TARGET_CMSG_SPACE(len);
1359 if (space > msg_controllen) {
1360 space -= TARGET_CMSG_SPACE(len);
1361 gemu_log("Target cmsg overflow\n");
1362 break;
1363 }
1364
1365 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1366 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1367 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1368
1369 if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1370 (cmsg->cmsg_type == SCM_RIGHTS)) {
1371 int *fd = (int *)data;
1372 int *target_fd = (int *)target_data;
1373 int i, numfds = len / sizeof(int);
1374
1375 for (i = 0; i < numfds; i++)
1376 target_fd[i] = tswap32(fd[i]);
1377 } else if ((cmsg->cmsg_level == TARGET_SOL_SOCKET) &&
1378 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1379 (len == sizeof(struct timeval))) {
1380 /* copy struct timeval to target */
1381 struct timeval *tv = (struct timeval *)data;
1382 struct target_timeval *target_tv =
1383 (struct target_timeval *)target_data;
1384
1385 target_tv->tv_sec = tswapal(tv->tv_sec);
1386 target_tv->tv_usec = tswapal(tv->tv_usec);
1387 } else {
1388 gemu_log("Unsupported ancillary data: %d/%d\n",
1389 cmsg->cmsg_level, cmsg->cmsg_type);
1390 memcpy(target_data, data, len);
1391 }
1392
1393 cmsg = CMSG_NXTHDR(msgh, cmsg);
1394 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1395 }
1396 unlock_user(target_cmsg, target_cmsg_addr, space);
1397 the_end:
1398 target_msgh->msg_controllen = tswapal(space);
1399 return 0;
1400 }
1401
1402 /* do_setsockopt() Must return target values and target errnos. */
1403 static abi_long do_setsockopt(int sockfd, int level, int optname,
1404 abi_ulong optval_addr, socklen_t optlen)
1405 {
1406 abi_long ret;
1407 int val;
1408 struct ip_mreqn *ip_mreq;
1409 struct ip_mreq_source *ip_mreq_source;
1410
1411 switch(level) {
1412 case SOL_TCP:
1413 /* TCP options all take an 'int' value. */
1414 if (optlen < sizeof(uint32_t))
1415 return -TARGET_EINVAL;
1416
1417 if (get_user_u32(val, optval_addr))
1418 return -TARGET_EFAULT;
1419 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1420 break;
1421 case SOL_IP:
1422 switch(optname) {
1423 case IP_TOS:
1424 case IP_TTL:
1425 case IP_HDRINCL:
1426 case IP_ROUTER_ALERT:
1427 case IP_RECVOPTS:
1428 case IP_RETOPTS:
1429 case IP_PKTINFO:
1430 case IP_MTU_DISCOVER:
1431 case IP_RECVERR:
1432 case IP_RECVTOS:
1433 #ifdef IP_FREEBIND
1434 case IP_FREEBIND:
1435 #endif
1436 case IP_MULTICAST_TTL:
1437 case IP_MULTICAST_LOOP:
1438 val = 0;
1439 if (optlen >= sizeof(uint32_t)) {
1440 if (get_user_u32(val, optval_addr))
1441 return -TARGET_EFAULT;
1442 } else if (optlen >= 1) {
1443 if (get_user_u8(val, optval_addr))
1444 return -TARGET_EFAULT;
1445 }
1446 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1447 break;
1448 case IP_ADD_MEMBERSHIP:
1449 case IP_DROP_MEMBERSHIP:
1450 if (optlen < sizeof (struct target_ip_mreq) ||
1451 optlen > sizeof (struct target_ip_mreqn))
1452 return -TARGET_EINVAL;
1453
1454 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1455 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1456 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1457 break;
1458
1459 case IP_BLOCK_SOURCE:
1460 case IP_UNBLOCK_SOURCE:
1461 case IP_ADD_SOURCE_MEMBERSHIP:
1462 case IP_DROP_SOURCE_MEMBERSHIP:
1463 if (optlen != sizeof (struct target_ip_mreq_source))
1464 return -TARGET_EINVAL;
1465
1466 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1467 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1468 unlock_user (ip_mreq_source, optval_addr, 0);
1469 break;
1470
1471 default:
1472 goto unimplemented;
1473 }
1474 break;
1475 case SOL_RAW:
1476 switch (optname) {
1477 case ICMP_FILTER:
1478         /* struct icmp_filter takes a u32 value */
1479 if (optlen < sizeof(uint32_t)) {
1480 return -TARGET_EINVAL;
1481 }
1482
1483 if (get_user_u32(val, optval_addr)) {
1484 return -TARGET_EFAULT;
1485 }
1486 ret = get_errno(setsockopt(sockfd, level, optname,
1487 &val, sizeof(val)));
1488 break;
1489
1490 default:
1491 goto unimplemented;
1492 }
1493 break;
1494 case TARGET_SOL_SOCKET:
1495 switch (optname) {
1496 case TARGET_SO_RCVTIMEO:
1497 {
1498 struct timeval tv;
1499
1500 optname = SO_RCVTIMEO;
1501
1502 set_timeout:
1503 if (optlen != sizeof(struct target_timeval)) {
1504 return -TARGET_EINVAL;
1505 }
1506
1507 if (copy_from_user_timeval(&tv, optval_addr)) {
1508 return -TARGET_EFAULT;
1509 }
1510
1511 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1512 &tv, sizeof(tv)));
1513 return ret;
1514 }
1515 case TARGET_SO_SNDTIMEO:
1516 optname = SO_SNDTIMEO;
1517 goto set_timeout;
1518 /* Options with 'int' argument. */
1519 case TARGET_SO_DEBUG:
1520 optname = SO_DEBUG;
1521 break;
1522 case TARGET_SO_REUSEADDR:
1523 optname = SO_REUSEADDR;
1524 break;
1525 case TARGET_SO_TYPE:
1526 optname = SO_TYPE;
1527 break;
1528 case TARGET_SO_ERROR:
1529 optname = SO_ERROR;
1530 break;
1531 case TARGET_SO_DONTROUTE:
1532 optname = SO_DONTROUTE;
1533 break;
1534 case TARGET_SO_BROADCAST:
1535 optname = SO_BROADCAST;
1536 break;
1537 case TARGET_SO_SNDBUF:
1538 optname = SO_SNDBUF;
1539 break;
1540 case TARGET_SO_RCVBUF:
1541 optname = SO_RCVBUF;
1542 break;
1543 case TARGET_SO_KEEPALIVE:
1544 optname = SO_KEEPALIVE;
1545 break;
1546 case TARGET_SO_OOBINLINE:
1547 optname = SO_OOBINLINE;
1548 break;
1549 case TARGET_SO_NO_CHECK:
1550 optname = SO_NO_CHECK;
1551 break;
1552 case TARGET_SO_PRIORITY:
1553 optname = SO_PRIORITY;
1554 break;
1555 #ifdef SO_BSDCOMPAT
1556 case TARGET_SO_BSDCOMPAT:
1557 optname = SO_BSDCOMPAT;
1558 break;
1559 #endif
1560 case TARGET_SO_PASSCRED:
1561 optname = SO_PASSCRED;
1562 break;
1563 case TARGET_SO_TIMESTAMP:
1564 optname = SO_TIMESTAMP;
1565 break;
1566 case TARGET_SO_RCVLOWAT:
1567 optname = SO_RCVLOWAT;
1568 break;
1570 default:
1571 goto unimplemented;
1572 }
1573 if (optlen < sizeof(uint32_t))
1574 return -TARGET_EINVAL;
1575
1576 if (get_user_u32(val, optval_addr))
1577 return -TARGET_EFAULT;
1578 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1579 break;
1580 default:
1581 unimplemented:
1582 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1583 ret = -TARGET_ENOPROTOOPT;
1584 }
1585 return ret;
1586 }
1587
1588 /* do_getsockopt() Must return target values and target errnos. */
1589 static abi_long do_getsockopt(int sockfd, int level, int optname,
1590 abi_ulong optval_addr, abi_ulong optlen)
1591 {
1592 abi_long ret;
1593 int len, val;
1594 socklen_t lv;
1595
1596 switch(level) {
1597 case TARGET_SOL_SOCKET:
1598 level = SOL_SOCKET;
1599 switch (optname) {
1600 /* These don't just return a single integer */
1601 case TARGET_SO_LINGER:
1602 case TARGET_SO_RCVTIMEO:
1603 case TARGET_SO_SNDTIMEO:
1604 case TARGET_SO_PEERNAME:
1605 goto unimplemented;
1606 case TARGET_SO_PEERCRED: {
1607 struct ucred cr;
1608 socklen_t crlen;
1609 struct target_ucred *tcr;
1610
1611 if (get_user_u32(len, optlen)) {
1612 return -TARGET_EFAULT;
1613 }
1614 if (len < 0) {
1615 return -TARGET_EINVAL;
1616 }
1617
1618 crlen = sizeof(cr);
1619 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1620 &cr, &crlen));
1621 if (ret < 0) {
1622 return ret;
1623 }
1624 if (len > crlen) {
1625 len = crlen;
1626 }
1627 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1628 return -TARGET_EFAULT;
1629 }
1630 __put_user(cr.pid, &tcr->pid);
1631 __put_user(cr.uid, &tcr->uid);
1632 __put_user(cr.gid, &tcr->gid);
1633 unlock_user_struct(tcr, optval_addr, 1);
1634 if (put_user_u32(len, optlen)) {
1635 return -TARGET_EFAULT;
1636 }
1637 break;
1638 }
1639 /* Options with 'int' argument. */
1640 case TARGET_SO_DEBUG:
1641 optname = SO_DEBUG;
1642 goto int_case;
1643 case TARGET_SO_REUSEADDR:
1644 optname = SO_REUSEADDR;
1645 goto int_case;
1646 case TARGET_SO_TYPE:
1647 optname = SO_TYPE;
1648 goto int_case;
1649 case TARGET_SO_ERROR:
1650 optname = SO_ERROR;
1651 goto int_case;
1652 case TARGET_SO_DONTROUTE:
1653 optname = SO_DONTROUTE;
1654 goto int_case;
1655 case TARGET_SO_BROADCAST:
1656 optname = SO_BROADCAST;
1657 goto int_case;
1658 case TARGET_SO_SNDBUF:
1659 optname = SO_SNDBUF;
1660 goto int_case;
1661 case TARGET_SO_RCVBUF:
1662 optname = SO_RCVBUF;
1663 goto int_case;
1664 case TARGET_SO_KEEPALIVE:
1665 optname = SO_KEEPALIVE;
1666 goto int_case;
1667 case TARGET_SO_OOBINLINE:
1668 optname = SO_OOBINLINE;
1669 goto int_case;
1670 case TARGET_SO_NO_CHECK:
1671 optname = SO_NO_CHECK;
1672 goto int_case;
1673 case TARGET_SO_PRIORITY:
1674 optname = SO_PRIORITY;
1675 goto int_case;
1676 #ifdef SO_BSDCOMPAT
1677 case TARGET_SO_BSDCOMPAT:
1678 optname = SO_BSDCOMPAT;
1679 goto int_case;
1680 #endif
1681 case TARGET_SO_PASSCRED:
1682 optname = SO_PASSCRED;
1683 goto int_case;
1684 case TARGET_SO_TIMESTAMP:
1685 optname = SO_TIMESTAMP;
1686 goto int_case;
1687 case TARGET_SO_RCVLOWAT:
1688 optname = SO_RCVLOWAT;
1689 goto int_case;
1690 default:
1691 goto int_case;
1692 }
1693 break;
1694 case SOL_TCP:
1695 /* TCP options all take an 'int' value. */
1696 int_case:
1697 if (get_user_u32(len, optlen))
1698 return -TARGET_EFAULT;
1699 if (len < 0)
1700 return -TARGET_EINVAL;
1701 lv = sizeof(lv);
1702 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1703 if (ret < 0)
1704 return ret;
1705 if (len > lv)
1706 len = lv;
1707 if (len == 4) {
1708 if (put_user_u32(val, optval_addr))
1709 return -TARGET_EFAULT;
1710 } else {
1711 if (put_user_u8(val, optval_addr))
1712 return -TARGET_EFAULT;
1713 }
1714 if (put_user_u32(len, optlen))
1715 return -TARGET_EFAULT;
1716 break;
1717 case SOL_IP:
1718 switch(optname) {
1719 case IP_TOS:
1720 case IP_TTL:
1721 case IP_HDRINCL:
1722 case IP_ROUTER_ALERT:
1723 case IP_RECVOPTS:
1724 case IP_RETOPTS:
1725 case IP_PKTINFO:
1726 case IP_MTU_DISCOVER:
1727 case IP_RECVERR:
1728 case IP_RECVTOS:
1729 #ifdef IP_FREEBIND
1730 case IP_FREEBIND:
1731 #endif
1732 case IP_MULTICAST_TTL:
1733 case IP_MULTICAST_LOOP:
1734 if (get_user_u32(len, optlen))
1735 return -TARGET_EFAULT;
1736 if (len < 0)
1737 return -TARGET_EINVAL;
1738 lv = sizeof(lv);
1739 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1740 if (ret < 0)
1741 return ret;
1742 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1743 len = 1;
1744 if (put_user_u32(len, optlen)
1745 || put_user_u8(val, optval_addr))
1746 return -TARGET_EFAULT;
1747 } else {
1748 if (len > sizeof(int))
1749 len = sizeof(int);
1750 if (put_user_u32(len, optlen)
1751 || put_user_u32(val, optval_addr))
1752 return -TARGET_EFAULT;
1753 }
1754 break;
1755 default:
1756 ret = -TARGET_ENOPROTOOPT;
1757 break;
1758 }
1759 break;
1760 default:
1761 unimplemented:
1762 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1763 level, optname);
1764 ret = -TARGET_EOPNOTSUPP;
1765 break;
1766 }
1767 return ret;
1768 }
1769
1770 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1771 int count, int copy)
1772 {
1773 struct target_iovec *target_vec;
1774 struct iovec *vec;
1775 abi_ulong total_len, max_len;
1776 int i;
1777
1778 if (count == 0) {
1779 errno = 0;
1780 return NULL;
1781 }
1782 if (count < 0 || count > IOV_MAX) {
1783 errno = EINVAL;
1784 return NULL;
1785 }
1786
1787 vec = calloc(count, sizeof(struct iovec));
1788 if (vec == NULL) {
1789 errno = ENOMEM;
1790 return NULL;
1791 }
1792
1793 target_vec = lock_user(VERIFY_READ, target_addr,
1794 count * sizeof(struct target_iovec), 1);
1795 if (target_vec == NULL) {
1796 errno = EFAULT;
1797 goto fail2;
1798 }
1799
1800 /* ??? If host page size > target page size, this will result in a
1801 value larger than what we can actually support. */
1802 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1803 total_len = 0;
1804
1805 for (i = 0; i < count; i++) {
1806 abi_ulong base = tswapal(target_vec[i].iov_base);
1807 abi_long len = tswapal(target_vec[i].iov_len);
1808
1809 if (len < 0) {
1810 errno = EINVAL;
1811 goto fail;
1812 } else if (len == 0) {
1813 /* Zero length pointer is ignored. */
1814 vec[i].iov_base = 0;
1815 } else {
1816 vec[i].iov_base = lock_user(type, base, len, copy);
1817 if (!vec[i].iov_base) {
1818 errno = EFAULT;
1819 goto fail;
1820 }
1821 if (len > max_len - total_len) {
1822 len = max_len - total_len;
1823 }
1824 }
1825 vec[i].iov_len = len;
1826 total_len += len;
1827 }
1828
1829 unlock_user(target_vec, target_addr, 0);
1830 return vec;
1831
1832 fail:
1833 free(vec);
1834 fail2:
1835 unlock_user(target_vec, target_addr, 0);
1836 return NULL;
1837 }
1838
1839 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1840 int count, int copy)
1841 {
1842 struct target_iovec *target_vec;
1843 int i;
1844
1845 target_vec = lock_user(VERIFY_READ, target_addr,
1846 count * sizeof(struct target_iovec), 1);
1847 if (target_vec) {
1848 for (i = 0; i < count; i++) {
1849 abi_ulong base = tswapal(target_vec[i].iov_base);
1850             abi_long len = tswapal(target_vec[i].iov_len);
1851 if (len < 0) {
1852 break;
1853 }
1854 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1855 }
1856 unlock_user(target_vec, target_addr, 0);
1857 }
1858
1859 free(vec);
1860 }
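/*
 * Sketch of how the iovec helpers are typically used by the readv/writev
 * style syscalls handled later in this file (illustrative only; names such
 * as target_iov_addr are placeholders):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_WRITE, target_iov_addr, count, 0);
 *     if (vec == NULL) {
 *         ret = -host_to_target_errno(errno);
 *     } else {
 *         ret = get_errno(readv(fd, vec, count));
 *         unlock_iovec(vec, target_iov_addr, count, 1);
 *     }
 *
 * lock_iovec() pins every guest buffer and builds a host iovec array;
 * unlock_iovec() releases the buffers, copying data back to the guest when
 * its last argument is non-zero.
 */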
1861
1862 /* do_socket() Must return target values and target errnos. */
1863 static abi_long do_socket(int domain, int type, int protocol)
1864 {
1865 #if defined(TARGET_MIPS)
1866 switch(type) {
1867 case TARGET_SOCK_DGRAM:
1868 type = SOCK_DGRAM;
1869 break;
1870 case TARGET_SOCK_STREAM:
1871 type = SOCK_STREAM;
1872 break;
1873 case TARGET_SOCK_RAW:
1874 type = SOCK_RAW;
1875 break;
1876 case TARGET_SOCK_RDM:
1877 type = SOCK_RDM;
1878 break;
1879 case TARGET_SOCK_SEQPACKET:
1880 type = SOCK_SEQPACKET;
1881 break;
1882 case TARGET_SOCK_PACKET:
1883 type = SOCK_PACKET;
1884 break;
1885 }
1886 #endif
1887 if (domain == PF_NETLINK)
1888 return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
1889 return get_errno(socket(domain, type, protocol));
1890 }
1891
1892 /* do_bind() Must return target values and target errnos. */
1893 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1894 socklen_t addrlen)
1895 {
1896 void *addr;
1897 abi_long ret;
1898
1899 if ((int)addrlen < 0) {
1900 return -TARGET_EINVAL;
1901 }
1902
1903 addr = alloca(addrlen+1);
1904
1905 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1906 if (ret)
1907 return ret;
1908
1909 return get_errno(bind(sockfd, addr, addrlen));
1910 }
1911
1912 /* do_connect() Must return target values and target errnos. */
1913 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1914 socklen_t addrlen)
1915 {
1916 void *addr;
1917 abi_long ret;
1918
1919 if ((int)addrlen < 0) {
1920 return -TARGET_EINVAL;
1921 }
1922
1923 addr = alloca(addrlen);
1924
1925 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1926 if (ret)
1927 return ret;
1928
1929 return get_errno(connect(sockfd, addr, addrlen));
1930 }
1931
1932 /* do_sendrecvmsg() Must return target values and target errnos. */
1933 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1934 int flags, int send)
1935 {
1936 abi_long ret, len;
1937 struct target_msghdr *msgp;
1938 struct msghdr msg;
1939 int count;
1940 struct iovec *vec;
1941 abi_ulong target_vec;
1942
1943 /* FIXME */
1944 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1945 msgp,
1946 target_msg,
1947 send ? 1 : 0))
1948 return -TARGET_EFAULT;
1949 if (msgp->msg_name) {
1950 msg.msg_namelen = tswap32(msgp->msg_namelen);
1951 msg.msg_name = alloca(msg.msg_namelen);
1952 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1953 msg.msg_namelen);
1954 if (ret) {
1955 goto out2;
1956 }
1957 } else {
1958 msg.msg_name = NULL;
1959 msg.msg_namelen = 0;
1960 }
1961 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1962 msg.msg_control = alloca(msg.msg_controllen);
1963 msg.msg_flags = tswap32(msgp->msg_flags);
1964
1965 count = tswapal(msgp->msg_iovlen);
1966 target_vec = tswapal(msgp->msg_iov);
1967 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1968 target_vec, count, send);
1969 if (vec == NULL) {
1970 ret = -host_to_target_errno(errno);
1971 goto out2;
1972 }
1973 msg.msg_iovlen = count;
1974 msg.msg_iov = vec;
1975
1976 if (send) {
1977 ret = target_to_host_cmsg(&msg, msgp);
1978 if (ret == 0)
1979 ret = get_errno(sendmsg(fd, &msg, flags));
1980 } else {
1981 ret = get_errno(recvmsg(fd, &msg, flags));
1982 if (!is_error(ret)) {
1983 len = ret;
1984 ret = host_to_target_cmsg(msgp, &msg);
1985 if (!is_error(ret)) {
1986 msgp->msg_namelen = tswap32(msg.msg_namelen);
1987 if (msg.msg_name != NULL) {
1988 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1989 msg.msg_name, msg.msg_namelen);
1990 if (ret) {
1991 goto out;
1992 }
1993 }
1994
1995 ret = len;
1996 }
1997 }
1998 }
1999
2000 out:
2001 unlock_iovec(vec, target_vec, count, !send);
2002 out2:
2003 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2004 return ret;
2005 }
2006
2007 /* do_accept() Must return target values and target errnos. */
2008 static abi_long do_accept(int fd, abi_ulong target_addr,
2009 abi_ulong target_addrlen_addr)
2010 {
2011 socklen_t addrlen;
2012 void *addr;
2013 abi_long ret;
2014
2015 if (target_addr == 0)
2016 return get_errno(accept(fd, NULL, NULL));
2017
2018 /* linux returns EINVAL if addrlen pointer is invalid */
2019 if (get_user_u32(addrlen, target_addrlen_addr))
2020 return -TARGET_EINVAL;
2021
2022 if ((int)addrlen < 0) {
2023 return -TARGET_EINVAL;
2024 }
2025
2026 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2027 return -TARGET_EINVAL;
2028
2029 addr = alloca(addrlen);
2030
2031 ret = get_errno(accept(fd, addr, &addrlen));
2032 if (!is_error(ret)) {
2033 host_to_target_sockaddr(target_addr, addr, addrlen);
2034 if (put_user_u32(addrlen, target_addrlen_addr))
2035 ret = -TARGET_EFAULT;
2036 }
2037 return ret;
2038 }
2039
2040 /* do_getpeername() Must return target values and target errnos. */
2041 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2042 abi_ulong target_addrlen_addr)
2043 {
2044 socklen_t addrlen;
2045 void *addr;
2046 abi_long ret;
2047
2048 if (get_user_u32(addrlen, target_addrlen_addr))
2049 return -TARGET_EFAULT;
2050
2051 if ((int)addrlen < 0) {
2052 return -TARGET_EINVAL;
2053 }
2054
2055 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2056 return -TARGET_EFAULT;
2057
2058 addr = alloca(addrlen);
2059
2060 ret = get_errno(getpeername(fd, addr, &addrlen));
2061 if (!is_error(ret)) {
2062 host_to_target_sockaddr(target_addr, addr, addrlen);
2063 if (put_user_u32(addrlen, target_addrlen_addr))
2064 ret = -TARGET_EFAULT;
2065 }
2066 return ret;
2067 }
2068
2069 /* do_getsockname() Must return target values and target errnos. */
2070 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2071 abi_ulong target_addrlen_addr)
2072 {
2073 socklen_t addrlen;
2074 void *addr;
2075 abi_long ret;
2076
2077 if (get_user_u32(addrlen, target_addrlen_addr))
2078 return -TARGET_EFAULT;
2079
2080 if ((int)addrlen < 0) {
2081 return -TARGET_EINVAL;
2082 }
2083
2084 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2085 return -TARGET_EFAULT;
2086
2087 addr = alloca(addrlen);
2088
2089 ret = get_errno(getsockname(fd, addr, &addrlen));
2090 if (!is_error(ret)) {
2091 host_to_target_sockaddr(target_addr, addr, addrlen);
2092 if (put_user_u32(addrlen, target_addrlen_addr))
2093 ret = -TARGET_EFAULT;
2094 }
2095 return ret;
2096 }
2097
2098 /* do_socketpair() Must return target values and target errnos. */
2099 static abi_long do_socketpair(int domain, int type, int protocol,
2100 abi_ulong target_tab_addr)
2101 {
2102 int tab[2];
2103 abi_long ret;
2104
2105 ret = get_errno(socketpair(domain, type, protocol, tab));
2106 if (!is_error(ret)) {
2107 if (put_user_s32(tab[0], target_tab_addr)
2108 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2109 ret = -TARGET_EFAULT;
2110 }
2111 return ret;
2112 }
2113
2114 /* do_sendto() Must return target values and target errnos. */
2115 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2116 abi_ulong target_addr, socklen_t addrlen)
2117 {
2118 void *addr;
2119 void *host_msg;
2120 abi_long ret;
2121
2122 if ((int)addrlen < 0) {
2123 return -TARGET_EINVAL;
2124 }
2125
2126 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2127 if (!host_msg)
2128 return -TARGET_EFAULT;
2129 if (target_addr) {
2130 addr = alloca(addrlen);
2131 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2132 if (ret) {
2133 unlock_user(host_msg, msg, 0);
2134 return ret;
2135 }
2136 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2137 } else {
2138 ret = get_errno(send(fd, host_msg, len, flags));
2139 }
2140 unlock_user(host_msg, msg, 0);
2141 return ret;
2142 }
2143
2144 /* do_recvfrom() Must return target values and target errnos. */
2145 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2146 abi_ulong target_addr,
2147 abi_ulong target_addrlen)
2148 {
2149 socklen_t addrlen;
2150 void *addr;
2151 void *host_msg;
2152 abi_long ret;
2153
2154 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2155 if (!host_msg)
2156 return -TARGET_EFAULT;
2157 if (target_addr) {
2158 if (get_user_u32(addrlen, target_addrlen)) {
2159 ret = -TARGET_EFAULT;
2160 goto fail;
2161 }
2162 if ((int)addrlen < 0) {
2163 ret = -TARGET_EINVAL;
2164 goto fail;
2165 }
2166 addr = alloca(addrlen);
2167 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2168 } else {
2169 addr = NULL; /* To keep compiler quiet. */
2170 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2171 }
2172 if (!is_error(ret)) {
2173 if (target_addr) {
2174 host_to_target_sockaddr(target_addr, addr, addrlen);
2175 if (put_user_u32(addrlen, target_addrlen)) {
2176 ret = -TARGET_EFAULT;
2177 goto fail;
2178 }
2179 }
2180 unlock_user(host_msg, msg, len);
2181 } else {
2182 fail:
2183 unlock_user(host_msg, msg, 0);
2184 }
2185 return ret;
2186 }
2187
2188 #ifdef TARGET_NR_socketcall
2189 /* do_socketcall() Must return target values and target errnos. */
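/* The socketcall arguments live in guest memory as an array of abi_ulong
 * words starting at vptr; each case below fetches the words it needs and
 * dispatches to the matching do_*() helper above.
 */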
2190 static abi_long do_socketcall(int num, abi_ulong vptr)
2191 {
2192 abi_long ret;
2193 const int n = sizeof(abi_ulong);
2194
2195 switch(num) {
2196 case SOCKOP_socket:
2197 {
2198 abi_ulong domain, type, protocol;
2199
2200 if (get_user_ual(domain, vptr)
2201 || get_user_ual(type, vptr + n)
2202 || get_user_ual(protocol, vptr + 2 * n))
2203 return -TARGET_EFAULT;
2204
2205 ret = do_socket(domain, type, protocol);
2206 }
2207 break;
2208 case SOCKOP_bind:
2209 {
2210 abi_ulong sockfd;
2211 abi_ulong target_addr;
2212 socklen_t addrlen;
2213
2214 if (get_user_ual(sockfd, vptr)
2215 || get_user_ual(target_addr, vptr + n)
2216 || get_user_ual(addrlen, vptr + 2 * n))
2217 return -TARGET_EFAULT;
2218
2219 ret = do_bind(sockfd, target_addr, addrlen);
2220 }
2221 break;
2222 case SOCKOP_connect:
2223 {
2224 abi_ulong sockfd;
2225 abi_ulong target_addr;
2226 socklen_t addrlen;
2227
2228 if (get_user_ual(sockfd, vptr)
2229 || get_user_ual(target_addr, vptr + n)
2230 || get_user_ual(addrlen, vptr + 2 * n))
2231 return -TARGET_EFAULT;
2232
2233 ret = do_connect(sockfd, target_addr, addrlen);
2234 }
2235 break;
2236 case SOCKOP_listen:
2237 {
2238 abi_ulong sockfd, backlog;
2239
2240 if (get_user_ual(sockfd, vptr)
2241 || get_user_ual(backlog, vptr + n))
2242 return -TARGET_EFAULT;
2243
2244 ret = get_errno(listen(sockfd, backlog));
2245 }
2246 break;
2247 case SOCKOP_accept:
2248 {
2249 abi_ulong sockfd;
2250 abi_ulong target_addr, target_addrlen;
2251
2252 if (get_user_ual(sockfd, vptr)
2253 || get_user_ual(target_addr, vptr + n)
2254 || get_user_ual(target_addrlen, vptr + 2 * n))
2255 return -TARGET_EFAULT;
2256
2257 ret = do_accept(sockfd, target_addr, target_addrlen);
2258 }
2259 break;
2260 case SOCKOP_getsockname:
2261 {
2262 abi_ulong sockfd;
2263 abi_ulong target_addr, target_addrlen;
2264
2265 if (get_user_ual(sockfd, vptr)
2266 || get_user_ual(target_addr, vptr + n)
2267 || get_user_ual(target_addrlen, vptr + 2 * n))
2268 return -TARGET_EFAULT;
2269
2270 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2271 }
2272 break;
2273 case SOCKOP_getpeername:
2274 {
2275 abi_ulong sockfd;
2276 abi_ulong target_addr, target_addrlen;
2277
2278 if (get_user_ual(sockfd, vptr)
2279 || get_user_ual(target_addr, vptr + n)
2280 || get_user_ual(target_addrlen, vptr + 2 * n))
2281 return -TARGET_EFAULT;
2282
2283 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2284 }
2285 break;
2286 case SOCKOP_socketpair:
2287 {
2288 abi_ulong domain, type, protocol;
2289 abi_ulong tab;
2290
2291 if (get_user_ual(domain, vptr)
2292 || get_user_ual(type, vptr + n)
2293 || get_user_ual(protocol, vptr + 2 * n)
2294 || get_user_ual(tab, vptr + 3 * n))
2295 return -TARGET_EFAULT;
2296
2297 ret = do_socketpair(domain, type, protocol, tab);
2298 }
2299 break;
2300 case SOCKOP_send:
2301 {
2302 abi_ulong sockfd;
2303 abi_ulong msg;
2304 size_t len;
2305 abi_ulong flags;
2306
2307 if (get_user_ual(sockfd, vptr)
2308 || get_user_ual(msg, vptr + n)
2309 || get_user_ual(len, vptr + 2 * n)
2310 || get_user_ual(flags, vptr + 3 * n))
2311 return -TARGET_EFAULT;
2312
2313 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2314 }
2315 break;
2316 case SOCKOP_recv:
2317 {
2318 abi_ulong sockfd;
2319 abi_ulong msg;
2320 size_t len;
2321 abi_ulong flags;
2322
2323 if (get_user_ual(sockfd, vptr)
2324 || get_user_ual(msg, vptr + n)
2325 || get_user_ual(len, vptr + 2 * n)
2326 || get_user_ual(flags, vptr + 3 * n))
2327 return -TARGET_EFAULT;
2328
2329 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2330 }
2331 break;
2332 case SOCKOP_sendto:
2333 {
2334 abi_ulong sockfd;
2335 abi_ulong msg;
2336 size_t len;
2337 abi_ulong flags;
2338 abi_ulong addr;
2339 socklen_t addrlen;
2340
2341 if (get_user_ual(sockfd, vptr)
2342 || get_user_ual(msg, vptr + n)
2343 || get_user_ual(len, vptr + 2 * n)
2344 || get_user_ual(flags, vptr + 3 * n)
2345 || get_user_ual(addr, vptr + 4 * n)
2346 || get_user_ual(addrlen, vptr + 5 * n))
2347 return -TARGET_EFAULT;
2348
2349 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2350 }
2351 break;
2352 case SOCKOP_recvfrom:
2353 {
2354 abi_ulong sockfd;
2355 abi_ulong msg;
2356 size_t len;
2357 abi_ulong flags;
2358 abi_ulong addr;
2359 socklen_t addrlen;
2360
2361 if (get_user_ual(sockfd, vptr)
2362 || get_user_ual(msg, vptr + n)
2363 || get_user_ual(len, vptr + 2 * n)
2364 || get_user_ual(flags, vptr + 3 * n)
2365 || get_user_ual(addr, vptr + 4 * n)
2366 || get_user_ual(addrlen, vptr + 5 * n))
2367 return -TARGET_EFAULT;
2368
2369 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2370 }
2371 break;
2372 case SOCKOP_shutdown:
2373 {
2374 abi_ulong sockfd, how;
2375
2376 if (get_user_ual(sockfd, vptr)
2377 || get_user_ual(how, vptr + n))
2378 return -TARGET_EFAULT;
2379
2380 ret = get_errno(shutdown(sockfd, how));
2381 }
2382 break;
2383 case SOCKOP_sendmsg:
2384 case SOCKOP_recvmsg:
2385 {
2386 abi_ulong fd;
2387 abi_ulong target_msg;
2388 abi_ulong flags;
2389
2390 if (get_user_ual(fd, vptr)
2391 || get_user_ual(target_msg, vptr + n)
2392 || get_user_ual(flags, vptr + 2 * n))
2393 return -TARGET_EFAULT;
2394
2395 ret = do_sendrecvmsg(fd, target_msg, flags,
2396 (num == SOCKOP_sendmsg));
2397 }
2398 break;
2399 case SOCKOP_setsockopt:
2400 {
2401 abi_ulong sockfd;
2402 abi_ulong level;
2403 abi_ulong optname;
2404 abi_ulong optval;
2405 socklen_t optlen;
2406
2407 if (get_user_ual(sockfd, vptr)
2408 || get_user_ual(level, vptr + n)
2409 || get_user_ual(optname, vptr + 2 * n)
2410 || get_user_ual(optval, vptr + 3 * n)
2411 || get_user_ual(optlen, vptr + 4 * n))
2412 return -TARGET_EFAULT;
2413
2414 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2415 }
2416 break;
2417 case SOCKOP_getsockopt:
2418 {
2419 abi_ulong sockfd;
2420 abi_ulong level;
2421 abi_ulong optname;
2422 abi_ulong optval;
2423 socklen_t optlen;
2424
2425 if (get_user_ual(sockfd, vptr)
2426 || get_user_ual(level, vptr + n)
2427 || get_user_ual(optname, vptr + 2 * n)
2428 || get_user_ual(optval, vptr + 3 * n)
2429 || get_user_ual(optlen, vptr + 4 * n))
2430 return -TARGET_EFAULT;
2431
2432 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2433 }
2434 break;
2435 default:
2436 gemu_log("Unsupported socketcall: %d\n", num);
2437 ret = -TARGET_ENOSYS;
2438 break;
2439 }
2440 return ret;
2441 }
2442 #endif
2443
2444 #define N_SHM_REGIONS 32
2445
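/* Book-keeping for guest shmat() attachments, so do_shmdt() can clear the
 * page flags of the detached region again. */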
2446 static struct shm_region {
2447 abi_ulong start;
2448 abi_ulong size;
2449 } shm_regions[N_SHM_REGIONS];
2450
2451 struct target_ipc_perm
2452 {
2453 abi_long __key;
2454 abi_ulong uid;
2455 abi_ulong gid;
2456 abi_ulong cuid;
2457 abi_ulong cgid;
2458 unsigned short int mode;
2459 unsigned short int __pad1;
2460 unsigned short int __seq;
2461 unsigned short int __pad2;
2462 abi_ulong __unused1;
2463 abi_ulong __unused2;
2464 };
2465
2466 struct target_semid_ds
2467 {
2468 struct target_ipc_perm sem_perm;
2469 abi_ulong sem_otime;
2470 abi_ulong __unused1;
2471 abi_ulong sem_ctime;
2472 abi_ulong __unused2;
2473 abi_ulong sem_nsems;
2474 abi_ulong __unused3;
2475 abi_ulong __unused4;
2476 };
2477
2478 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2479 abi_ulong target_addr)
2480 {
2481 struct target_ipc_perm *target_ip;
2482 struct target_semid_ds *target_sd;
2483
2484 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2485 return -TARGET_EFAULT;
2486 target_ip = &(target_sd->sem_perm);
2487 host_ip->__key = tswapal(target_ip->__key);
2488 host_ip->uid = tswapal(target_ip->uid);
2489 host_ip->gid = tswapal(target_ip->gid);
2490 host_ip->cuid = tswapal(target_ip->cuid);
2491 host_ip->cgid = tswapal(target_ip->cgid);
2492 host_ip->mode = tswap16(target_ip->mode);
2493 unlock_user_struct(target_sd, target_addr, 0);
2494 return 0;
2495 }
2496
2497 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2498 struct ipc_perm *host_ip)
2499 {
2500 struct target_ipc_perm *target_ip;
2501 struct target_semid_ds *target_sd;
2502
2503 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2504 return -TARGET_EFAULT;
2505 target_ip = &(target_sd->sem_perm);
2506 target_ip->__key = tswapal(host_ip->__key);
2507 target_ip->uid = tswapal(host_ip->uid);
2508 target_ip->gid = tswapal(host_ip->gid);
2509 target_ip->cuid = tswapal(host_ip->cuid);
2510 target_ip->cgid = tswapal(host_ip->cgid);
2511 target_ip->mode = tswap16(host_ip->mode);
2512 unlock_user_struct(target_sd, target_addr, 1);
2513 return 0;
2514 }
2515
2516 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2517 abi_ulong target_addr)
2518 {
2519 struct target_semid_ds *target_sd;
2520
2521 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2522 return -TARGET_EFAULT;
2523 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2524 return -TARGET_EFAULT;
2525 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2526 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2527 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2528 unlock_user_struct(target_sd, target_addr, 0);
2529 return 0;
2530 }
2531
2532 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2533 struct semid_ds *host_sd)
2534 {
2535 struct target_semid_ds *target_sd;
2536
2537 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2538 return -TARGET_EFAULT;
2539 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2540 return -TARGET_EFAULT;
2541 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2542 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2543 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2544 unlock_user_struct(target_sd, target_addr, 1);
2545 return 0;
2546 }
2547
2548 struct target_seminfo {
2549 int semmap;
2550 int semmni;
2551 int semmns;
2552 int semmnu;
2553 int semmsl;
2554 int semopm;
2555 int semume;
2556 int semusz;
2557 int semvmx;
2558 int semaem;
2559 };
2560
2561 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2562 struct seminfo *host_seminfo)
2563 {
2564 struct target_seminfo *target_seminfo;
2565 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2566 return -TARGET_EFAULT;
2567 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2568 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2569 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2570 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2571 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2572 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2573 __put_user(host_seminfo->semume, &target_seminfo->semume);
2574 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2575 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2576 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2577 unlock_user_struct(target_seminfo, target_addr, 1);
2578 return 0;
2579 }
2580
2581 union semun {
2582 int val;
2583 struct semid_ds *buf;
2584 unsigned short *array;
2585 struct seminfo *__buf;
2586 };
2587
2588 union target_semun {
2589 int val;
2590 abi_ulong buf;
2591 abi_ulong array;
2592 abi_ulong __buf;
2593 };
2594
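/* Copy a guest semaphore value array (GETALL/SETALL) into a freshly
 * allocated host array; the number of semaphores is obtained via IPC_STAT
 * on the semaphore set.
 */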
2595 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2596 abi_ulong target_addr)
2597 {
2598 int nsems;
2599 unsigned short *array;
2600 union semun semun;
2601 struct semid_ds semid_ds;
2602 int i, ret;
2603
2604 semun.buf = &semid_ds;
2605
2606 ret = semctl(semid, 0, IPC_STAT, semun);
2607 if (ret == -1)
2608 return get_errno(ret);
2609
2610 nsems = semid_ds.sem_nsems;
2611
2612 *host_array = malloc(nsems*sizeof(unsigned short));
2613 array = lock_user(VERIFY_READ, target_addr,
2614 nsems*sizeof(unsigned short), 1);
2615 if (!array)
2616 return -TARGET_EFAULT;
2617
2618 for(i=0; i<nsems; i++) {
2619 __get_user((*host_array)[i], &array[i]);
2620 }
2621 unlock_user(array, target_addr, 0);
2622
2623 return 0;
2624 }
2625
2626 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2627 unsigned short **host_array)
2628 {
2629 int nsems;
2630 unsigned short *array;
2631 union semun semun;
2632 struct semid_ds semid_ds;
2633 int i, ret;
2634
2635 semun.buf = &semid_ds;
2636
2637 ret = semctl(semid, 0, IPC_STAT, semun);
2638 if (ret == -1)
2639 return get_errno(ret);
2640
2641 nsems = semid_ds.sem_nsems;
2642
2643 array = lock_user(VERIFY_WRITE, target_addr,
2644 nsems*sizeof(unsigned short), 0);
2645 if (!array)
2646 return -TARGET_EFAULT;
2647
2648 for(i=0; i<nsems; i++) {
2649 __put_user((*host_array)[i], &array[i]);
2650 }
2651 free(*host_array);
2652 unlock_user(array, target_addr, 1);
2653
2654 return 0;
2655 }
2656
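/* Emulate semctl(): convert the target semun argument for the given command,
 * issue the host semctl(), and convert the result back to guest format.
 */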
2657 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2658 union target_semun target_su)
2659 {
2660 union semun arg;
2661 struct semid_ds dsarg;
2662 unsigned short *array = NULL;
2663 struct seminfo seminfo;
2664 abi_long ret = -TARGET_EINVAL;
2665 abi_long err;
2666 cmd &= 0xff;
2667
2668 switch( cmd ) {
2669 case GETVAL:
2670 case SETVAL:
2671 arg.val = tswap32(target_su.val);
2672 ret = get_errno(semctl(semid, semnum, cmd, arg));
2673 target_su.val = tswap32(arg.val);
2674 break;
2675 case GETALL:
2676 case SETALL:
2677 err = target_to_host_semarray(semid, &array, target_su.array);
2678 if (err)
2679 return err;
2680 arg.array = array;
2681 ret = get_errno(semctl(semid, semnum, cmd, arg));
2682 err = host_to_target_semarray(semid, target_su.array, &array);
2683 if (err)
2684 return err;
2685 break;
2686 case IPC_STAT:
2687 case IPC_SET:
2688 case SEM_STAT:
2689 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2690 if (err)
2691 return err;
2692 arg.buf = &dsarg;
2693 ret = get_errno(semctl(semid, semnum, cmd, arg));
2694 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2695 if (err)
2696 return err;
2697 break;
2698 case IPC_INFO:
2699 case SEM_INFO:
2700 arg.__buf = &seminfo;
2701 ret = get_errno(semctl(semid, semnum, cmd, arg));
2702 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2703 if (err)
2704 return err;
2705 break;
2706 case IPC_RMID:
2707 case GETPID:
2708 case GETNCNT:
2709 case GETZCNT:
2710 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2711 break;
2712 }
2713
2714 return ret;
2715 }
2716
2717 struct target_sembuf {
2718 unsigned short sem_num;
2719 short sem_op;
2720 short sem_flg;
2721 };
2722
2723 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2724 abi_ulong target_addr,
2725 unsigned nsops)
2726 {
2727 struct target_sembuf *target_sembuf;
2728 int i;
2729
2730 target_sembuf = lock_user(VERIFY_READ, target_addr,
2731 nsops*sizeof(struct target_sembuf), 1);
2732 if (!target_sembuf)
2733 return -TARGET_EFAULT;
2734
2735 for(i=0; i<nsops; i++) {
2736 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2737 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2738 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2739 }
2740
2741 unlock_user(target_sembuf, target_addr, 0);
2742
2743 return 0;
2744 }
2745
2746 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2747 {
2748 struct sembuf sops[nsops];
2749
2750 if (target_to_host_sembuf(sops, ptr, nsops))
2751 return -TARGET_EFAULT;
2752
2753 return get_errno(semop(semid, sops, nsops));
2754 }
2755
2756 struct target_msqid_ds
2757 {
2758 struct target_ipc_perm msg_perm;
2759 abi_ulong msg_stime;
2760 #if TARGET_ABI_BITS == 32
2761 abi_ulong __unused1;
2762 #endif
2763 abi_ulong msg_rtime;
2764 #if TARGET_ABI_BITS == 32
2765 abi_ulong __unused2;
2766 #endif
2767 abi_ulong msg_ctime;
2768 #if TARGET_ABI_BITS == 32
2769 abi_ulong __unused3;
2770 #endif
2771 abi_ulong __msg_cbytes;
2772 abi_ulong msg_qnum;
2773 abi_ulong msg_qbytes;
2774 abi_ulong msg_lspid;
2775 abi_ulong msg_lrpid;
2776 abi_ulong __unused4;
2777 abi_ulong __unused5;
2778 };
2779
2780 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2781 abi_ulong target_addr)
2782 {
2783 struct target_msqid_ds *target_md;
2784
2785 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2786 return -TARGET_EFAULT;
2787 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2788 return -TARGET_EFAULT;
2789 host_md->msg_stime = tswapal(target_md->msg_stime);
2790 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2791 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2792 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2793 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2794 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2795 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2796 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2797 unlock_user_struct(target_md, target_addr, 0);
2798 return 0;
2799 }
2800
2801 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2802 struct msqid_ds *host_md)
2803 {
2804 struct target_msqid_ds *target_md;
2805
2806 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2807 return -TARGET_EFAULT;
2808 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2809 return -TARGET_EFAULT;
2810 target_md->msg_stime = tswapal(host_md->msg_stime);
2811 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2812 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2813 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2814 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2815 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2816 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2817 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2818 unlock_user_struct(target_md, target_addr, 1);
2819 return 0;
2820 }
2821
2822 struct target_msginfo {
2823 int msgpool;
2824 int msgmap;
2825 int msgmax;
2826 int msgmnb;
2827 int msgmni;
2828 int msgssz;
2829 int msgtql;
2830 unsigned short int msgseg;
2831 };
2832
2833 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2834 struct msginfo *host_msginfo)
2835 {
2836 struct target_msginfo *target_msginfo;
2837 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2838 return -TARGET_EFAULT;
2839 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2840 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2841 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2842 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2843 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2844 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2845 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2846 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2847 unlock_user_struct(target_msginfo, target_addr, 1);
2848 return 0;
2849 }
2850
2851 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2852 {
2853 struct msqid_ds dsarg;
2854 struct msginfo msginfo;
2855 abi_long ret = -TARGET_EINVAL;
2856
2857 cmd &= 0xff;
2858
2859 switch (cmd) {
2860 case IPC_STAT:
2861 case IPC_SET:
2862 case MSG_STAT:
2863 if (target_to_host_msqid_ds(&dsarg,ptr))
2864 return -TARGET_EFAULT;
2865 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2866 if (host_to_target_msqid_ds(ptr,&dsarg))
2867 return -TARGET_EFAULT;
2868 break;
2869 case IPC_RMID:
2870 ret = get_errno(msgctl(msgid, cmd, NULL));
2871 break;
2872 case IPC_INFO:
2873 case MSG_INFO:
2874 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2875 if (host_to_target_msginfo(ptr, &msginfo))
2876 return -TARGET_EFAULT;
2877 break;
2878 }
2879
2880 return ret;
2881 }
2882
2883 struct target_msgbuf {
2884 abi_long mtype;
2885 char mtext[1];
2886 };
2887
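/* msgsnd()/msgrcv() emulation: the message is staged in a temporary host
 * msgbuf, with only the mtype field needing a byte swap; mtext is copied
 * verbatim.
 */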
2888 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2889 unsigned int msgsz, int msgflg)
2890 {
2891 struct target_msgbuf *target_mb;
2892 struct msgbuf *host_mb;
2893 abi_long ret = 0;
2894
2895 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2896 return -TARGET_EFAULT;
2897 host_mb = malloc(msgsz+sizeof(long));
2898 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2899 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2900 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2901 free(host_mb);
2902 unlock_user_struct(target_mb, msgp, 0);
2903
2904 return ret;
2905 }
2906
2907 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2908 unsigned int msgsz, abi_long msgtyp,
2909 int msgflg)
2910 {
2911 struct target_msgbuf *target_mb;
2912 char *target_mtext;
2913 struct msgbuf *host_mb;
2914 abi_long ret = 0;
2915
2916 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2917 return -TARGET_EFAULT;
2918
2919 host_mb = g_malloc(msgsz+sizeof(long));
2920 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2921
2922 if (ret > 0) {
2923 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2924 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2925 if (!target_mtext) {
2926 ret = -TARGET_EFAULT;
2927 goto end;
2928 }
2929 memcpy(target_mb->mtext, host_mb->mtext, ret);
2930 unlock_user(target_mtext, target_mtext_addr, ret);
2931 }
2932
2933 target_mb->mtype = tswapal(host_mb->mtype);
2934
2935 end:
2936 if (target_mb)
2937 unlock_user_struct(target_mb, msgp, 1);
2938 g_free(host_mb);
2939 return ret;
2940 }
2941
2942 struct target_shmid_ds
2943 {
2944 struct target_ipc_perm shm_perm;
2945 abi_ulong shm_segsz;
2946 abi_ulong shm_atime;
2947 #if TARGET_ABI_BITS == 32
2948 abi_ulong __unused1;
2949 #endif
2950 abi_ulong shm_dtime;
2951 #if TARGET_ABI_BITS == 32
2952 abi_ulong __unused2;
2953 #endif
2954 abi_ulong shm_ctime;
2955 #if TARGET_ABI_BITS == 32
2956 abi_ulong __unused3;
2957 #endif
2958 int shm_cpid;
2959 int shm_lpid;
2960 abi_ulong shm_nattch;
2961 unsigned long int __unused4;
2962 unsigned long int __unused5;
2963 };
2964
2965 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2966 abi_ulong target_addr)
2967 {
2968 struct target_shmid_ds *target_sd;
2969
2970 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2971 return -TARGET_EFAULT;
2972 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2973 return -TARGET_EFAULT;
2974 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2975 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2976 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2977 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2978 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2979 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2980 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2981 unlock_user_struct(target_sd, target_addr, 0);
2982 return 0;
2983 }
2984
2985 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2986 struct shmid_ds *host_sd)
2987 {
2988 struct target_shmid_ds *target_sd;
2989
2990 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2991 return -TARGET_EFAULT;
2992 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2993 return -TARGET_EFAULT;
2994 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2995 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2996 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2997 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2998 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2999 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3000 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3001 unlock_user_struct(target_sd, target_addr, 1);
3002 return 0;
3003 }
3004
3005 struct target_shminfo {
3006 abi_ulong shmmax;
3007 abi_ulong shmmin;
3008 abi_ulong shmmni;
3009 abi_ulong shmseg;
3010 abi_ulong shmall;
3011 };
3012
3013 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3014 struct shminfo *host_shminfo)
3015 {
3016 struct target_shminfo *target_shminfo;
3017 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3018 return -TARGET_EFAULT;
3019 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3020 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3021 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3022 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3023 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3024 unlock_user_struct(target_shminfo, target_addr, 1);
3025 return 0;
3026 }
3027
3028 struct target_shm_info {
3029 int used_ids;
3030 abi_ulong shm_tot;
3031 abi_ulong shm_rss;
3032 abi_ulong shm_swp;
3033 abi_ulong swap_attempts;
3034 abi_ulong swap_successes;
3035 };
3036
3037 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3038 struct shm_info *host_shm_info)
3039 {
3040 struct target_shm_info *target_shm_info;
3041 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3042 return -TARGET_EFAULT;
3043 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3044 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3045 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3046 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3047 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3048 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3049 unlock_user_struct(target_shm_info, target_addr, 1);
3050 return 0;
3051 }
3052
3053 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3054 {
3055 struct shmid_ds dsarg;
3056 struct shminfo shminfo;
3057 struct shm_info shm_info;
3058 abi_long ret = -TARGET_EINVAL;
3059
3060 cmd &= 0xff;
3061
3062 switch(cmd) {
3063 case IPC_STAT:
3064 case IPC_SET:
3065 case SHM_STAT:
3066 if (target_to_host_shmid_ds(&dsarg, buf))
3067 return -TARGET_EFAULT;
3068 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3069 if (host_to_target_shmid_ds(buf, &dsarg))
3070 return -TARGET_EFAULT;
3071 break;
3072 case IPC_INFO:
3073 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3074 if (host_to_target_shminfo(buf, &shminfo))
3075 return -TARGET_EFAULT;
3076 break;
3077 case SHM_INFO:
3078 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3079 if (host_to_target_shm_info(buf, &shm_info))
3080 return -TARGET_EFAULT;
3081 break;
3082 case IPC_RMID:
3083 case SHM_LOCK:
3084 case SHM_UNLOCK:
3085 ret = get_errno(shmctl(shmid, cmd, NULL));
3086 break;
3087 }
3088
3089 return ret;
3090 }
3091
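/* Attach a SysV shared memory segment into the guest address space.  When
 * the guest does not supply an address, a free region is picked with
 * mmap_find_vma() and the segment is mapped there with SHM_REMAP.  The
 * attachment is recorded in shm_regions[] so do_shmdt() can undo the page
 * flags later.
 */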
3092 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3093 {
3094 abi_long raddr;
3095 void *host_raddr;
3096 struct shmid_ds shm_info;
3097 int i,ret;
3098
3099 /* find out the length of the shared memory segment */
3100 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3101 if (is_error(ret)) {
3102 /* can't get length, bail out */
3103 return ret;
3104 }
3105
3106 mmap_lock();
3107
3108 if (shmaddr)
3109 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3110 else {
3111 abi_ulong mmap_start;
3112
3113 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3114
3115 if (mmap_start == -1) {
3116 errno = ENOMEM;
3117 host_raddr = (void *)-1;
3118 } else
3119 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3120 }
3121
3122 if (host_raddr == (void *)-1) {
3123 mmap_unlock();
3124 return get_errno((long)host_raddr);
3125 }
3126 raddr=h2g((unsigned long)host_raddr);
3127
3128 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3129 PAGE_VALID | PAGE_READ |
3130 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3131
3132 for (i = 0; i < N_SHM_REGIONS; i++) {
3133 if (shm_regions[i].start == 0) {
3134 shm_regions[i].start = raddr;
3135 shm_regions[i].size = shm_info.shm_segsz;
3136 break;
3137 }
3138 }
3139
3140 mmap_unlock();
3141 return raddr;
3142
3143 }
3144
3145 static inline abi_long do_shmdt(abi_ulong shmaddr)
3146 {
3147 int i;
3148
3149 for (i = 0; i < N_SHM_REGIONS; ++i) {
3150 if (shm_regions[i].start == shmaddr) {
3151 shm_regions[i].start = 0;
3152 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3153 break;
3154 }
3155 }
3156
3157 return get_errno(shmdt(g2h(shmaddr)));
3158 }
3159
3160 #ifdef TARGET_NR_ipc
3161 /* ??? This only works with linear mappings. */
3162 /* do_ipc() must return target values and target errnos. */
3163 static abi_long do_ipc(unsigned int call, int first,
3164 int second, int third,
3165 abi_long ptr, abi_long fifth)
3166 {
3167 int version;
3168 abi_long ret = 0;
3169
3170 version = call >> 16;
3171 call &= 0xffff;
3172
3173 switch (call) {
3174 case IPCOP_semop:
3175 ret = do_semop(first, ptr, second);
3176 break;
3177
3178 case IPCOP_semget:
3179 ret = get_errno(semget(first, second, third));
3180 break;
3181
3182 case IPCOP_semctl:
3183 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3184 break;
3185
3186 case IPCOP_msgget:
3187 ret = get_errno(msgget(first, second));
3188 break;
3189
3190 case IPCOP_msgsnd:
3191 ret = do_msgsnd(first, ptr, second, third);
3192 break;
3193
3194 case IPCOP_msgctl:
3195 ret = do_msgctl(first, second, ptr);
3196 break;
3197
3198 case IPCOP_msgrcv:
3199 switch (version) {
3200 case 0:
3201 {
3202 struct target_ipc_kludge {
3203 abi_long msgp;
3204 abi_long msgtyp;
3205 } *tmp;
3206
3207 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3208 ret = -TARGET_EFAULT;
3209 break;
3210 }
3211
3212 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3213
3214 unlock_user_struct(tmp, ptr, 0);
3215 break;
3216 }
3217 default:
3218 ret = do_msgrcv(first, ptr, second, fifth, third);
3219 }
3220 break;
3221
3222 case IPCOP_shmat:
3223 switch (version) {
3224 default:
3225 {
3226 abi_ulong raddr;
3227 raddr = do_shmat(first, ptr, second);
3228 if (is_error(raddr))
3229 return get_errno(raddr);
3230 if (put_user_ual(raddr, third))
3231 return -TARGET_EFAULT;
3232 break;
3233 }
3234 case 1:
3235 ret = -TARGET_EINVAL;
3236 break;
3237 }
3238 break;
3239 case IPCOP_shmdt:
3240 ret = do_shmdt(ptr);
3241 break;
3242
3243 case IPCOP_shmget:
3244 /* IPC_* flag values are the same on all linux platforms */
3245 ret = get_errno(shmget(first, second, third));
3246 break;
3247
3248 /* IPC_* and SHM_* command values are the same on all linux platforms */
3249 case IPCOP_shmctl:
3250 ret = do_shmctl(first, second, third);
3251 break;
3252 default:
3253 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3254 ret = -TARGET_ENOSYS;
3255 break;
3256 }
3257 return ret;
3258 }
3259 #endif
3260
3261 /* kernel structure types definitions */
3262
3263 #define STRUCT(name, ...) STRUCT_ ## name,
3264 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3265 enum {
3266 #include "syscall_types.h"
3267 };
3268 #undef STRUCT
3269 #undef STRUCT_SPECIAL
3270
3271 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3272 #define STRUCT_SPECIAL(name)
3273 #include "syscall_types.h"
3274 #undef STRUCT
3275 #undef STRUCT_SPECIAL
3276
3277 typedef struct IOCTLEntry IOCTLEntry;
3278
3279 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3280 int fd, abi_long cmd, abi_long arg);
3281
3282 struct IOCTLEntry {
3283 unsigned int target_cmd;
3284 unsigned int host_cmd;
3285 const char *name;
3286 int access;
3287 do_ioctl_fn *do_ioctl;
3288 const argtype arg_type[5];
3289 };
3290
3291 #define IOC_R 0x0001
3292 #define IOC_W 0x0002
3293 #define IOC_RW (IOC_R | IOC_W)
3294
3295 #define MAX_STRUCT_SIZE 4096
3296
3297 #ifdef CONFIG_FIEMAP
3298 /* So fiemap access checks don't overflow on 32 bit systems.
3299 * This is very slightly smaller than the limit imposed by
3300 * the underlying kernel.
3301 */
3302 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3303 / sizeof(struct fiemap_extent))
3304
3305 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3306 int fd, abi_long cmd, abi_long arg)
3307 {
3308 /* The parameter for this ioctl is a struct fiemap followed
3309 * by an array of struct fiemap_extent whose size is set
3310 * in fiemap->fm_extent_count. The array is filled in by the
3311 * ioctl.
3312 */
3313 int target_size_in, target_size_out;
3314 struct fiemap *fm;
3315 const argtype *arg_type = ie->arg_type;
3316 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3317 void *argptr, *p;
3318 abi_long ret;
3319 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3320 uint32_t outbufsz;
3321 int free_fm = 0;
3322
3323 assert(arg_type[0] == TYPE_PTR);
3324 assert(ie->access == IOC_RW);
3325 arg_type++;
3326 target_size_in = thunk_type_size(arg_type, 0);
3327 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3328 if (!argptr) {
3329 return -TARGET_EFAULT;
3330 }
3331 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3332 unlock_user(argptr, arg, 0);
3333 fm = (struct fiemap *)buf_temp;
3334 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3335 return -TARGET_EINVAL;
3336 }
3337
3338 outbufsz = sizeof (*fm) +
3339 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3340
3341 if (outbufsz > MAX_STRUCT_SIZE) {
3342 /* We can't fit all the extents into the fixed size buffer.
3343 * Allocate one that is large enough and use it instead.
3344 */
3345 fm = malloc(outbufsz);
3346 if (!fm) {
3347 return -TARGET_ENOMEM;
3348 }
3349 memcpy(fm, buf_temp, sizeof(struct fiemap));
3350 free_fm = 1;
3351 }
3352 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3353 if (!is_error(ret)) {
3354 target_size_out = target_size_in;
3355 /* An extent_count of 0 means we were only counting the extents
3356 * so there are no structs to copy
3357 */
3358 if (fm->fm_extent_count != 0) {
3359 target_size_out += fm->fm_mapped_extents * extent_size;
3360 }
3361 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3362 if (!argptr) {
3363 ret = -TARGET_EFAULT;
3364 } else {
3365 /* Convert the struct fiemap */
3366 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3367 if (fm->fm_extent_count != 0) {
3368 p = argptr + target_size_in;
3369 /* ...and then all the struct fiemap_extents */
3370 for (i = 0; i < fm->fm_mapped_extents; i++) {
3371 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3372 THUNK_TARGET);
3373 p += extent_size;
3374 }
3375 }
3376 unlock_user(argptr, arg, target_size_out);
3377 }
3378 }
3379 if (free_fm) {
3380 free(fm);
3381 }
3382 return ret;
3383 }
3384 #endif
3385
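/* Special handler for the ifconf ioctl: the guest's struct ifconf carries a
 * pointer to an array of target ifreq structures.  Convert the header, run
 * the host ioctl into a host-sized buffer, then copy the returned ifreq
 * entries and the adjusted ifc_len back out in target format.
 */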
3386 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3387 int fd, abi_long cmd, abi_long arg)
3388 {
3389 const argtype *arg_type = ie->arg_type;
3390 int target_size;
3391 void *argptr;
3392 int ret;
3393 struct ifconf *host_ifconf;
3394 uint32_t outbufsz;
3395 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3396 int target_ifreq_size;
3397 int nb_ifreq;
3398 int free_buf = 0;
3399 int i;
3400 int target_ifc_len;
3401 abi_long target_ifc_buf;
3402 int host_ifc_len;
3403 char *host_ifc_buf;
3404
3405 assert(arg_type[0] == TYPE_PTR);
3406 assert(ie->access == IOC_RW);
3407
3408 arg_type++;
3409 target_size = thunk_type_size(arg_type, 0);
3410
3411 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3412 if (!argptr)
3413 return -TARGET_EFAULT;
3414 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3415 unlock_user(argptr, arg, 0);
3416
3417 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3418 target_ifc_len = host_ifconf->ifc_len;
3419 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3420
3421 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3422 nb_ifreq = target_ifc_len / target_ifreq_size;
3423 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3424
3425 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3426 if (outbufsz > MAX_STRUCT_SIZE) {
3427 /* We can't fit all the ifreq entries into the fixed size buffer.
3428 * Allocate one that is large enough and use it instead.
3429 */
3430 host_ifconf = malloc(outbufsz);
3431 if (!host_ifconf) {
3432 return -TARGET_ENOMEM;
3433 }
3434 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3435 free_buf = 1;
3436 }
3437 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3438
3439 host_ifconf->ifc_len = host_ifc_len;
3440 host_ifconf->ifc_buf = host_ifc_buf;
3441
3442 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3443 if (!is_error(ret)) {
3444 /* convert host ifc_len to target ifc_len */
3445
3446 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3447 target_ifc_len = nb_ifreq * target_ifreq_size;
3448 host_ifconf->ifc_len = target_ifc_len;
3449
3450 /* restore target ifc_buf */
3451
3452 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3453
3454 /* copy struct ifconf to target user */
3455
3456 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3457 if (!argptr)
3458 return -TARGET_EFAULT;
3459 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3460 unlock_user(argptr, arg, target_size);
3461
3462 /* copy ifreq[] to target user */
3463
3464 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3465 for (i = 0; i < nb_ifreq ; i++) {
3466 thunk_convert(argptr + i * target_ifreq_size,
3467 host_ifc_buf + i * sizeof(struct ifreq),
3468 ifreq_arg_type, THUNK_TARGET);
3469 }
3470 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3471 }
3472
3473 if (free_buf) {
3474 free(host_ifconf);
3475 }
3476
3477 return ret;
3478 }
3479
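/* Special handler for device-mapper ioctls: the argument is a struct
 * dm_ioctl header followed by a variable-sized payload at data_start.  The
 * header is converted via the thunk machinery, while the payload is
 * converted by hand for each DM_* command in both directions.
 */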
3480 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3481 abi_long cmd, abi_long arg)
3482 {
3483 void *argptr;
3484 struct dm_ioctl *host_dm;
3485 abi_long guest_data;
3486 uint32_t guest_data_size;
3487 int target_size;
3488 const argtype *arg_type = ie->arg_type;
3489 abi_long ret;
3490 void *big_buf = NULL;
3491 char *host_data;
3492
3493 arg_type++;
3494 target_size = thunk_type_size(arg_type, 0);
3495 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3496 if (!argptr) {
3497 ret = -TARGET_EFAULT;
3498 goto out;
3499 }
3500 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3501 unlock_user(argptr, arg, 0);
3502
3503 /* buf_temp is too small, so fetch things into a bigger buffer */
3504 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3505 memcpy(big_buf, buf_temp, target_size);
3506 buf_temp = big_buf;
3507 host_dm = big_buf;
3508
3509 guest_data = arg + host_dm->data_start;
3510 if ((guest_data - arg) < 0) {
3511 ret = -EINVAL;
3512 goto out;
3513 }
3514 guest_data_size = host_dm->data_size - host_dm->data_start;
3515 host_data = (char*)host_dm + host_dm->data_start;
3516
3517 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3518 switch (ie->host_cmd) {
3519 case DM_REMOVE_ALL:
3520 case DM_LIST_DEVICES:
3521 case DM_DEV_CREATE:
3522 case DM_DEV_REMOVE:
3523 case DM_DEV_SUSPEND:
3524 case DM_DEV_STATUS:
3525 case DM_DEV_WAIT:
3526 case DM_TABLE_STATUS:
3527 case DM_TABLE_CLEAR:
3528 case DM_TABLE_DEPS:
3529 case DM_LIST_VERSIONS:
3530 /* no input data */
3531 break;
3532 case DM_DEV_RENAME:
3533 case DM_DEV_SET_GEOMETRY:
3534 /* data contains only strings */
3535 memcpy(host_data, argptr, guest_data_size);
3536 break;
3537 case DM_TARGET_MSG:
3538 memcpy(host_data, argptr, guest_data_size);
3539 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3540 break;
3541 case DM_TABLE_LOAD:
3542 {
3543 void *gspec = argptr;
3544 void *cur_data = host_data;
3545 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3546 int spec_size = thunk_type_size(arg_type, 0);
3547 int i;
3548
3549 for (i = 0; i < host_dm->target_count; i++) {
3550 struct dm_target_spec *spec = cur_data;
3551 uint32_t next;
3552 int slen;
3553
3554 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3555 slen = strlen((char*)gspec + spec_size) + 1;
3556 next = spec->next;
3557 spec->next = sizeof(*spec) + slen;
3558 strcpy((char*)&spec[1], gspec + spec_size);
3559 gspec += next;
3560 cur_data += spec->next;
3561 }
3562 break;
3563 }
3564 default:
3565 ret = -TARGET_EINVAL;
3566 goto out;
3567 }
3568 unlock_user(argptr, guest_data, 0);
3569
3570 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3571 if (!is_error(ret)) {
3572 guest_data = arg + host_dm->data_start;
3573 guest_data_size = host_dm->data_size - host_dm->data_start;
3574 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3575 switch (ie->host_cmd) {
3576 case DM_REMOVE_ALL:
3577 case DM_DEV_CREATE:
3578 case DM_DEV_REMOVE:
3579 case DM_DEV_RENAME:
3580 case DM_DEV_SUSPEND:
3581 case DM_DEV_STATUS:
3582 case DM_TABLE_LOAD:
3583 case DM_TABLE_CLEAR:
3584 case DM_TARGET_MSG:
3585 case DM_DEV_SET_GEOMETRY:
3586 /* no return data */
3587 break;
3588 case DM_LIST_DEVICES:
3589 {
3590 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3591 uint32_t remaining_data = guest_data_size;
3592 void *cur_data = argptr;
3593 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3594 int nl_size = 12; /* can't use thunk_size due to alignment */
3595
3596 while (1) {
3597 uint32_t next = nl->next;
3598 if (next) {
3599 nl->next = nl_size + (strlen(nl->name) + 1);
3600 }
3601 if (remaining_data < nl->next) {
3602 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3603 break;
3604 }
3605 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3606 strcpy(cur_data + nl_size, nl->name);
3607 cur_data += nl->next;
3608 remaining_data -= nl->next;
3609 if (!next) {
3610 break;
3611 }
3612 nl = (void*)nl + next;
3613 }
3614 break;
3615 }
3616 case DM_DEV_WAIT:
3617 case DM_TABLE_STATUS:
3618 {
3619 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3620 void *cur_data = argptr;
3621 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3622 int spec_size = thunk_type_size(arg_type, 0);
3623 int i;
3624
3625 for (i = 0; i < host_dm->target_count; i++) {
3626 uint32_t next = spec->next;
3627 int slen = strlen((char*)&spec[1]) + 1;
3628 spec->next = (cur_data - argptr) + spec_size + slen;
3629 if (guest_data_size < spec->next) {
3630 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3631 break;
3632 }
3633 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3634 strcpy(cur_data + spec_size, (char*)&spec[1]);
3635 cur_data = argptr + spec->next;
3636 spec = (void*)host_dm + host_dm->data_start + next;
3637 }
3638 break;
3639 }
3640 case DM_TABLE_DEPS:
3641 {
3642 void *hdata = (void*)host_dm + host_dm->data_start;
3643 int count = *(uint32_t*)hdata;
3644 uint64_t *hdev = hdata + 8;
3645 uint64_t *gdev = argptr + 8;
3646 int i;
3647
3648 *(uint32_t*)argptr = tswap32(count);
3649 for (i = 0; i < count; i++) {
3650 *gdev = tswap64(*hdev);
3651 gdev++;
3652 hdev++;
3653 }
3654 break;
3655 }
3656 case DM_LIST_VERSIONS:
3657 {
3658 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3659 uint32_t remaining_data = guest_data_size;
3660 void *cur_data = argptr;
3661 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3662 int vers_size = thunk_type_size(arg_type, 0);
3663
3664 while (1) {
3665 uint32_t next = vers->next;
3666 if (next) {
3667 vers->next = vers_size + (strlen(vers->name) + 1);
3668 }
3669 if (remaining_data < vers->next) {
3670 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3671 break;
3672 }
3673 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3674 strcpy(cur_data + vers_size, vers->name);
3675 cur_data += vers->next;
3676 remaining_data -= vers->next;
3677 if (!next) {
3678 break;
3679 }
3680 vers = (void*)vers + next;
3681 }
3682 break;
3683 }
3684 default:
3685 ret = -TARGET_EINVAL;
3686 goto out;
3687 }
3688 unlock_user(argptr, guest_data, guest_data_size);
3689
3690 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3691 if (!argptr) {
3692 ret = -TARGET_EFAULT;
3693 goto out;
3694 }
3695 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3696 unlock_user(argptr, arg, target_size);
3697 }
3698 out:
3699 g_free(big_buf);
3700 return ret;
3701 }
3702
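/* Table of supported ioctls, generated from ioctls.h.  Each entry maps a
 * target ioctl number to the host number plus either an argument type
 * description for generic thunk conversion or a special-case do_ioctl_fn.
 */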
3703 static IOCTLEntry ioctl_entries[] = {
3704 #define IOCTL(cmd, access, ...) \
3705 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3706 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3707 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3708 #include "ioctls.h"
3709 { 0, 0, },
3710 };
3711
3712 /* ??? Implement proper locking for ioctls. */
3713 /* do_ioctl() Must return target values and target errnos. */
3714 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3715 {
3716 const IOCTLEntry *ie;
3717 const argtype *arg_type;
3718 abi_long ret;
3719 uint8_t buf_temp[MAX_STRUCT_SIZE];
3720 int target_size;
3721 void *argptr;
3722
3723 ie = ioctl_entries;
3724 for(;;) {
3725 if (ie->target_cmd == 0) {
3726 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3727 return -TARGET_ENOSYS;
3728 }
3729 if (ie->target_cmd == cmd)
3730 break;
3731 ie++;
3732 }
3733 arg_type = ie->arg_type;
3734 #if defined(DEBUG)
3735 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3736 #endif
3737 if (ie->do_ioctl) {
3738 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3739 }
3740
3741 switch(arg_type[0]) {
3742 case TYPE_NULL:
3743 /* no argument */
3744 ret = get_errno(ioctl(fd, ie->host_cmd));
3745 break;
3746 case TYPE_PTRVOID:
3747 case TYPE_INT:
3748 /* int argument */
3749 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3750 break;
3751 case TYPE_PTR:
3752 arg_type++;
3753 target_size = thunk_type_size(arg_type, 0);
3754 switch(ie->access) {
3755 case IOC_R:
3756 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3757 if (!is_error(ret)) {
3758 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3759 if (!argptr)
3760 return -TARGET_EFAULT;
3761 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3762 unlock_user(argptr, arg, target_size);
3763 }
3764 break;
3765 case IOC_W:
3766 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3767 if (!argptr)
3768 return -TARGET_EFAULT;
3769 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3770 unlock_user(argptr, arg, 0);
3771 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3772 break;
3773 default:
3774 case IOC_RW:
3775 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3776 if (!argptr)
3777 return -TARGET_EFAULT;
3778 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3779 unlock_user(argptr, arg, 0);
3780 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3781 if (!is_error(ret)) {
3782 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3783 if (!argptr)
3784 return -TARGET_EFAULT;
3785 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3786 unlock_user(argptr, arg, target_size);
3787 }
3788 break;
3789 }
3790 break;
3791 default:
3792 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3793 (long)cmd, arg_type[0]);
3794 ret = -TARGET_ENOSYS;
3795 break;
3796 }
3797 return ret;
3798 }
3799
3800 static const bitmask_transtbl iflag_tbl[] = {
3801 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3802 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3803 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3804 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3805 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3806 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3807 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3808 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3809 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3810 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3811 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3812 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3813 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3814 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3815 { 0, 0, 0, 0 }
3816 };
3817
3818 static const bitmask_transtbl oflag_tbl[] = {
3819 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3820 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3821 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3822 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3823 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3824 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3825 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3826 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3827 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3828 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3829 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3830 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3831 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3832 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3833 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3834 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3835 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3836 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3837 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3838 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3839 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3840 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3841 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3842 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3843 { 0, 0, 0, 0 }
3844 };
3845
3846 static const bitmask_transtbl cflag_tbl[] = {
3847 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3848 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3849 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3850 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3851 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3852 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3853 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3854 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3855 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3856 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3857 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3858 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3859 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3860 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3861 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3862 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3863 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3864 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3865 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3866 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3867 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3868 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3869 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3870 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3871 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3872 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3873 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3874 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3875 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3876 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3877 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3878 { 0, 0, 0, 0 }
3879 };
3880
3881 static const bitmask_transtbl lflag_tbl[] = {
3882 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3883 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3884 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3885 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3886 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3887 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3888 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3889 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3890 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3891 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3892 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3893 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3894 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3895 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3896 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3897 { 0, 0, 0, 0 }
3898 };
3899
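/* Convert a guest termios structure to the host layout: the flag words go
   through the translation tables above, and the control characters are
   copied individually because the c_cc[] indices can differ between
   guest and host. */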
3900 static void target_to_host_termios (void *dst, const void *src)
3901 {
3902 struct host_termios *host = dst;
3903 const struct target_termios *target = src;
3904
3905 host->c_iflag =
3906 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3907 host->c_oflag =
3908 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3909 host->c_cflag =
3910 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3911 host->c_lflag =
3912 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3913 host->c_line = target->c_line;
3914
3915 memset(host->c_cc, 0, sizeof(host->c_cc));
3916 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3917 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3918 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3919 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3920 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3921 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3922 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3923 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3924 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3925 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3926 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3927 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3928 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3929 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3930 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3931 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3932 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3933 }
3934
3935 static void host_to_target_termios (void *dst, const void *src)
3936 {
3937 struct target_termios *target = dst;
3938 const struct host_termios *host = src;
3939
3940 target->c_iflag =
3941 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3942 target->c_oflag =
3943 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3944 target->c_cflag =
3945 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3946 target->c_lflag =
3947 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3948 target->c_line = host->c_line;
3949
3950 memset(target->c_cc, 0, sizeof(target->c_cc));
3951 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3952 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3953 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3954 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3955 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3956 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3957 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3958 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3959 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3960 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3961 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3962 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3963 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3964 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3965 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3966 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3967 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3968 }
3969
3970 static const StructEntry struct_termios_def = {
3971 .convert = { host_to_target_termios, target_to_host_termios },
3972 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3973 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3974 };
3975
3976 static bitmask_transtbl mmap_flags_tbl[] = {
3977 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3978 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3979 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3980 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3981 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3982 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3983 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3984 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3985 { 0, 0, 0, 0 }
3986 };
3987
3988 #if defined(TARGET_I386)
3989
3990 /* NOTE: there is really only one LDT shared by all the threads */
3991 static uint8_t *ldt_table;
3992
3993 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3994 {
3995 int size;
3996 void *p;
3997
3998 if (!ldt_table)
3999 return 0;
4000 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4001 if (size > bytecount)
4002 size = bytecount;
4003 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4004 if (!p)
4005 return -TARGET_EFAULT;
4006 /* ??? Should this be byteswapped? */
4007 memcpy(p, ldt_table, size);
4008 unlock_user(p, ptr, size);
4009 return size;
4010 }
4011
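/* Write side of modify_ldt(2): decode the guest's descriptor request and
   store the raw descriptor words into the guest-visible LDT, which is
   allocated lazily on first use. */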
4012 /* XXX: add locking support */
4013 static abi_long write_ldt(CPUX86State *env,
4014 abi_ulong ptr, unsigned long bytecount, int oldmode)
4015 {
4016 struct target_modify_ldt_ldt_s ldt_info;
4017 struct target_modify_ldt_ldt_s *target_ldt_info;
4018 int seg_32bit, contents, read_exec_only, limit_in_pages;
4019 int seg_not_present, useable, lm;
4020 uint32_t *lp, entry_1, entry_2;
4021
4022 if (bytecount != sizeof(ldt_info))
4023 return -TARGET_EINVAL;
4024 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4025 return -TARGET_EFAULT;
4026 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4027 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4028 ldt_info.limit = tswap32(target_ldt_info->limit);
4029 ldt_info.flags = tswap32(target_ldt_info->flags);
4030 unlock_user_struct(target_ldt_info, ptr, 0);
4031
4032 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4033 return -TARGET_EINVAL;
4034 seg_32bit = ldt_info.flags & 1;
4035 contents = (ldt_info.flags >> 1) & 3;
4036 read_exec_only = (ldt_info.flags >> 3) & 1;
4037 limit_in_pages = (ldt_info.flags >> 4) & 1;
4038 seg_not_present = (ldt_info.flags >> 5) & 1;
4039 useable = (ldt_info.flags >> 6) & 1;
4040 #ifdef TARGET_ABI32
4041 lm = 0;
4042 #else
4043 lm = (ldt_info.flags >> 7) & 1;
4044 #endif
4045 if (contents == 3) {
4046 if (oldmode)
4047 return -TARGET_EINVAL;
4048 if (seg_not_present == 0)
4049 return -TARGET_EINVAL;
4050 }
4051 /* allocate the LDT */
4052 if (!ldt_table) {
4053 env->ldt.base = target_mmap(0,
4054 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4055 PROT_READ|PROT_WRITE,
4056 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4057 if (env->ldt.base == -1)
4058 return -TARGET_ENOMEM;
4059 memset(g2h(env->ldt.base), 0,
4060 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4061 env->ldt.limit = 0xffff;
4062 ldt_table = g2h(env->ldt.base);
4063 }
4064
4065 /* NOTE: same code as Linux kernel */
4066 /* Allow LDTs to be cleared by the user. */
4067 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4068 if (oldmode ||
4069 (contents == 0 &&
4070 read_exec_only == 1 &&
4071 seg_32bit == 0 &&
4072 limit_in_pages == 0 &&
4073 seg_not_present == 1 &&
4074 useable == 0 )) {
4075 entry_1 = 0;
4076 entry_2 = 0;
4077 goto install;
4078 }
4079 }
4080
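/* Build the two 32-bit halves of the x86 descriptor (same layout as the
   kernel's modify_ldt): entry_1 packs base[15:0] and limit[15:0]; entry_2
   holds the high base/limit bits plus the access flags, where 0x7000 sets
   the S bit and DPL 3. */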
4081 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4082 (ldt_info.limit & 0x0ffff);
4083 entry_2 = (ldt_info.base_addr & 0xff000000) |
4084 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4085 (ldt_info.limit & 0xf0000) |
4086 ((read_exec_only ^ 1) << 9) |
4087 (contents << 10) |
4088 ((seg_not_present ^ 1) << 15) |
4089 (seg_32bit << 22) |
4090 (limit_in_pages << 23) |
4091 (lm << 21) |
4092 0x7000;
4093 if (!oldmode)
4094 entry_2 |= (useable << 20);
4095
4096 /* Install the new entry ... */
4097 install:
4098 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4099 lp[0] = tswap32(entry_1);
4100 lp[1] = tswap32(entry_2);
4101 return 0;
4102 }
4103
4104 /* specific and weird i386 syscalls */
4105 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4106 unsigned long bytecount)
4107 {
4108 abi_long ret;
4109
4110 switch (func) {
4111 case 0:
4112 ret = read_ldt(ptr, bytecount);
4113 break;
4114 case 1:
4115 ret = write_ldt(env, ptr, bytecount, 1);
4116 break;
4117 case 0x11:
4118 ret = write_ldt(env, ptr, bytecount, 0);
4119 break;
4120 default:
4121 ret = -TARGET_ENOSYS;
4122 break;
4123 }
4124 return ret;
4125 }
4126
4127 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4128 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4129 {
4130 uint64_t *gdt_table = g2h(env->gdt.base);
4131 struct target_modify_ldt_ldt_s ldt_info;
4132 struct target_modify_ldt_ldt_s *target_ldt_info;
4133 int seg_32bit, contents, read_exec_only, limit_in_pages;
4134 int seg_not_present, useable, lm;
4135 uint32_t *lp, entry_1, entry_2;
4136 int i;
4137
4138 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4139 if (!target_ldt_info)
4140 return -TARGET_EFAULT;
4141 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4142 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4143 ldt_info.limit = tswap32(target_ldt_info->limit);
4144 ldt_info.flags = tswap32(target_ldt_info->flags);
4145 if (ldt_info.entry_number == -1) {
4146 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4147 if (gdt_table[i] == 0) {
4148 ldt_info.entry_number = i;
4149 target_ldt_info->entry_number = tswap32(i);
4150 break;
4151 }
4152 }
4153 }
4154 unlock_user_struct(target_ldt_info, ptr, 1);
4155
4156 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4157 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4158 return -TARGET_EINVAL;
4159 seg_32bit = ldt_info.flags & 1;
4160 contents = (ldt_info.flags >> 1) & 3;
4161 read_exec_only = (ldt_info.flags >> 3) & 1;
4162 limit_in_pages = (ldt_info.flags >> 4) & 1;
4163 seg_not_present = (ldt_info.flags >> 5) & 1;
4164 useable = (ldt_info.flags >> 6) & 1;
4165 #ifdef TARGET_ABI32
4166 lm = 0;
4167 #else
4168 lm = (ldt_info.flags >> 7) & 1;
4169 #endif
4170
4171 if (contents == 3) {
4172 if (seg_not_present == 0)
4173 return -TARGET_EINVAL;
4174 }
4175
4176 /* NOTE: same code as Linux kernel */
4177 /* Allow LDTs to be cleared by the user. */
4178 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4179 if ((contents == 0 &&
4180 read_exec_only == 1 &&
4181 seg_32bit == 0 &&
4182 limit_in_pages == 0 &&
4183 seg_not_present == 1 &&
4184 useable == 0 )) {
4185 entry_1 = 0;
4186 entry_2 = 0;
4187 goto install;
4188 }
4189 }
4190
4191 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4192 (ldt_info.limit & 0x0ffff);
4193 entry_2 = (ldt_info.base_addr & 0xff000000) |
4194 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4195 (ldt_info.limit & 0xf0000) |
4196 ((read_exec_only ^ 1) << 9) |
4197 (contents << 10) |
4198 ((seg_not_present ^ 1) << 15) |
4199 (seg_32bit << 22) |
4200 (limit_in_pages << 23) |
4201 (useable << 20) |
4202 (lm << 21) |
4203 0x7000;
4204
4205 /* Install the new entry ... */
4206 install:
4207 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4208 lp[0] = tswap32(entry_1);
4209 lp[1] = tswap32(entry_2);
4210 return 0;
4211 }
4212
4213 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4214 {
4215 struct target_modify_ldt_ldt_s *target_ldt_info;
4216 uint64_t *gdt_table = g2h(env->gdt.base);
4217 uint32_t base_addr, limit, flags;
4218 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4219 int seg_not_present, useable, lm;
4220 uint32_t *lp, entry_1, entry_2;
4221
4222 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4223 if (!target_ldt_info)
4224 return -TARGET_EFAULT;
4225 idx = tswap32(target_ldt_info->entry_number);
4226 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4227 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4228 unlock_user_struct(target_ldt_info, ptr, 1);
4229 return -TARGET_EINVAL;
4230 }
4231 lp = (uint32_t *)(gdt_table + idx);
4232 entry_1 = tswap32(lp[0]);
4233 entry_2 = tswap32(lp[1]);
4234
4235 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4236 contents = (entry_2 >> 10) & 3;
4237 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4238 seg_32bit = (entry_2 >> 22) & 1;
4239 limit_in_pages = (entry_2 >> 23) & 1;
4240 useable = (entry_2 >> 20) & 1;
4241 #ifdef TARGET_ABI32
4242 lm = 0;
4243 #else
4244 lm = (entry_2 >> 21) & 1;
4245 #endif
4246 flags = (seg_32bit << 0) | (contents << 1) |
4247 (read_exec_only << 3) | (limit_in_pages << 4) |
4248 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4249 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4250 base_addr = (entry_1 >> 16) |
4251 (entry_2 & 0xff000000) |
4252 ((entry_2 & 0xff) << 16);
4253 target_ldt_info->base_addr = tswapal(base_addr);
4254 target_ldt_info->limit = tswap32(limit);
4255 target_ldt_info->flags = tswap32(flags);
4256 unlock_user_struct(target_ldt_info, ptr, 1);
4257 return 0;
4258 }
4259 #endif /* TARGET_I386 && TARGET_ABI32 */
4260
4261 #ifndef TARGET_ABI32
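/* arch_prctl(2) emulation for 64-bit x86 guests: get or set the FS/GS
   segment base kept in the CPU state. */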
4262 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4263 {
4264 abi_long ret = 0;
4265 abi_ulong val;
4266 int idx;
4267
4268 switch(code) {
4269 case TARGET_ARCH_SET_GS:
4270 case TARGET_ARCH_SET_FS:
4271 if (code == TARGET_ARCH_SET_GS)
4272 idx = R_GS;
4273 else
4274 idx = R_FS;
4275 cpu_x86_load_seg(env, idx, 0);
4276 env->segs[idx].base = addr;
4277 break;
4278 case TARGET_ARCH_GET_GS:
4279 case TARGET_ARCH_GET_FS:
4280 if (code == TARGET_ARCH_GET_GS)
4281 idx = R_GS;
4282 else
4283 idx = R_FS;
4284 val = env->segs[idx].base;
4285 if (put_user(val, addr, abi_ulong))
4286 ret = -TARGET_EFAULT;
4287 break;
4288 default:
4289 ret = -TARGET_EINVAL;
4290 break;
4291 }
4292 return ret;
4293 }
4294 #endif
4295
4296 #endif /* defined(TARGET_I386) */
4297
4298 #define NEW_STACK_SIZE 0x40000
4299
4300 #if defined(CONFIG_USE_NPTL)
4301
4302 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
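/* Handshake data passed from do_fork() to the new thread; the mutex/cond
   pair lets the parent wait until the child has published its TID. */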
4303 typedef struct {
4304 CPUArchState *env;
4305 pthread_mutex_t mutex;
4306 pthread_cond_t cond;
4307 pthread_t thread;
4308 uint32_t tid;
4309 abi_ulong child_tidptr;
4310 abi_ulong parent_tidptr;
4311 sigset_t sigmask;
4312 } new_thread_info;
4313
4314 static void *clone_func(void *arg)
4315 {
4316 new_thread_info *info = arg;
4317 CPUArchState *env;
4318 CPUState *cpu;
4319 TaskState *ts;
4320
4321 env = info->env;
4322 cpu = ENV_GET_CPU(env);
4323 thread_env = env;
4324 ts = (TaskState *)thread_env->opaque;
4325 info->tid = gettid();
4326 cpu->host_tid = info->tid;
4327 task_settid(ts);
4328 if (info->child_tidptr)
4329 put_user_u32(info->tid, info->child_tidptr);
4330 if (info->parent_tidptr)
4331 put_user_u32(info->tid, info->parent_tidptr);
4332 /* Enable signals. */
4333 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4334 /* Signal to the parent that we're ready. */
4335 pthread_mutex_lock(&info->mutex);
4336 pthread_cond_broadcast(&info->cond);
4337 pthread_mutex_unlock(&info->mutex);
4338 /* Wait until the parent has finished initializing the TLS state. */
4339 pthread_mutex_lock(&clone_lock);
4340 pthread_mutex_unlock(&clone_lock);
4341 cpu_loop(env);
4342 /* never exits */
4343 return NULL;
4344 }
4345 #else
4346
4347 static int clone_func(void *arg)
4348 {
4349 CPUArchState *env = arg;
4350 cpu_loop(env);
4351 /* never exits */
4352 return 0;
4353 }
4354 #endif
4355
4356 /* do_fork() must return host values and target errnos (unlike most
4357 do_*() functions). */
4358 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4359 abi_ulong parent_tidptr, target_ulong newtls,
4360 abi_ulong child_tidptr)
4361 {
4362 int ret;
4363 TaskState *ts;
4364 CPUArchState *new_env;
4365 #if defined(CONFIG_USE_NPTL)
4366 unsigned int nptl_flags;
4367 sigset_t sigmask;
4368 #else
4369 uint8_t *new_stack;
4370 #endif
4371
4372 /* Emulate vfork() with fork() */
4373 if (flags & CLONE_VFORK)
4374 flags &= ~(CLONE_VFORK | CLONE_VM);
4375
4376 if (flags & CLONE_VM) {
4377 TaskState *parent_ts = (TaskState *)env->opaque;
4378 #if defined(CONFIG_USE_NPTL)
4379 new_thread_info info;
4380 pthread_attr_t attr;
4381 #endif
4382 ts = g_malloc0(sizeof(TaskState));
4383 init_task_state(ts);
4384 /* we create a new CPU instance. */
4385 new_env = cpu_copy(env);
4386 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4387 cpu_reset(ENV_GET_CPU(new_env));
4388 #endif
4389 /* Init regs that differ from the parent. */
4390 cpu_clone_regs(new_env, newsp);
4391 new_env->opaque = ts;
4392 ts->bprm = parent_ts->bprm;
4393 ts->info = parent_ts->info;
4394 #if defined(CONFIG_USE_NPTL)
4395 nptl_flags = flags;
4396 flags &= ~CLONE_NPTL_FLAGS2;
4397
4398 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4399 ts->child_tidptr = child_tidptr;
4400 }
4401
4402 if (nptl_flags & CLONE_SETTLS)
4403 cpu_set_tls (new_env, newtls);
4404
4405 /* Grab a mutex so that thread setup appears atomic. */
4406 pthread_mutex_lock(&clone_lock);
4407
4408 memset(&info, 0, sizeof(info));
4409 pthread_mutex_init(&info.mutex, NULL);
4410 pthread_mutex_lock(&info.mutex);
4411 pthread_cond_init(&info.cond, NULL);
4412 info.env = new_env;
4413 if (nptl_flags & CLONE_CHILD_SETTID)
4414 info.child_tidptr = child_tidptr;
4415 if (nptl_flags & CLONE_PARENT_SETTID)
4416 info.parent_tidptr = parent_tidptr;
4417
4418 ret = pthread_attr_init(&attr);
4419 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4420 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4421 /* It is not safe to deliver signals until the child has finished
4422 initializing, so temporarily block all signals. */
4423 sigfillset(&sigmask);
4424 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4425
4426 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4427 /* TODO: Free new CPU state if thread creation failed. */
4428
4429 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4430 pthread_attr_destroy(&attr);
4431 if (ret == 0) {
4432 /* Wait for the child to initialize. */
4433 pthread_cond_wait(&info.cond, &info.mutex);
4434 ret = info.tid;
4435 if (flags & CLONE_PARENT_SETTID)
4436 put_user_u32(ret, parent_tidptr);
4437 } else {
4438 ret = -1;
4439 }
4440 pthread_mutex_unlock(&info.mutex);
4441 pthread_cond_destroy(&info.cond);
4442 pthread_mutex_destroy(&info.mutex);
4443 pthread_mutex_unlock(&clone_lock);
4444 #else
4445 if (flags & CLONE_NPTL_FLAGS2)
4446 return -EINVAL;
4447 /* This is probably going to die very quickly, but do it anyway. */
4448 new_stack = g_malloc0 (NEW_STACK_SIZE);
4449 #ifdef __ia64__
4450 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4451 #else
4452 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4453 #endif
4454 #endif
4455 } else {
4456 /* if no CLONE_VM, we consider it a fork */
4457 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4458 return -EINVAL;
4459 fork_start();
4460 ret = fork();
4461 if (ret == 0) {
4462 /* Child Process. */
4463 cpu_clone_regs(env, newsp);
4464 fork_end(1);
4465 #if defined(CONFIG_USE_NPTL)
4466 /* There is a race condition here. The parent process could
4467 theoretically read the TID in the child process before the child
4468 tid is set. This would require using either ptrace
4469 (not implemented) or having *_tidptr point at a shared memory
4470 mapping. We can't repeat the spinlock hack used above because
4471 the child process gets its own copy of the lock. */
4472 if (flags & CLONE_CHILD_SETTID)
4473 put_user_u32(gettid(), child_tidptr);
4474 if (flags & CLONE_PARENT_SETTID)
4475 put_user_u32(gettid(), parent_tidptr);
4476 ts = (TaskState *)env->opaque;
4477 if (flags & CLONE_SETTLS)
4478 cpu_set_tls (env, newtls);
4479 if (flags & CLONE_CHILD_CLEARTID)
4480 ts->child_tidptr = child_tidptr;
4481 #endif
4482 } else {
4483 fork_end(0);
4484 }
4485 }
4486 return ret;
4487 }
4488
4489 /* warning: doesn't handle Linux-specific flags... */
4490 static int target_to_host_fcntl_cmd(int cmd)
4491 {
4492 switch(cmd) {
4493 case TARGET_F_DUPFD:
4494 case TARGET_F_GETFD:
4495 case TARGET_F_SETFD:
4496 case TARGET_F_GETFL:
4497 case TARGET_F_SETFL:
4498 return cmd;
4499 case TARGET_F_GETLK:
4500 return F_GETLK;
4501 case TARGET_F_SETLK:
4502 return F_SETLK;
4503 case TARGET_F_SETLKW:
4504 return F_SETLKW;
4505 case TARGET_F_GETOWN:
4506 return F_GETOWN;
4507 case TARGET_F_SETOWN:
4508 return F_SETOWN;
4509 case TARGET_F_GETSIG:
4510 return F_GETSIG;
4511 case TARGET_F_SETSIG:
4512 return F_SETSIG;
4513 #if TARGET_ABI_BITS == 32
4514 case TARGET_F_GETLK64:
4515 return F_GETLK64;
4516 case TARGET_F_SETLK64:
4517 return F_SETLK64;
4518 case TARGET_F_SETLKW64:
4519 return F_SETLKW64;
4520 #endif
4521 case TARGET_F_SETLEASE:
4522 return F_SETLEASE;
4523 case TARGET_F_GETLEASE:
4524 return F_GETLEASE;
4525 #ifdef F_DUPFD_CLOEXEC
4526 case TARGET_F_DUPFD_CLOEXEC:
4527 return F_DUPFD_CLOEXEC;
4528 #endif
4529 case TARGET_F_NOTIFY:
4530 return F_NOTIFY;
4531 default:
4532 return -TARGET_EINVAL;
4533 }
4534 return -TARGET_EINVAL;
4535 }
4536
4537 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4538 static const bitmask_transtbl flock_tbl[] = {
4539 TRANSTBL_CONVERT(F_RDLCK),
4540 TRANSTBL_CONVERT(F_WRLCK),
4541 TRANSTBL_CONVERT(F_UNLCK),
4542 TRANSTBL_CONVERT(F_EXLCK),
4543 TRANSTBL_CONVERT(F_SHLCK),
4544 { 0, 0, 0, 0 }
4545 };
4546
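/* fcntl(2) emulation: translate the command, convert struct flock
   arguments and flag bitmasks to the host layout, and convert any results
   back for the guest. */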
4547 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4548 {
4549 struct flock fl;
4550 struct target_flock *target_fl;
4551 struct flock64 fl64;
4552 struct target_flock64 *target_fl64;
4553 abi_long ret;
4554 int host_cmd = target_to_host_fcntl_cmd(cmd);
4555
4556 if (host_cmd == -TARGET_EINVAL)
4557 return host_cmd;
4558
4559 switch(cmd) {
4560 case TARGET_F_GETLK:
4561 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4562 return -TARGET_EFAULT;
4563 fl.l_type =
4564 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4565 fl.l_whence = tswap16(target_fl->l_whence);
4566 fl.l_start = tswapal(target_fl->l_start);
4567 fl.l_len = tswapal(target_fl->l_len);
4568 fl.l_pid = tswap32(target_fl->l_pid);
4569 unlock_user_struct(target_fl, arg, 0);
4570 ret = get_errno(fcntl(fd, host_cmd, &fl));
4571 if (ret == 0) {
4572 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4573 return -TARGET_EFAULT;
4574 target_fl->l_type =
4575 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4576 target_fl->l_whence = tswap16(fl.l_whence);
4577 target_fl->l_start = tswapal(fl.l_start);
4578 target_fl->l_len = tswapal(fl.l_len);
4579 target_fl->l_pid = tswap32(fl.l_pid);
4580 unlock_user_struct(target_fl, arg, 1);
4581 }
4582 break;
4583
4584 case TARGET_F_SETLK:
4585 case TARGET_F_SETLKW:
4586 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4587 return -TARGET_EFAULT;
4588 fl.l_type =
4589 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4590 fl.l_whence = tswap16(target_fl->l_whence);
4591 fl.l_start = tswapal(target_fl->l_start);
4592 fl.l_len = tswapal(target_fl->l_len);
4593 fl.l_pid = tswap32(target_fl->l_pid);
4594 unlock_user_struct(target_fl, arg, 0);
4595 ret = get_errno(fcntl(fd, host_cmd, &fl));
4596 break;
4597
4598 case TARGET_F_GETLK64:
4599 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4600 return -TARGET_EFAULT;
4601 fl64.l_type =
4602 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4603 fl64.l_whence = tswap16(target_fl64->l_whence);
4604 fl64.l_start = tswap64(target_fl64->l_start);
4605 fl64.l_len = tswap64(target_fl64->l_len);
4606 fl64.l_pid = tswap32(target_fl64->l_pid);
4607 unlock_user_struct(target_fl64, arg, 0);
4608 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4609 if (ret == 0) {
4610 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4611 return -TARGET_EFAULT;
4612 target_fl64->l_type =
4613 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4614 target_fl64->l_whence = tswap16(fl64.l_whence);
4615 target_fl64->l_start = tswap64(fl64.l_start);
4616 target_fl64->l_len = tswap64(fl64.l_len);
4617 target_fl64->l_pid = tswap32(fl64.l_pid);
4618 unlock_user_struct(target_fl64, arg, 1);
4619 }
4620 break;
4621 case TARGET_F_SETLK64:
4622 case TARGET_F_SETLKW64:
4623 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4624 return -TARGET_EFAULT;
4625 fl64.l_type =
4626 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4627 fl64.l_whence = tswap16(target_fl64->l_whence);
4628 fl64.l_start = tswap64(target_fl64->l_start);
4629 fl64.l_len = tswap64(target_fl64->l_len);
4630 fl64.l_pid = tswap32(target_fl64->l_pid);
4631 unlock_user_struct(target_fl64, arg, 0);
4632 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4633 break;
4634
4635 case TARGET_F_GETFL:
4636 ret = get_errno(fcntl(fd, host_cmd, arg));
4637 if (ret >= 0) {
4638 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4639 }
4640 break;
4641
4642 case TARGET_F_SETFL:
4643 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4644 break;
4645
4646 case TARGET_F_SETOWN:
4647 case TARGET_F_GETOWN:
4648 case TARGET_F_SETSIG:
4649 case TARGET_F_GETSIG:
4650 case TARGET_F_SETLEASE:
4651 case TARGET_F_GETLEASE:
4652 ret = get_errno(fcntl(fd, host_cmd, arg));
4653 break;
4654
4655 default:
4656 ret = get_errno(fcntl(fd, cmd, arg));
4657 break;
4658 }
4659 return ret;
4660 }
4661
4662 #ifdef USE_UID16
4663
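/* Helpers for targets with the legacy 16-bit uid/gid ABI: IDs above 65535
   are clamped to the overflow ID 65534, while 0xffff (-1 as a 16-bit value)
   keeps its "unchanged" meaning when widened. */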
4664 static inline int high2lowuid(int uid)
4665 {
4666 if (uid > 65535)
4667 return 65534;
4668 else
4669 return uid;
4670 }
4671
4672 static inline int high2lowgid(int gid)
4673 {
4674 if (gid > 65535)
4675 return 65534;
4676 else
4677 return gid;
4678 }
4679
4680 static inline int low2highuid(int uid)
4681 {
4682 if ((int16_t)uid == -1)
4683 return -1;
4684 else
4685 return uid;
4686 }
4687
4688 static inline int low2highgid(int gid)
4689 {
4690 if ((int16_t)gid == -1)
4691 return -1;
4692 else
4693 return gid;
4694 }
4695 static inline int tswapid(int id)
4696 {
4697 return tswap16(id);
4698 }
4699 #else /* !USE_UID16 */
4700 static inline int high2lowuid(int uid)
4701 {
4702 return uid;
4703 }
4704 static inline int high2lowgid(int gid)
4705 {
4706 return gid;
4707 }
4708 static inline int low2highuid(int uid)
4709 {
4710 return uid;
4711 }
4712 static inline int low2highgid(int gid)
4713 {
4714 return gid;
4715 }
4716 static inline int tswapid(int id)
4717 {
4718 return tswap32(id);
4719 }
4720 #endif /* USE_UID16 */
4721
4722 void syscall_init(void)
4723 {
4724 IOCTLEntry *ie;
4725 const argtype *arg_type;
4726 int size;
4727 int i;
4728
4729 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4730 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4731 #include "syscall_types.h"
4732 #undef STRUCT
4733 #undef STRUCT_SPECIAL
4734
4735 /* Build target_to_host_errno_table[] from
4736 * host_to_target_errno_table[]. */
4737 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4738 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4739 }
4740
4741 /* We patch the ioctl size if necessary. We rely on the fact that
4742 no ioctl has all bits set to '1' in the size field. */
4743 ie = ioctl_entries;
4744 while (ie->target_cmd != 0) {
4745 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4746 TARGET_IOC_SIZEMASK) {
4747 arg_type = ie->arg_type;
4748 if (arg_type[0] != TYPE_PTR) {
4749 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4750 ie->target_cmd);
4751 exit(1);
4752 }
4753 arg_type++;
4754 size = thunk_type_size(arg_type, 0);
4755 ie->target_cmd = (ie->target_cmd &
4756 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4757 (size << TARGET_IOC_SIZESHIFT);
4758 }
4759
4760 /* automatic consistency check if same arch */
4761 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4762 (defined(__x86_64__) && defined(TARGET_X86_64))
4763 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4764 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4765 ie->name, ie->target_cmd, ie->host_cmd);
4766 }
4767 #endif
4768 ie++;
4769 }
4770 }
4771
4772 #if TARGET_ABI_BITS == 32
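/* Reassemble a 64-bit offset passed as two 32-bit syscall arguments; which
   argument carries the high word depends on the target's endianness. On
   64-bit ABIs the first argument already holds the whole offset. */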
4773 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4774 {
4775 #ifdef TARGET_WORDS_BIGENDIAN
4776 return ((uint64_t)word0 << 32) | word1;
4777 #else
4778 return ((uint64_t)word1 << 32) | word0;
4779 #endif
4780 }
4781 #else /* TARGET_ABI_BITS == 32 */
4782 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4783 {
4784 return word0;
4785 }
4786 #endif /* TARGET_ABI_BITS != 32 */
4787
4788 #ifdef TARGET_NR_truncate64
4789 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4790 abi_long arg2,
4791 abi_long arg3,
4792 abi_long arg4)
4793 {
4794 if (regpairs_aligned(cpu_env)) {
4795 arg2 = arg3;
4796 arg3 = arg4;
4797 }
4798 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4799 }
4800 #endif
4801
4802 #ifdef TARGET_NR_ftruncate64
4803 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4804 abi_long arg2,
4805 abi_long arg3,
4806 abi_long arg4)
4807 {
4808 if (regpairs_aligned(cpu_env)) {
4809 arg2 = arg3;
4810 arg3 = arg4;
4811 }
4812 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4813 }
4814 #endif
4815
4816 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4817 abi_ulong target_addr)
4818 {
4819 struct target_timespec *target_ts;
4820
4821 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4822 return -TARGET_EFAULT;
4823 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4824 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4825 unlock_user_struct(target_ts, target_addr, 0);
4826 return 0;
4827 }
4828
4829 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4830 struct timespec *host_ts)
4831 {
4832 struct target_timespec *target_ts;
4833
4834 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4835 return -TARGET_EFAULT;
4836 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4837 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4838 unlock_user_struct(target_ts, target_addr, 1);
4839 return 0;
4840 }
4841
4842 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4843 static inline abi_long host_to_target_stat64(void *cpu_env,
4844 abi_ulong target_addr,
4845 struct stat *host_st)
4846 {
4847 #ifdef TARGET_ARM
4848 if (((CPUARMState *)cpu_env)->eabi) {
4849 struct target_eabi_stat64 *target_st;
4850
4851 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4852 return -TARGET_EFAULT;
4853 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4854 __put_user(host_st->st_dev, &target_st->st_dev);
4855 __put_user(host_st->st_ino, &target_st->st_ino);
4856 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4857 __put_user(host_st->st_ino, &target_st->__st_ino);
4858 #endif
4859 __put_user(host_st->st_mode, &target_st->st_mode);
4860 __put_user(host_st->st_nlink, &target_st->st_nlink);
4861 __put_user(host_st->st_uid, &target_st->st_uid);
4862 __put_user(host_st->st_gid, &target_st->st_gid);
4863 __put_user(host_st->st_rdev, &target_st->st_rdev);
4864 __put_user(host_st->st_size, &target_st->st_size);
4865 __put_user(host_st->st_blksize, &target_st->st_blksize);
4866 __put_user(host_st->st_blocks, &target_st->st_blocks);
4867 __put_user(host_st->st_atime, &target_st->target_st_atime);
4868 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4869 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4870 unlock_user_struct(target_st, target_addr, 1);
4871 } else
4872 #endif
4873 {
4874 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4875 struct target_stat *target_st;
4876 #else
4877 struct target_stat64 *target_st;
4878 #endif
4879
4880 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4881 return -TARGET_EFAULT;
4882 memset(target_st, 0, sizeof(*target_st));
4883 __put_user(host_st->st_dev, &target_st->st_dev);
4884 __put_user(host_st->st_ino, &target_st->st_ino);
4885 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4886 __put_user(host_st->st_ino, &target_st->__st_ino);
4887 #endif
4888 __put_user(host_st->st_mode, &target_st->st_mode);
4889 __put_user(host_st->st_nlink, &target_st->st_nlink);
4890 __put_user(host_st->st_uid, &target_st->st_uid);
4891 __put_user(host_st->st_gid, &target_st->st_gid);
4892 __put_user(host_st->st_rdev, &target_st->st_rdev);
4893 /* XXX: better use of kernel struct */
4894 __put_user(host_st->st_size, &target_st->st_size);
4895 __put_user(host_st->st_blksize, &target_st->st_blksize);
4896 __put_user(host_st->st_blocks, &target_st->st_blocks);
4897 __put_user(host_st->st_atime, &target_st->target_st_atime);
4898 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4899 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4900 unlock_user_struct(target_st, target_addr, 1);
4901 }
4902
4903 return 0;
4904 }
4905 #endif
4906
4907 #if defined(CONFIG_USE_NPTL)
4908 /* ??? Using host futex calls even when target atomic operations
4909 are not really atomic probably breaks things. However, implementing
4910 futexes locally would make futexes shared between multiple processes
4911 tricky; they're probably useless anyway, because guest atomic
4912 operations won't work either. */
4913 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4914 target_ulong uaddr2, int val3)
4915 {
4916 struct timespec ts, *pts;
4917 int base_op;
4918
4919 /* ??? We assume FUTEX_* constants are the same on both host
4920 and target. */
4921 #ifdef FUTEX_CMD_MASK
4922 base_op = op & FUTEX_CMD_MASK;
4923 #else
4924 base_op = op;
4925 #endif
4926 switch (base_op) {
4927 case FUTEX_WAIT:
4928 case FUTEX_WAIT_BITSET:
4929 if (timeout) {
4930 pts = &ts;
4931 target_to_host_timespec(pts, timeout);
4932 } else {
4933 pts = NULL;
4934 }
4935 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4936 pts, NULL, val3));
4937 case FUTEX_WAKE:
4938 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4939 case FUTEX_FD:
4940 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4941 case FUTEX_REQUEUE:
4942 case FUTEX_CMP_REQUEUE:
4943 case FUTEX_WAKE_OP:
4944 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4945 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4946 But the prototype takes a `struct timespec *'; insert casts
4947 to satisfy the compiler. We do not need to tswap TIMEOUT
4948 since it's not compared to guest memory. */
4949 pts = (struct timespec *)(uintptr_t) timeout;
4950 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4951 g2h(uaddr2),
4952 (base_op == FUTEX_CMP_REQUEUE
4953 ? tswap32(val3)
4954 : val3)));
4955 default:
4956 return -TARGET_ENOSYS;
4957 }
4958 }
4959 #endif
4960
4961 /* Map host to target signal numbers for the wait family of syscalls.
4962 Assume all other status bits are the same. */
4963 int host_to_target_waitstatus(int status)
4964 {
4965 if (WIFSIGNALED(status)) {
4966 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4967 }
4968 if (WIFSTOPPED(status)) {
4969 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4970 | (status & 0xff);
4971 }
4972 return status;
4973 }
4974
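/* Return the kernel version packed one byte per component, e.g. release
   "2.6.32" yields 0x020620. The result is cached, and a user-supplied
   qemu_uname_release override takes precedence over the host's uname. */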
4975 int get_osversion(void)
4976 {
4977 static int osversion;
4978 struct new_utsname buf;
4979 const char *s;
4980 int i, n, tmp;
4981 if (osversion)
4982 return osversion;
4983 if (qemu_uname_release && *qemu_uname_release) {
4984 s = qemu_uname_release;
4985 } else {
4986 if (sys_uname(&buf))
4987 return 0;
4988 s = buf.release;
4989 }
4990 tmp = 0;
4991 for (i = 0; i < 3; i++) {
4992 n = 0;
4993 while (*s >= '0' && *s <= '9') {
4994 n *= 10;
4995 n += *s - '0';
4996 s++;
4997 }
4998 tmp = (tmp << 8) + n;
4999 if (*s == '.')
5000 s++;
5001 }
5002 osversion = tmp;
5003 return osversion;
5004 }
5005
5006
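/* Synthesize /proc/self/maps for the guest: walk the host's mappings and
   emit only the regions that translate into the guest address space, with
   addresses converted through h2g(). */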
5007 static int open_self_maps(void *cpu_env, int fd)
5008 {
5009 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5010 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5011 #endif
5012 FILE *fp;
5013 char *line = NULL;
5014 size_t len = 0;
5015 ssize_t read;
5016
5017 fp = fopen("/proc/self/maps", "r");
5018 if (fp == NULL) {
5019 return -EACCES;
5020 }
5021
5022 while ((read = getline(&line, &len, fp)) != -1) {
5023 int fields, dev_maj, dev_min, inode;
5024 uint64_t min, max, offset;
5025 char flag_r, flag_w, flag_x, flag_p;
5026 char path[512] = "";
5027 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5028 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5029 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5030
5031 if ((fields < 10) || (fields > 11)) {
5032 continue;
5033 }
5034 if (!strncmp(path, "[stack]", 7)) {
5035 continue;
5036 }
5037 if (h2g_valid(min) && h2g_valid(max)) {
5038 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5039 " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
5040 h2g(min), h2g(max), flag_r, flag_w,
5041 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5042 path[0] ? " " : "", path);
5043 }
5044 }
5045
5046 free(line);
5047 fclose(fp);
5048
5049 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5050 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5051 (unsigned long long)ts->info->stack_limit,
5052 (unsigned long long)(ts->info->start_stack +
5053 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5054 (unsigned long long)0);
5055 #endif
5056
5057 return 0;
5058 }
5059
5060 static int open_self_stat(void *cpu_env, int fd)
5061 {
5062 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5063 abi_ulong start_stack = ts->info->start_stack;
5064 int i;
5065
5066 for (i = 0; i < 44; i++) {
5067 char buf[128];
5068 int len;
5069 uint64_t val = 0;
5070
5071 if (i == 0) {
5072 /* pid */
5073 val = getpid();
5074 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5075 } else if (i == 1) {
5076 /* app name */
5077 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5078 } else if (i == 27) {
5079 /* stack bottom */
5080 val = start_stack;
5081 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5082 } else {
5083 /* for the rest, there is MasterCard */
5084 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5085 }
5086
5087 len = strlen(buf);
5088 if (write(fd, buf, len) != len) {
5089 return -1;
5090 }
5091 }
5092
5093 return 0;
5094 }
5095
5096 static int open_self_auxv(void *cpu_env, int fd)
5097 {
5098 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5099 abi_ulong auxv = ts->info->saved_auxv;
5100 abi_ulong len = ts->info->auxv_len;
5101 char *ptr;
5102
5103 /*
5104 * The auxiliary vector is stored on the target process's stack.
5105 * Read the whole auxv vector and copy it to the file.
5106 */
5107 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5108 if (ptr != NULL) {
5109 while (len > 0) {
5110 ssize_t r;
5111 r = write(fd, ptr, len);
5112 if (r <= 0) {
5113 break;
5114 }
5115 len -= r;
5116 ptr += r;
5117 }
5118 lseek(fd, 0, SEEK_SET);
5119 unlock_user(ptr, auxv, len);
5120 }
5121
5122 return 0;
5123 }
5124
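/* open(2) wrapper that intercepts a few /proc/self files and returns a
   temporary file filled with contents describing the guest rather than
   the host. */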
5125 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5126 {
5127 struct fake_open {
5128 const char *filename;
5129 int (*fill)(void *cpu_env, int fd);
5130 };
5131 const struct fake_open *fake_open;
5132 static const struct fake_open fakes[] = {
5133 { "/proc/self/maps", open_self_maps },
5134 { "/proc/self/stat", open_self_stat },
5135 { "/proc/self/auxv", open_self_auxv },
5136 { NULL, NULL }
5137 };
5138
5139 for (fake_open = fakes; fake_open->filename; fake_open++) {
5140 if (!strncmp(pathname, fake_open->filename,
5141 strlen(fake_open->filename))) {
5142 break;
5143 }
5144 }
5145
5146 if (fake_open->filename) {
5147 const char *tmpdir;
5148 char filename[PATH_MAX];
5149 int fd, r;
5150
5151 /* create a temporary file to hold the synthesized /proc contents */
5152 tmpdir = getenv("TMPDIR");
5153 if (!tmpdir)
5154 tmpdir = "/tmp";
5155 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5156 fd = mkstemp(filename);
5157 if (fd < 0) {
5158 return fd;
5159 }
5160 unlink(filename);
5161
5162 if ((r = fake_open->fill(cpu_env, fd))) {
5163 close(fd);
5164 return r;
5165 }
5166 lseek(fd, 0, SEEK_SET);
5167
5168 return fd;
5169 }
5170
5171 return get_errno(open(path(pathname), flags, mode));
5172 }
5173
5174 /* do_syscall() should always have a single exit point at the end so
5175 that actions, such as logging of syscall results, can be performed.
5176 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5177 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5178 abi_long arg2, abi_long arg3, abi_long arg4,
5179 abi_long arg5, abi_long arg6, abi_long arg7,
5180 abi_long arg8)
5181 {
5182 abi_long ret;
5183 struct stat st;
5184 struct statfs stfs;
5185 void *p;
5186
5187 #ifdef DEBUG
5188 gemu_log("syscall %d", num);
5189 #endif
5190 if(do_strace)
5191 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5192
5193 switch(num) {
5194 case TARGET_NR_exit:
5195 #ifdef CONFIG_USE_NPTL
5196 /* In old applications this may be used to implement _exit(2).
5197 However in threaded applications it is used for thread termination,
5198 and _exit_group is used for application termination.
5199 Do thread termination if we have more than one thread. */
5200 /* FIXME: This probably breaks if a signal arrives. We should probably
5201 be disabling signals. */
5202 if (first_cpu->next_cpu) {
5203 TaskState *ts;
5204 CPUArchState **lastp;
5205 CPUArchState *p;
5206
5207 cpu_list_lock();
5208 lastp = &first_cpu;
5209 p = first_cpu;
5210 while (p && p != (CPUArchState *)cpu_env) {
5211 lastp = &p->next_cpu;
5212 p = p->next_cpu;
5213 }
5214 /* If we didn't find the CPU for this thread then something is
5215 horribly wrong. */
5216 if (!p)
5217 abort();
5218 /* Remove the CPU from the list. */
5219 *lastp = p->next_cpu;
5220 cpu_list_unlock();
5221 ts = ((CPUArchState *)cpu_env)->opaque;
5222 if (ts->child_tidptr) {
5223 put_user_u32(0, ts->child_tidptr);
5224 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5225 NULL, NULL, 0);
5226 }
5227 thread_env = NULL;
5228 object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5229 g_free(ts);
5230 pthread_exit(NULL);
5231 }
5232 #endif
5233 #ifdef TARGET_GPROF
5234 _mcleanup();
5235 #endif
5236 gdb_exit(cpu_env, arg1);
5237 _exit(arg1);
5238 ret = 0; /* avoid warning */
5239 break;
5240 case TARGET_NR_read:
5241 if (arg3 == 0)
5242 ret = 0;
5243 else {
5244 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5245 goto efault;
5246 ret = get_errno(read(arg1, p, arg3));
5247 unlock_user(p, arg2, ret);
5248 }
5249 break;
5250 case TARGET_NR_write:
5251 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5252 goto efault;
5253 ret = get_errno(write(arg1, p, arg3));
5254 unlock_user(p, arg2, 0);
5255 break;
5256 case TARGET_NR_open:
5257 if (!(p = lock_user_string(arg1)))
5258 goto efault;
5259 ret = get_errno(do_open(cpu_env, p,
5260 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5261 arg3));
5262 unlock_user(p, arg1, 0);
5263 break;
5264 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5265 case TARGET_NR_openat:
5266 if (!(p = lock_user_string(arg2)))
5267 goto efault;
5268 ret = get_errno(sys_openat(arg1,
5269 path(p),
5270 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5271 arg4));
5272 unlock_user(p, arg2, 0);
5273 break;
5274 #endif
5275 case TARGET_NR_close:
5276 ret = get_errno(close(arg1));
5277 break;
5278 case TARGET_NR_brk:
5279 ret = do_brk(arg1);
5280 break;
5281 case TARGET_NR_fork:
5282 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5283 break;
5284 #ifdef TARGET_NR_waitpid
5285 case TARGET_NR_waitpid:
5286 {
5287 int status;
5288 ret = get_errno(waitpid(arg1, &status, arg3));
5289 if (!is_error(ret) && arg2 && ret
5290 && put_user_s32(host_to_target_waitstatus(status), arg2))
5291 goto efault;
5292 }
5293 break;
5294 #endif
5295 #ifdef TARGET_NR_waitid
5296 case TARGET_NR_waitid:
5297 {
5298 siginfo_t info;
5299 info.si_pid = 0;
5300 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5301 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5302 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5303 goto efault;
5304 host_to_target_siginfo(p, &info);
5305 unlock_user(p, arg3, sizeof(target_siginfo_t));
5306 }
5307 }
5308 break;
5309 #endif
5310 #ifdef TARGET_NR_creat /* not on alpha */
5311 case TARGET_NR_creat:
5312 if (!(p = lock_user_string(arg1)))
5313 goto efault;
5314 ret = get_errno(creat(p, arg2));
5315 unlock_user(p, arg1, 0);
5316 break;
5317 #endif
5318 case TARGET_NR_link:
5319 {
5320 void * p2;
5321 p = lock_user_string(arg1);
5322 p2 = lock_user_string(arg2);
5323 if (!p || !p2)
5324 ret = -TARGET_EFAULT;
5325 else
5326 ret = get_errno(link(p, p2));
5327 unlock_user(p2, arg2, 0);
5328 unlock_user(p, arg1, 0);
5329 }
5330 break;
5331 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5332 case TARGET_NR_linkat:
5333 {
5334 void * p2 = NULL;
5335 if (!arg2 || !arg4)
5336 goto efault;
5337 p = lock_user_string(arg2);
5338 p2 = lock_user_string(arg4);
5339 if (!p || !p2)
5340 ret = -TARGET_EFAULT;
5341 else
5342 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
5343 unlock_user(p, arg2, 0);
5344 unlock_user(p2, arg4, 0);
5345 }
5346 break;
5347 #endif
5348 case TARGET_NR_unlink:
5349 if (!(p = lock_user_string(arg1)))
5350 goto efault;
5351 ret = get_errno(unlink(p));
5352 unlock_user(p, arg1, 0);
5353 break;
5354 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5355 case TARGET_NR_unlinkat:
5356 if (!(p = lock_user_string(arg2)))
5357 goto efault;
5358 ret = get_errno(sys_unlinkat(arg1, p, arg3));
5359 unlock_user(p, arg2, 0);
5360 break;
5361 #endif
5362 case TARGET_NR_execve:
5363 {
5364 char **argp, **envp;
5365 int argc, envc;
5366 abi_ulong gp;
5367 abi_ulong guest_argp;
5368 abi_ulong guest_envp;
5369 abi_ulong addr;
5370 char **q;
5371 int total_size = 0;
5372
5373 argc = 0;
5374 guest_argp = arg2;
5375 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5376 if (get_user_ual(addr, gp))
5377 goto efault;
5378 if (!addr)
5379 break;
5380 argc++;
5381 }
5382 envc = 0;
5383 guest_envp = arg3;
5384 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5385 if (get_user_ual(addr, gp))
5386 goto efault;
5387 if (!addr)
5388 break;
5389 envc++;
5390 }
5391
5392 argp = alloca((argc + 1) * sizeof(void *));
5393 envp = alloca((envc + 1) * sizeof(void *));
5394
5395 for (gp = guest_argp, q = argp; gp;
5396 gp += sizeof(abi_ulong), q++) {
5397 if (get_user_ual(addr, gp))
5398 goto execve_efault;
5399 if (!addr)
5400 break;
5401 if (!(*q = lock_user_string(addr)))
5402 goto execve_efault;
5403 total_size += strlen(*q) + 1;
5404 }
5405 *q = NULL;
5406
5407 for (gp = guest_envp, q = envp; gp;
5408 gp += sizeof(abi_ulong), q++) {
5409 if (get_user_ual(addr, gp))
5410 goto execve_efault;
5411 if (!addr)
5412 break;
5413 if (!(*q = lock_user_string(addr)))
5414 goto execve_efault;
5415 total_size += strlen(*q) + 1;
5416 }
5417 *q = NULL;
5418
5419 /* This case will not be caught by the host's execve() if its
5420 page size is bigger than the target's. */
5421 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5422 ret = -TARGET_E2BIG;
5423 goto execve_end;
5424 }
5425 if (!(p = lock_user_string(arg1)))
5426 goto execve_efault;
5427 ret = get_errno(execve(p, argp, envp));
5428 unlock_user(p, arg1, 0);
5429
5430 goto execve_end;
5431
5432 execve_efault:
5433 ret = -TARGET_EFAULT;
5434
5435 execve_end:
5436 for (gp = guest_argp, q = argp; *q;
5437 gp += sizeof(abi_ulong), q++) {
5438 if (get_user_ual(addr, gp)
5439 || !addr)
5440 break;
5441 unlock_user(*q, addr, 0);
5442 }
5443 for (gp = guest_envp, q = envp; *q;
5444 gp += sizeof(abi_ulong), q++) {
5445 if (get_user_ual(addr, gp)
5446 || !addr)
5447 break;
5448 unlock_user(*q, addr, 0);
5449 }
5450 }
5451 break;
5452 case TARGET_NR_chdir:
5453 if (!(p = lock_user_string(arg1)))
5454 goto efault;
5455 ret = get_errno(chdir(p));
5456 unlock_user(p, arg1, 0);
5457 break;
5458 #ifdef TARGET_NR_time
5459 case TARGET_NR_time:
5460 {
5461 time_t host_time;
5462 ret = get_errno(time(&host_time));
5463 if (!is_error(ret)
5464 && arg1
5465 && put_user_sal(host_time, arg1))
5466 goto efault;
5467 }
5468 break;
5469 #endif
5470 case TARGET_NR_mknod:
5471 if (!(p = lock_user_string(arg1)))
5472 goto efault;
5473 ret = get_errno(mknod(p, arg2, arg3));
5474 unlock_user(p, arg1, 0);
5475 break;
5476 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5477 case TARGET_NR_mknodat:
5478 if (!(p = lock_user_string(arg2)))
5479 goto efault;
5480 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
5481 unlock_user(p, arg2, 0);
5482 break;
5483 #endif
5484 case TARGET_NR_chmod:
5485 if (!(p = lock_user_string(arg1)))
5486 goto efault;
5487 ret = get_errno(chmod(p, arg2));
5488 unlock_user(p, arg1, 0);
5489 break;
5490 #ifdef TARGET_NR_break
5491 case TARGET_NR_break:
5492 goto unimplemented;
5493 #endif
5494 #ifdef TARGET_NR_oldstat
5495 case TARGET_NR_oldstat:
5496 goto unimplemented;
5497 #endif
5498 case TARGET_NR_lseek:
5499 ret = get_errno(lseek(arg1, arg2, arg3));
5500 break;
5501 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5502 /* Alpha specific */
5503 case TARGET_NR_getxpid:
5504 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5505 ret = get_errno(getpid());
5506 break;
5507 #endif
5508 #ifdef TARGET_NR_getpid
5509 case TARGET_NR_getpid:
5510 ret = get_errno(getpid());
5511 break;
5512 #endif
5513 case TARGET_NR_mount:
5514 {
5515 /* need to look at the data field */
5516 void *p2, *p3;
5517 p = lock_user_string(arg1);
5518 p2 = lock_user_string(arg2);
5519 p3 = lock_user_string(arg3);
5520 if (!p || !p2 || !p3)
5521 ret = -TARGET_EFAULT;
5522 else {
5523 /* FIXME - arg5 should be locked, but it isn't clear how to
5524 * do that since it's not guaranteed to be a NULL-terminated
5525 * string.
5526 */
5527 if (!arg5)
5528 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5529 else
5530 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5531 }
5532 unlock_user(p, arg1, 0);
5533 unlock_user(p2, arg2, 0);
5534 unlock_user(p3, arg3, 0);
5535 break;
5536 }
5537 #ifdef TARGET_NR_umount
5538 case TARGET_NR_umount:
5539 if (!(p = lock_user_string(arg1)))
5540 goto efault;
5541 ret = get_errno(umount(p));
5542 unlock_user(p, arg1, 0);
5543 break;
5544 #endif
5545 #ifdef TARGET_NR_stime /* not on alpha */
5546 case TARGET_NR_stime:
5547 {
5548 time_t host_time;
5549 if (get_user_sal(host_time, arg1))
5550 goto efault;
5551 ret = get_errno(stime(&host_time));
5552 }
5553 break;
5554 #endif
5555 case TARGET_NR_ptrace:
5556 goto unimplemented;
5557 #ifdef TARGET_NR_alarm /* not on alpha */
5558 case TARGET_NR_alarm:
5559 ret = alarm(arg1);
5560 break;
5561 #endif
5562 #ifdef TARGET_NR_oldfstat
5563 case TARGET_NR_oldfstat:
5564 goto unimplemented;
5565 #endif
5566 #ifdef TARGET_NR_pause /* not on alpha */
5567 case TARGET_NR_pause:
5568 ret = get_errno(pause());
5569 break;
5570 #endif
5571 #ifdef TARGET_NR_utime
5572 case TARGET_NR_utime:
5573 {
5574 struct utimbuf tbuf, *host_tbuf;
5575 struct target_utimbuf *target_tbuf;
5576 if (arg2) {
5577 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5578 goto efault;
5579 tbuf.actime = tswapal(target_tbuf->actime);
5580 tbuf.modtime = tswapal(target_tbuf->modtime);
5581 unlock_user_struct(target_tbuf, arg2, 0);
5582 host_tbuf = &tbuf;
5583 } else {
5584 host_tbuf = NULL;
5585 }
5586 if (!(p = lock_user_string(arg1)))
5587 goto efault;
5588 ret = get_errno(utime(p, host_tbuf));
5589 unlock_user(p, arg1, 0);
5590 }
5591 break;
5592 #endif
5593 case TARGET_NR_utimes:
5594 {
5595 struct timeval *tvp, tv[2];
5596 if (arg2) {
5597 if (copy_from_user_timeval(&tv[0], arg2)
5598 || copy_from_user_timeval(&tv[1],
5599 arg2 + sizeof(struct target_timeval)))
5600 goto efault;
5601 tvp = tv;
5602 } else {
5603 tvp = NULL;
5604 }
5605 if (!(p = lock_user_string(arg1)))
5606 goto efault;
5607 ret = get_errno(utimes(p, tvp));
5608 unlock_user(p, arg1, 0);
5609 }
5610 break;
5611 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5612 case TARGET_NR_futimesat:
5613 {
5614 struct timeval *tvp, tv[2];
5615 if (arg3) {
5616 if (copy_from_user_timeval(&tv[0], arg3)
5617 || copy_from_user_timeval(&tv[1],
5618 arg3 + sizeof(struct target_timeval)))
5619 goto efault;
5620 tvp = tv;
5621 } else {
5622 tvp = NULL;
5623 }
5624 if (!(p = lock_user_string(arg2)))
5625 goto efault;
5626 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5627 unlock_user(p, arg2, 0);
5628 }
5629 break;
5630 #endif
5631 #ifdef TARGET_NR_stty
5632 case TARGET_NR_stty:
5633 goto unimplemented;
5634 #endif
5635 #ifdef TARGET_NR_gtty
5636 case TARGET_NR_gtty:
5637 goto unimplemented;
5638 #endif
5639 case TARGET_NR_access:
5640 if (!(p = lock_user_string(arg1)))
5641 goto efault;
5642 ret = get_errno(access(path(p), arg2));
5643 unlock_user(p, arg1, 0);
5644 break;
5645 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5646 case TARGET_NR_faccessat:
5647 if (!(p = lock_user_string(arg2)))
5648 goto efault;
5649 ret = get_errno(sys_faccessat(arg1, p, arg3));
5650 unlock_user(p, arg2, 0);
5651 break;
5652 #endif
5653 #ifdef TARGET_NR_nice /* not on alpha */
5654 case TARGET_NR_nice:
5655 ret = get_errno(nice(arg1));
5656 break;
5657 #endif
5658 #ifdef TARGET_NR_ftime
5659 case TARGET_NR_ftime:
5660 goto unimplemented;
5661 #endif
5662 case TARGET_NR_sync:
5663 sync();
5664 ret = 0;
5665 break;
5666 case TARGET_NR_kill:
5667 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5668 break;
5669 case TARGET_NR_rename:
5670 {
5671 void *p2;
5672 p = lock_user_string(arg1);
5673 p2 = lock_user_string(arg2);
5674 if (!p || !p2)
5675 ret = -TARGET_EFAULT;
5676 else
5677 ret = get_errno(rename(p, p2));
5678 unlock_user(p2, arg2, 0);
5679 unlock_user(p, arg1, 0);
5680 }
5681 break;
5682 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5683 case TARGET_NR_renameat:
5684 {
5685 void *p2;
5686 p = lock_user_string(arg2);
5687 p2 = lock_user_string(arg4);
5688 if (!p || !p2)
5689 ret = -TARGET_EFAULT;
5690 else
5691 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5692 unlock_user(p2, arg4, 0);
5693 unlock_user(p, arg2, 0);
5694 }
5695 break;
5696 #endif
5697 case TARGET_NR_mkdir:
5698 if (!(p = lock_user_string(arg1)))
5699 goto efault;
5700 ret = get_errno(mkdir(p, arg2));
5701 unlock_user(p, arg1, 0);
5702 break;
5703 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5704 case TARGET_NR_mkdirat:
5705 if (!(p = lock_user_string(arg2)))
5706 goto efault;
5707 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5708 unlock_user(p, arg2, 0);
5709 break;
5710 #endif
5711 case TARGET_NR_rmdir:
5712 if (!(p = lock_user_string(arg1)))
5713 goto efault;
5714 ret = get_errno(rmdir(p));
5715 unlock_user(p, arg1, 0);
5716 break;
5717 case TARGET_NR_dup:
5718 ret = get_errno(dup(arg1));
5719 break;
5720 case TARGET_NR_pipe:
5721 ret = do_pipe(cpu_env, arg1, 0, 0);
5722 break;
5723 #ifdef TARGET_NR_pipe2
5724 case TARGET_NR_pipe2:
5725 ret = do_pipe(cpu_env, arg1,
5726 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5727 break;
5728 #endif
5729 case TARGET_NR_times:
5730 {
5731 struct target_tms *tmsp;
5732 struct tms tms;
5733 ret = get_errno(times(&tms));
5734 if (arg1) {
5735 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5736 if (!tmsp)
5737 goto efault;
5738 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5739 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5740 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5741 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5742 }
5743 if (!is_error(ret))
5744 ret = host_to_target_clock_t(ret);
5745 }
5746 break;
5747 #ifdef TARGET_NR_prof
5748 case TARGET_NR_prof:
5749 goto unimplemented;
5750 #endif
5751 #ifdef TARGET_NR_signal
5752 case TARGET_NR_signal:
5753 goto unimplemented;
5754 #endif
5755 case TARGET_NR_acct:
5756 if (arg1 == 0) {
5757 ret = get_errno(acct(NULL));
5758 } else {
5759 if (!(p = lock_user_string(arg1)))
5760 goto efault;
5761 ret = get_errno(acct(path(p)));
5762 unlock_user(p, arg1, 0);
5763 }
5764 break;
5765 #ifdef TARGET_NR_umount2 /* not on alpha */
5766 case TARGET_NR_umount2:
5767 if (!(p = lock_user_string(arg1)))
5768 goto efault;
5769 ret = get_errno(umount2(p, arg2));
5770 unlock_user(p, arg1, 0);
5771 break;
5772 #endif
5773 #ifdef TARGET_NR_lock
5774 case TARGET_NR_lock:
5775 goto unimplemented;
5776 #endif
5777 case TARGET_NR_ioctl:
5778 ret = do_ioctl(arg1, arg2, arg3);
5779 break;
5780 case TARGET_NR_fcntl:
5781 ret = do_fcntl(arg1, arg2, arg3);
5782 break;
5783 #ifdef TARGET_NR_mpx
5784 case TARGET_NR_mpx:
5785 goto unimplemented;
5786 #endif
5787 case TARGET_NR_setpgid:
5788 ret = get_errno(setpgid(arg1, arg2));
5789 break;
5790 #ifdef TARGET_NR_ulimit
5791 case TARGET_NR_ulimit:
5792 goto unimplemented;
5793 #endif
5794 #ifdef TARGET_NR_oldolduname
5795 case TARGET_NR_oldolduname:
5796 goto unimplemented;
5797 #endif
5798 case TARGET_NR_umask:
5799 ret = get_errno(umask(arg1));
5800 break;
5801 case TARGET_NR_chroot:
5802 if (!(p = lock_user_string(arg1)))
5803 goto efault;
5804 ret = get_errno(chroot(p));
5805 unlock_user(p, arg1, 0);
5806 break;
5807 case TARGET_NR_ustat:
5808 goto unimplemented;
5809 case TARGET_NR_dup2:
5810 ret = get_errno(dup2(arg1, arg2));
5811 break;
5812 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5813 case TARGET_NR_dup3:
5814 ret = get_errno(dup3(arg1, arg2, arg3));
5815 break;
5816 #endif
5817 #ifdef TARGET_NR_getppid /* not on alpha */
5818 case TARGET_NR_getppid:
5819 ret = get_errno(getppid());
5820 break;
5821 #endif
5822 case TARGET_NR_getpgrp:
5823 ret = get_errno(getpgrp());
5824 break;
5825 case TARGET_NR_setsid:
5826 ret = get_errno(setsid());
5827 break;
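/* The legacy sigaction syscall needs per-target handling because the old
 * struct sigaction layout differs between ABIs: Alpha and the generic case
 * use target_old_sigaction (single-word mask, with sa_restorer only on the
 * generic variant), while MIPS passes a full target_sigaction whose mask
 * spans four words, of which only the first is meaningful here. */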
5828 #ifdef TARGET_NR_sigaction
5829 case TARGET_NR_sigaction:
5830 {
5831 #if defined(TARGET_ALPHA)
5832 struct target_sigaction act, oact, *pact = 0;
5833 struct target_old_sigaction *old_act;
5834 if (arg2) {
5835 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5836 goto efault;
5837 act._sa_handler = old_act->_sa_handler;
5838 target_siginitset(&act.sa_mask, old_act->sa_mask);
5839 act.sa_flags = old_act->sa_flags;
5840 act.sa_restorer = 0;
5841 unlock_user_struct(old_act, arg2, 0);
5842 pact = &act;
5843 }
5844 ret = get_errno(do_sigaction(arg1, pact, &oact));
5845 if (!is_error(ret) && arg3) {
5846 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5847 goto efault;
5848 old_act->_sa_handler = oact._sa_handler;
5849 old_act->sa_mask = oact.sa_mask.sig[0];
5850 old_act->sa_flags = oact.sa_flags;
5851 unlock_user_struct(old_act, arg3, 1);
5852 }
5853 #elif defined(TARGET_MIPS)
5854 struct target_sigaction act, oact, *pact, *old_act;
5855
5856 if (arg2) {
5857 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5858 goto efault;
5859 act._sa_handler = old_act->_sa_handler;
5860 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5861 act.sa_flags = old_act->sa_flags;
5862 unlock_user_struct(old_act, arg2, 0);
5863 pact = &act;
5864 } else {
5865 pact = NULL;
5866 }
5867
5868 ret = get_errno(do_sigaction(arg1, pact, &oact));
5869
5870 if (!is_error(ret) && arg3) {
5871 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5872 goto efault;
5873 old_act->_sa_handler = oact._sa_handler;
5874 old_act->sa_flags = oact.sa_flags;
5875 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5876 old_act->sa_mask.sig[1] = 0;
5877 old_act->sa_mask.sig[2] = 0;
5878 old_act->sa_mask.sig[3] = 0;
5879 unlock_user_struct(old_act, arg3, 1);
5880 }
5881 #else
5882 struct target_old_sigaction *old_act;
5883 struct target_sigaction act, oact, *pact;
5884 if (arg2) {
5885 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5886 goto efault;
5887 act._sa_handler = old_act->_sa_handler;
5888 target_siginitset(&act.sa_mask, old_act->sa_mask);
5889 act.sa_flags = old_act->sa_flags;
5890 act.sa_restorer = old_act->sa_restorer;
5891 unlock_user_struct(old_act, arg2, 0);
5892 pact = &act;
5893 } else {
5894 pact = NULL;
5895 }
5896 ret = get_errno(do_sigaction(arg1, pact, &oact));
5897 if (!is_error(ret) && arg3) {
5898 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5899 goto efault;
5900 old_act->_sa_handler = oact._sa_handler;
5901 old_act->sa_mask = oact.sa_mask.sig[0];
5902 old_act->sa_flags = oact.sa_flags;
5903 old_act->sa_restorer = oact.sa_restorer;
5904 unlock_user_struct(old_act, arg3, 1);
5905 }
5906 #endif
5907 }
5908 break;
5909 #endif
5910 case TARGET_NR_rt_sigaction:
5911 {
5912 #if defined(TARGET_ALPHA)
5913 struct target_sigaction act, oact, *pact = 0;
5914 struct target_rt_sigaction *rt_act;
5915 /* ??? arg4 == sizeof(sigset_t). */
5916 if (arg2) {
5917 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5918 goto efault;
5919 act._sa_handler = rt_act->_sa_handler;
5920 act.sa_mask = rt_act->sa_mask;
5921 act.sa_flags = rt_act->sa_flags;
5922 act.sa_restorer = arg5;
5923 unlock_user_struct(rt_act, arg2, 0);
5924 pact = &act;
5925 }
5926 ret = get_errno(do_sigaction(arg1, pact, &oact));
5927 if (!is_error(ret) && arg3) {
5928 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5929 goto efault;
5930 rt_act->_sa_handler = oact._sa_handler;
5931 rt_act->sa_mask = oact.sa_mask;
5932 rt_act->sa_flags = oact.sa_flags;
5933 unlock_user_struct(rt_act, arg3, 1);
5934 }
5935 #else
5936 struct target_sigaction *act;
5937 struct target_sigaction *oact;
5938
5939 if (arg2) {
5940 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5941 goto efault;
5942 } else
5943 act = NULL;
5944 if (arg3) {
5945 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5946 ret = -TARGET_EFAULT;
5947 goto rt_sigaction_fail;
5948 }
5949 } else
5950 oact = NULL;
5951 ret = get_errno(do_sigaction(arg1, act, oact));
5952 rt_sigaction_fail:
5953 if (act)
5954 unlock_user_struct(act, arg2, 0);
5955 if (oact)
5956 unlock_user_struct(oact, arg3, 1);
5957 #endif
5958 }
5959 break;
5960 #ifdef TARGET_NR_sgetmask /* not on alpha */
5961 case TARGET_NR_sgetmask:
5962 {
5963 sigset_t cur_set;
5964 abi_ulong target_set;
5965 sigprocmask(0, NULL, &cur_set);
5966 host_to_target_old_sigset(&target_set, &cur_set);
5967 ret = target_set;
5968 }
5969 break;
5970 #endif
5971 #ifdef TARGET_NR_ssetmask /* not on alpha */
5972 case TARGET_NR_ssetmask:
5973 {
5974 sigset_t set, oset, cur_set;
5975 abi_ulong target_set = arg1;
5976 sigprocmask(0, NULL, &cur_set);
5977 target_to_host_old_sigset(&set, &target_set);
5978 sigorset(&set, &set, &cur_set);
5979 sigprocmask(SIG_SETMASK, &set, &oset);
5980 host_to_target_old_sigset(&target_set, &oset);
5981 ret = target_set;
5982 }
5983 break;
5984 #endif
5985 #ifdef TARGET_NR_sigprocmask
5986 case TARGET_NR_sigprocmask:
5987 {
5988 #if defined(TARGET_ALPHA)
5989 sigset_t set, oldset;
5990 abi_ulong mask;
5991 int how;
5992
5993 switch (arg1) {
5994 case TARGET_SIG_BLOCK:
5995 how = SIG_BLOCK;
5996 break;
5997 case TARGET_SIG_UNBLOCK:
5998 how = SIG_UNBLOCK;
5999 break;
6000 case TARGET_SIG_SETMASK:
6001 how = SIG_SETMASK;
6002 break;
6003 default:
6004 ret = -TARGET_EINVAL;
6005 goto fail;
6006 }
6007 mask = arg2;
6008 target_to_host_old_sigset(&set, &mask);
6009
6010 ret = get_errno(sigprocmask(how, &set, &oldset));
6011 if (!is_error(ret)) {
6012 host_to_target_old_sigset(&mask, &oldset);
6013 ret = mask;
6014 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6015 }
6016 #else
6017 sigset_t set, oldset, *set_ptr;
6018 int how;
6019
6020 if (arg2) {
6021 switch (arg1) {
6022 case TARGET_SIG_BLOCK:
6023 how = SIG_BLOCK;
6024 break;
6025 case TARGET_SIG_UNBLOCK:
6026 how = SIG_UNBLOCK;
6027 break;
6028 case TARGET_SIG_SETMASK:
6029 how = SIG_SETMASK;
6030 break;
6031 default:
6032 ret = -TARGET_EINVAL;
6033 goto fail;
6034 }
6035 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6036 goto efault;
6037 target_to_host_old_sigset(&set, p);
6038 unlock_user(p, arg2, 0);
6039 set_ptr = &set;
6040 } else {
6041 how = 0;
6042 set_ptr = NULL;
6043 }
6044 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6045 if (!is_error(ret) && arg3) {
6046 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6047 goto efault;
6048 host_to_target_old_sigset(p, &oldset);
6049 unlock_user(p, arg3, sizeof(target_sigset_t));
6050 }
6051 #endif
6052 }
6053 break;
6054 #endif
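/* For both sigprocmask flavours: when the guest passes a NULL new-set
 * pointer the 'how' argument is irrelevant (the kernel ignores it in that
 * case), which is why it is simply forced to 0 on that path. */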
6055 case TARGET_NR_rt_sigprocmask:
6056 {
6057 int how = arg1;
6058 sigset_t set, oldset, *set_ptr;
6059
6060 if (arg2) {
6061 switch(how) {
6062 case TARGET_SIG_BLOCK:
6063 how = SIG_BLOCK;
6064 break;
6065 case TARGET_SIG_UNBLOCK:
6066 how = SIG_UNBLOCK;
6067 break;
6068 case TARGET_SIG_SETMASK:
6069 how = SIG_SETMASK;
6070 break;
6071 default:
6072 ret = -TARGET_EINVAL;
6073 goto fail;
6074 }
6075 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6076 goto efault;
6077 target_to_host_sigset(&set, p);
6078 unlock_user(p, arg2, 0);
6079 set_ptr = &set;
6080 } else {
6081 how = 0;
6082 set_ptr = NULL;
6083 }
6084 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6085 if (!is_error(ret) && arg3) {
6086 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6087 goto efault;
6088 host_to_target_sigset(p, &oldset);
6089 unlock_user(p, arg3, sizeof(target_sigset_t));
6090 }
6091 }
6092 break;
6093 #ifdef TARGET_NR_sigpending
6094 case TARGET_NR_sigpending:
6095 {
6096 sigset_t set;
6097 ret = get_errno(sigpending(&set));
6098 if (!is_error(ret)) {
6099 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6100 goto efault;
6101 host_to_target_old_sigset(p, &set);
6102 unlock_user(p, arg1, sizeof(target_sigset_t));
6103 }
6104 }
6105 break;
6106 #endif
6107 case TARGET_NR_rt_sigpending:
6108 {
6109 sigset_t set;
6110 ret = get_errno(sigpending(&set));
6111 if (!is_error(ret)) {
6112 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6113 goto efault;
6114 host_to_target_sigset(p, &set);
6115 unlock_user(p, arg1, sizeof(target_sigset_t));
6116 }
6117 }
6118 break;
6119 #ifdef TARGET_NR_sigsuspend
6120 case TARGET_NR_sigsuspend:
6121 {
6122 sigset_t set;
6123 #if defined(TARGET_ALPHA)
6124 abi_ulong mask = arg1;
6125 target_to_host_old_sigset(&set, &mask);
6126 #else
6127 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6128 goto efault;
6129 target_to_host_old_sigset(&set, p);
6130 unlock_user(p, arg1, 0);
6131 #endif
6132 ret = get_errno(sigsuspend(&set));
6133 }
6134 break;
6135 #endif
6136 case TARGET_NR_rt_sigsuspend:
6137 {
6138 sigset_t set;
6139 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6140 goto efault;
6141 target_to_host_sigset(&set, p);
6142 unlock_user(p, arg1, 0);
6143 ret = get_errno(sigsuspend(&set));
6144 }
6145 break;
6146 case TARGET_NR_rt_sigtimedwait:
6147 {
6148 sigset_t set;
6149 struct timespec uts, *puts;
6150 siginfo_t uinfo;
6151
6152 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6153 goto efault;
6154 target_to_host_sigset(&set, p);
6155 unlock_user(p, arg1, 0);
6156 if (arg3) {
6157 puts = &uts;
6158 target_to_host_timespec(puts, arg3);
6159 } else {
6160 puts = NULL;
6161 }
6162 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6163 if (!is_error(ret) && arg2) {
6164 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6165 goto efault;
6166 host_to_target_siginfo(p, &uinfo);
6167 unlock_user(p, arg2, sizeof(target_siginfo_t));
6168 }
6169 }
6170 break;
6171 case TARGET_NR_rt_sigqueueinfo:
6172 {
6173 siginfo_t uinfo;
6174 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
6175 goto efault;
6176 target_to_host_siginfo(&uinfo, p);
6177 unlock_user(p, arg3, 0);
6178 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6179 }
6180 break;
6181 #ifdef TARGET_NR_sigreturn
6182 case TARGET_NR_sigreturn:
6183 /* NOTE: ret is eax, so no transcoding needs to be done */
6184 ret = do_sigreturn(cpu_env);
6185 break;
6186 #endif
6187 case TARGET_NR_rt_sigreturn:
6188 /* NOTE: ret is eax, so no transcoding needs to be done */
6189 ret = do_rt_sigreturn(cpu_env);
6190 break;
6191 case TARGET_NR_sethostname:
6192 if (!(p = lock_user_string(arg1)))
6193 goto efault;
6194 ret = get_errno(sethostname(p, arg2));
6195 unlock_user(p, arg1, 0);
6196 break;
6197 case TARGET_NR_setrlimit:
6198 {
6199 int resource = target_to_host_resource(arg1);
6200 struct target_rlimit *target_rlim;
6201 struct rlimit rlim;
6202 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6203 goto efault;
6204 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6205 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6206 unlock_user_struct(target_rlim, arg2, 0);
6207 ret = get_errno(setrlimit(resource, &rlim));
6208 }
6209 break;
6210 case TARGET_NR_getrlimit:
6211 {
6212 int resource = target_to_host_resource(arg1);
6213 struct target_rlimit *target_rlim;
6214 struct rlimit rlim;
6215
6216 ret = get_errno(getrlimit(resource, &rlim));
6217 if (!is_error(ret)) {
6218 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6219 goto efault;
6220 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6221 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6222 unlock_user_struct(target_rlim, arg2, 1);
6223 }
6224 }
6225 break;
6226 case TARGET_NR_getrusage:
6227 {
6228 struct rusage rusage;
6229 ret = get_errno(getrusage(arg1, &rusage));
6230 if (!is_error(ret)) {
6231 host_to_target_rusage(arg2, &rusage);
6232 }
6233 }
6234 break;
6235 case TARGET_NR_gettimeofday:
6236 {
6237 struct timeval tv;
6238 ret = get_errno(gettimeofday(&tv, NULL));
6239 if (!is_error(ret)) {
6240 if (copy_to_user_timeval(arg1, &tv))
6241 goto efault;
6242 }
6243 }
6244 break;
6245 case TARGET_NR_settimeofday:
6246 {
6247 struct timeval tv;
6248 if (copy_from_user_timeval(&tv, arg1))
6249 goto efault;
6250 ret = get_errno(settimeofday(&tv, NULL));
6251 }
6252 break;
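/* Targets that still provide the old select syscall pass a single pointer
 * to a sel_arg_struct holding the five arguments (n, in, out, ex, timeout)
 * packed together, so they are unpacked and byte-swapped here before the
 * common do_select() helper is called; S390X and Alpha pass the five
 * arguments directly in registers. */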
6253 #if defined(TARGET_NR_select)
6254 case TARGET_NR_select:
6255 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6256 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6257 #else
6258 {
6259 struct target_sel_arg_struct *sel;
6260 abi_ulong inp, outp, exp, tvp;
6261 long nsel;
6262
6263 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6264 goto efault;
6265 nsel = tswapal(sel->n);
6266 inp = tswapal(sel->inp);
6267 outp = tswapal(sel->outp);
6268 exp = tswapal(sel->exp);
6269 tvp = tswapal(sel->tvp);
6270 unlock_user_struct(sel, arg1, 0);
6271 ret = do_select(nsel, inp, outp, exp, tvp);
6272 }
6273 #endif
6274 break;
6275 #endif
6276 #ifdef TARGET_NR_pselect6
6277 case TARGET_NR_pselect6:
6278 {
6279 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6280 fd_set rfds, wfds, efds;
6281 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6282 struct timespec ts, *ts_ptr;
6283
6284 /*
6285 * The 6th arg is actually two args smashed together,
6286 * so we cannot use the C library.
6287 */
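/*
 * Roughly, the guest-side layout of that 6th argument is (field names
 * here are purely for illustration):
 *
 *     struct {
 *         abi_ulong sigset;   // guest address of the sigset, may be 0
 *         abi_ulong sigsize;  // must equal sizeof(target_sigset_t)
 *     };
 *
 * which is why two abi_ulongs are fetched and unpacked below.
 */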
6288 sigset_t set;
6289 struct {
6290 sigset_t *set;
6291 size_t size;
6292 } sig, *sig_ptr;
6293
6294 abi_ulong arg_sigset, arg_sigsize, *arg7;
6295 target_sigset_t *target_sigset;
6296
6297 n = arg1;
6298 rfd_addr = arg2;
6299 wfd_addr = arg3;
6300 efd_addr = arg4;
6301 ts_addr = arg5;
6302
6303 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6304 if (ret) {
6305 goto fail;
6306 }
6307 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6308 if (ret) {
6309 goto fail;
6310 }
6311 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6312 if (ret) {
6313 goto fail;
6314 }
6315
6316 /*
6317 * This takes a timespec, and not a timeval, so we cannot
6318 * use the do_select() helper ...
6319 */
6320 if (ts_addr) {
6321 if (target_to_host_timespec(&ts, ts_addr)) {
6322 goto efault;
6323 }
6324 ts_ptr = &ts;
6325 } else {
6326 ts_ptr = NULL;
6327 }
6328
6329 /* Extract the two packed args for the sigset */
6330 if (arg6) {
6331 sig_ptr = &sig;
6332 sig.size = _NSIG / 8;
6333
6334 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6335 if (!arg7) {
6336 goto efault;
6337 }
6338 arg_sigset = tswapal(arg7[0]);
6339 arg_sigsize = tswapal(arg7[1]);
6340 unlock_user(arg7, arg6, 0);
6341
6342 if (arg_sigset) {
6343 sig.set = &set;
6344 if (arg_sigsize != sizeof(*target_sigset)) {
6345 /* Like the kernel, we enforce correct size sigsets */
6346 ret = -TARGET_EINVAL;
6347 goto fail;
6348 }
6349 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6350 sizeof(*target_sigset), 1);
6351 if (!target_sigset) {
6352 goto efault;
6353 }
6354 target_to_host_sigset(&set, target_sigset);
6355 unlock_user(target_sigset, arg_sigset, 0);
6356 } else {
6357 sig.set = NULL;
6358 }
6359 } else {
6360 sig_ptr = NULL;
6361 }
6362
6363 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6364 ts_ptr, sig_ptr));
6365
6366 if (!is_error(ret)) {
6367 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6368 goto efault;
6369 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6370 goto efault;
6371 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6372 goto efault;
6373
6374 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6375 goto efault;
6376 }
6377 }
6378 break;
6379 #endif
6380 case TARGET_NR_symlink:
6381 {
6382 void *p2;
6383 p = lock_user_string(arg1);
6384 p2 = lock_user_string(arg2);
6385 if (!p || !p2)
6386 ret = -TARGET_EFAULT;
6387 else
6388 ret = get_errno(symlink(p, p2));
6389 unlock_user(p2, arg2, 0);
6390 unlock_user(p, arg1, 0);
6391 }
6392 break;
6393 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6394 case TARGET_NR_symlinkat:
6395 {
6396 void *p2;
6397 p = lock_user_string(arg1);
6398 p2 = lock_user_string(arg3);
6399 if (!p || !p2)
6400 ret = -TARGET_EFAULT;
6401 else
6402 ret = get_errno(sys_symlinkat(p, arg2, p2));
6403 unlock_user(p2, arg3, 0);
6404 unlock_user(p, arg1, 0);
6405 }
6406 break;
6407 #endif
6408 #ifdef TARGET_NR_oldlstat
6409 case TARGET_NR_oldlstat:
6410 goto unimplemented;
6411 #endif
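/* readlink() gets one special case: when the guest asks for
 * /proc/self/exe it should see the path of the binary being emulated,
 * not that of QEMU itself, so exec_path is resolved with realpath(3)
 * and copied into the guest buffer instead of calling readlink(2). */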
6412 case TARGET_NR_readlink:
6413 {
6414 void *p2, *temp;
6415 p = lock_user_string(arg1);
6416 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6417 if (!p || !p2)
6418 ret = -TARGET_EFAULT;
6419 else {
6420 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
6421 char real[PATH_MAX];
6422 temp = realpath(exec_path, real);
6423 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
6424 snprintf((char *)p2, arg3, "%s", real);
6425 }
6426 else
6427 ret = get_errno(readlink(path(p), p2, arg3));
6428 }
6429 unlock_user(p2, arg2, ret);
6430 unlock_user(p, arg1, 0);
6431 }
6432 break;
6433 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6434 case TARGET_NR_readlinkat:
6435 {
6436 void *p2;
6437 p = lock_user_string(arg2);
6438 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6439 if (!p || !p2)
6440 ret = -TARGET_EFAULT;
6441 else
6442 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
6443 unlock_user(p2, arg3, ret);
6444 unlock_user(p, arg2, 0);
6445 }
6446 break;
6447 #endif
6448 #ifdef TARGET_NR_uselib
6449 case TARGET_NR_uselib:
6450 goto unimplemented;
6451 #endif
6452 #ifdef TARGET_NR_swapon
6453 case TARGET_NR_swapon:
6454 if (!(p = lock_user_string(arg1)))
6455 goto efault;
6456 ret = get_errno(swapon(p, arg2));
6457 unlock_user(p, arg1, 0);
6458 break;
6459 #endif
6460 case TARGET_NR_reboot:
6461 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6462 /* arg4 (the restart command string) is only used for RESTART2
6463 and must be ignored in all other cases */
6463 p = lock_user_string(arg4);
6464 if (!p) {
6465 goto efault;
6466 }
6467 ret = get_errno(reboot(arg1, arg2, arg3, p));
6468 unlock_user(p, arg4, 0);
6469 } else {
6470 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6471 }
6472 break;
6473 #ifdef TARGET_NR_readdir
6474 case TARGET_NR_readdir:
6475 goto unimplemented;
6476 #endif
6477 #ifdef TARGET_NR_mmap
6478 case TARGET_NR_mmap:
6479 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6480 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6481 || defined(TARGET_S390X)
6482 {
6483 abi_ulong *v;
6484 abi_ulong v1, v2, v3, v4, v5, v6;
6485 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6486 goto efault;
6487 v1 = tswapal(v[0]);
6488 v2 = tswapal(v[1]);
6489 v3 = tswapal(v[2]);
6490 v4 = tswapal(v[3]);
6491 v5 = tswapal(v[4]);
6492 v6 = tswapal(v[5]);
6493 unlock_user(v, arg1, 0);
6494 ret = get_errno(target_mmap(v1, v2, v3,
6495 target_to_host_bitmask(v4, mmap_flags_tbl),
6496 v5, v6));
6497 }
6498 #else
6499 ret = get_errno(target_mmap(arg1, arg2, arg3,
6500 target_to_host_bitmask(arg4, mmap_flags_tbl),
6501 arg5,
6502 arg6));
6503 #endif
6504 break;
6505 #endif
6506 #ifdef TARGET_NR_mmap2
6507 case TARGET_NR_mmap2:
6508 #ifndef MMAP_SHIFT
6509 #define MMAP_SHIFT 12
6510 #endif
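/* mmap2 passes the file offset in units of 2^MMAP_SHIFT bytes (4096 by
 * default), hence the shift below; this lets 32-bit guests map file
 * offsets beyond what a plain 32-bit byte offset could express. */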
6511 ret = get_errno(target_mmap(arg1, arg2, arg3,
6512 target_to_host_bitmask(arg4, mmap_flags_tbl),
6513 arg5,
6514 arg6 << MMAP_SHIFT));
6515 break;
6516 #endif
6517 case TARGET_NR_munmap:
6518 ret = get_errno(target_munmap(arg1, arg2));
6519 break;
6520 case TARGET_NR_mprotect:
6521 {
6522 TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6523 /* Special hack to detect libc making the stack executable. */
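/* PROT_GROWSDOWN normally asks the kernel to apply the new protection
 * from the given address down to the bottom of the stack mapping. The
 * flag is not forwarded to target_mprotect(), so the request is widened
 * by hand to start at stack_limit instead, which is a best-effort
 * emulation of the same behaviour. */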
6524 if ((arg3 & PROT_GROWSDOWN)
6525 && arg1 >= ts->info->stack_limit
6526 && arg1 <= ts->info->start_stack) {
6527 arg3 &= ~PROT_GROWSDOWN;
6528 arg2 = arg2 + arg1 - ts->info->stack_limit;
6529 arg1 = ts->info->stack_limit;
6530 }
6531 }
6532 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6533 break;
6534 #ifdef TARGET_NR_mremap
6535 case TARGET_NR_mremap:
6536 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6537 break;
6538 #endif
6539 /* ??? msync/mlock/munlock are broken for softmmu. */
6540 #ifdef TARGET_NR_msync
6541 case TARGET_NR_msync:
6542 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6543 break;
6544 #endif
6545 #ifdef TARGET_NR_mlock
6546 case TARGET_NR_mlock:
6547 ret = get_errno(mlock(g2h(arg1), arg2));
6548 break;
6549 #endif
6550 #ifdef TARGET_NR_munlock
6551 case TARGET_NR_munlock:
6552 ret = get_errno(munlock(g2h(arg1), arg2));
6553 break;
6554 #endif
6555 #ifdef TARGET_NR_mlockall
6556 case TARGET_NR_mlockall:
6557 ret = get_errno(mlockall(arg1));
6558 break;
6559 #endif
6560 #ifdef TARGET_NR_munlockall
6561 case TARGET_NR_munlockall:
6562 ret = get_errno(munlockall());
6563 break;
6564 #endif
6565 case TARGET_NR_truncate:
6566 if (!(p = lock_user_string(arg1)))
6567 goto efault;
6568 ret = get_errno(truncate(p, arg2));
6569 unlock_user(p, arg1, 0);
6570 break;
6571 case TARGET_NR_ftruncate:
6572 ret = get_errno(ftruncate(arg1, arg2));
6573 break;
6574 case TARGET_NR_fchmod:
6575 ret = get_errno(fchmod(arg1, arg2));
6576 break;
6577 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6578 case TARGET_NR_fchmodat:
6579 if (!(p = lock_user_string(arg2)))
6580 goto efault;
6581 ret = get_errno(sys_fchmodat(arg1, p, arg3));
6582 unlock_user(p, arg2, 0);
6583 break;
6584 #endif
6585 case TARGET_NR_getpriority:
6586 /* Note that negative values are valid for getpriority, so we must
6587 differentiate based on errno settings. */
6588 errno = 0;
6589 ret = getpriority(arg1, arg2);
6590 if (ret == -1 && errno != 0) {
6591 ret = -host_to_target_errno(errno);
6592 break;
6593 }
6594 #ifdef TARGET_ALPHA
6595 /* Return value is the unbiased priority. Signal no error. */
6596 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6597 #else
6598 /* Return value is a biased priority to avoid negative numbers. */
6599 ret = 20 - ret;
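/* e.g. a host nice value of -20 comes back as 40, 0 as 20 and +19 as 1,
 * matching what the kernel's getpriority syscall returns to userspace. */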
6600 #endif
6601 break;
6602 case TARGET_NR_setpriority:
6603 ret = get_errno(setpriority(arg1, arg2, arg3));
6604 break;
6605 #ifdef TARGET_NR_profil
6606 case TARGET_NR_profil:
6607 goto unimplemented;
6608 #endif
6609 case TARGET_NR_statfs:
6610 if (!(p = lock_user_string(arg1)))
6611 goto efault;
6612 ret = get_errno(statfs(path(p), &stfs));
6613 unlock_user(p, arg1, 0);
6614 convert_statfs:
6615 if (!is_error(ret)) {
6616 struct target_statfs *target_stfs;
6617
6618 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6619 goto efault;
6620 __put_user(stfs.f_type, &target_stfs->f_type);
6621 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6622 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6623 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6624 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6625 __put_user(stfs.f_files, &target_stfs->f_files);
6626 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6627 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6628 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6629 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6630 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6631 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6632 unlock_user_struct(target_stfs, arg2, 1);
6633 }
6634 break;
6635 case TARGET_NR_fstatfs:
6636 ret = get_errno(fstatfs(arg1, &stfs));
6637 goto convert_statfs;
6638 #ifdef TARGET_NR_statfs64
6639 case TARGET_NR_statfs64:
6640 if (!(p = lock_user_string(arg1)))
6641 goto efault;
6642 ret = get_errno(statfs(path(p), &stfs));
6643 unlock_user(p, arg1, 0);
6644 convert_statfs64:
6645 if (!is_error(ret)) {
6646 struct target_statfs64 *target_stfs;
6647
6648 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6649 goto efault;
6650 __put_user(stfs.f_type, &target_stfs->f_type);
6651 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6652 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6653 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6654 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6655 __put_user(stfs.f_files, &target_stfs->f_files);
6656 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6657 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6658 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6659 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6660 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6661 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6662 unlock_user_struct(target_stfs, arg3, 1);
6663 }
6664 break;
6665 case TARGET_NR_fstatfs64:
6666 ret = get_errno(fstatfs(arg1, &stfs));
6667 goto convert_statfs64;
6668 #endif
6669 #ifdef TARGET_NR_ioperm
6670 case TARGET_NR_ioperm:
6671 goto unimplemented;
6672 #endif
6673 #ifdef TARGET_NR_socketcall
6674 case TARGET_NR_socketcall:
6675 ret = do_socketcall(arg1, arg2);
6676 break;
6677 #endif
6678 #ifdef TARGET_NR_accept
6679 case TARGET_NR_accept:
6680 ret = do_accept(arg1, arg2, arg3);
6681 break;
6682 #endif
6683 #ifdef TARGET_NR_bind
6684 case TARGET_NR_bind:
6685 ret = do_bind(arg1, arg2, arg3);
6686 break;
6687 #endif
6688 #ifdef TARGET_NR_connect
6689 case TARGET_NR_connect:
6690 ret = do_connect(arg1, arg2, arg3);
6691 break;
6692 #endif
6693 #ifdef TARGET_NR_getpeername
6694 case TARGET_NR_getpeername:
6695 ret = do_getpeername(arg1, arg2, arg3);
6696 break;
6697 #endif
6698 #ifdef TARGET_NR_getsockname
6699 case TARGET_NR_getsockname:
6700 ret = do_getsockname(arg1, arg2, arg3);
6701 break;
6702 #endif
6703 #ifdef TARGET_NR_getsockopt
6704 case TARGET_NR_getsockopt:
6705 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6706 break;
6707 #endif
6708 #ifdef TARGET_NR_listen
6709 case TARGET_NR_listen:
6710 ret = get_errno(listen(arg1, arg2));
6711 break;
6712 #endif
6713 #ifdef TARGET_NR_recv
6714 case TARGET_NR_recv:
6715 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6716 break;
6717 #endif
6718 #ifdef TARGET_NR_recvfrom
6719 case TARGET_NR_recvfrom:
6720 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6721 break;
6722 #endif
6723 #ifdef TARGET_NR_recvmsg
6724 case TARGET_NR_recvmsg:
6725 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6726 break;
6727 #endif
6728 #ifdef TARGET_NR_send
6729 case TARGET_NR_send:
6730 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6731 break;
6732 #endif
6733 #ifdef TARGET_NR_sendmsg
6734 case TARGET_NR_sendmsg:
6735 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6736 break;
6737 #endif
6738 #ifdef TARGET_NR_sendto
6739 case TARGET_NR_sendto:
6740 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6741 break;
6742 #endif
6743 #ifdef TARGET_NR_shutdown
6744 case TARGET_NR_shutdown:
6745 ret = get_errno(shutdown(arg1, arg2));
6746 break;
6747 #endif
6748 #ifdef TARGET_NR_socket
6749 case TARGET_NR_socket:
6750 ret = do_socket(arg1, arg2, arg3);
6751 break;
6752 #endif
6753 #ifdef TARGET_NR_socketpair
6754 case TARGET_NR_socketpair:
6755 ret = do_socketpair(arg1, arg2, arg3, arg4);
6756 break;
6757 #endif
6758 #ifdef TARGET_NR_setsockopt
6759 case TARGET_NR_setsockopt:
6760 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6761 break;
6762 #endif
6763
6764 case TARGET_NR_syslog:
6765 if (!(p = lock_user_string(arg2)))
6766 goto efault;
6767 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6768 unlock_user(p, arg2, 0);
6769 break;
6770
6771 case TARGET_NR_setitimer:
6772 {
6773 struct itimerval value, ovalue, *pvalue;
6774
6775 if (arg2) {
6776 pvalue = &value;
6777 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6778 || copy_from_user_timeval(&pvalue->it_value,
6779 arg2 + sizeof(struct target_timeval)))
6780 goto efault;
6781 } else {
6782 pvalue = NULL;
6783 }
6784 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6785 if (!is_error(ret) && arg3) {
6786 if (copy_to_user_timeval(arg3,
6787 &ovalue.it_interval)
6788 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6789 &ovalue.it_value))
6790 goto efault;
6791 }
6792 }
6793 break;
6794 case TARGET_NR_getitimer:
6795 {
6796 struct itimerval value;
6797
6798 ret = get_errno(getitimer(arg1, &value));
6799 if (!is_error(ret) && arg2) {
6800 if (copy_to_user_timeval(arg2,
6801 &value.it_interval)
6802 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6803 &value.it_value))
6804 goto efault;
6805 }
6806 }
6807 break;
6808 case TARGET_NR_stat:
6809 if (!(p = lock_user_string(arg1)))
6810 goto efault;
6811 ret = get_errno(stat(path(p), &st));
6812 unlock_user(p, arg1, 0);
6813 goto do_stat;
6814 case TARGET_NR_lstat:
6815 if (!(p = lock_user_string(arg1)))
6816 goto efault;
6817 ret = get_errno(lstat(path(p), &st));
6818 unlock_user(p, arg1, 0);
6819 goto do_stat;
6820 case TARGET_NR_fstat:
6821 {
6822 ret = get_errno(fstat(arg1, &st));
6823 do_stat:
6824 if (!is_error(ret)) {
6825 struct target_stat *target_st;
6826
6827 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6828 goto efault;
6829 memset(target_st, 0, sizeof(*target_st));
6830 __put_user(st.st_dev, &target_st->st_dev);
6831 __put_user(st.st_ino, &target_st->st_ino);
6832 __put_user(st.st_mode, &target_st->st_mode);
6833 __put_user(st.st_uid, &target_st->st_uid);
6834 __put_user(st.st_gid, &target_st->st_gid);
6835 __put_user(st.st_nlink, &target_st->st_nlink);
6836 __put_user(st.st_rdev, &target_st->st_rdev);
6837 __put_user(st.st_size, &target_st->st_size);
6838 __put_user(st.st_blksize, &target_st->st_blksize);
6839 __put_user(st.st_blocks, &target_st->st_blocks);
6840 __put_user(st.st_atime, &target_st->target_st_atime);
6841 __put_user(st.st_mtime, &target_st->target_st_mtime);
6842 __put_user(st.st_ctime, &target_st->target_st_ctime);
6843 unlock_user_struct(target_st, arg2, 1);
6844 }
6845 }
6846 break;
6847 #ifdef TARGET_NR_olduname
6848 case TARGET_NR_olduname:
6849 goto unimplemented;
6850 #endif
6851 #ifdef TARGET_NR_iopl
6852 case TARGET_NR_iopl:
6853 goto unimplemented;
6854 #endif
6855 case TARGET_NR_vhangup:
6856 ret = get_errno(vhangup());
6857 break;
6858 #ifdef TARGET_NR_idle
6859 case TARGET_NR_idle:
6860 goto unimplemented;
6861 #endif
6862 #ifdef TARGET_NR_syscall
6863 case TARGET_NR_syscall:
6864 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6865 arg6, arg7, arg8, 0);
6866 break;
6867 #endif
6868 case TARGET_NR_wait4:
6869 {
6870 int status;
6871 abi_long status_ptr = arg2;
6872 struct rusage rusage, *rusage_ptr;
6873 abi_ulong target_rusage = arg4;
6874 if (target_rusage)
6875 rusage_ptr = &rusage;
6876 else
6877 rusage_ptr = NULL;
6878 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6879 if (!is_error(ret)) {
6880 if (status_ptr && ret) {
6881 status = host_to_target_waitstatus(status);
6882 if (put_user_s32(status, status_ptr))
6883 goto efault;
6884 }
6885 if (target_rusage)
6886 host_to_target_rusage(target_rusage, &rusage);
6887 }
6888 }
6889 break;
6890 #ifdef TARGET_NR_swapoff
6891 case TARGET_NR_swapoff:
6892 if (!(p = lock_user_string(arg1)))
6893 goto efault;
6894 ret = get_errno(swapoff(p));
6895 unlock_user(p, arg1, 0);
6896 break;
6897 #endif
6898 case TARGET_NR_sysinfo:
6899 {
6900 struct target_sysinfo *target_value;
6901 struct sysinfo value;
6902 ret = get_errno(sysinfo(&value));
6903 if (!is_error(ret) && arg1)
6904 {
6905 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6906 goto efault;
6907 __put_user(value.uptime, &target_value->uptime);
6908 __put_user(value.loads[0], &target_value->loads[0]);
6909 __put_user(value.loads[1], &target_value->loads[1]);
6910 __put_user(value.loads[2], &target_value->loads[2]);
6911 __put_user(value.totalram, &target_value->totalram);
6912 __put_user(value.freeram, &target_value->freeram);
6913 __put_user(value.sharedram, &target_value->sharedram);
6914 __put_user(value.bufferram, &target_value->bufferram);
6915 __put_user(value.totalswap, &target_value->totalswap);
6916 __put_user(value.freeswap, &target_value->freeswap);
6917 __put_user(value.procs, &target_value->procs);
6918 __put_user(value.totalhigh, &target_value->totalhigh);
6919 __put_user(value.freehigh, &target_value->freehigh);
6920 __put_user(value.mem_unit, &target_value->mem_unit);
6921 unlock_user_struct(target_value, arg1, 1);
6922 }
6923 }
6924 break;
6925 #ifdef TARGET_NR_ipc
6926 case TARGET_NR_ipc:
6927 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6928 break;
6929 #endif
6930 #ifdef TARGET_NR_semget
6931 case TARGET_NR_semget:
6932 ret = get_errno(semget(arg1, arg2, arg3));
6933 break;
6934 #endif
6935 #ifdef TARGET_NR_semop
6936 case TARGET_NR_semop:
6937 ret = get_errno(do_semop(arg1, arg2, arg3));
6938 break;
6939 #endif
6940 #ifdef TARGET_NR_semctl
6941 case TARGET_NR_semctl:
6942 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6943 break;
6944 #endif
6945 #ifdef TARGET_NR_msgctl
6946 case TARGET_NR_msgctl:
6947 ret = do_msgctl(arg1, arg2, arg3);
6948 break;
6949 #endif
6950 #ifdef TARGET_NR_msgget
6951 case TARGET_NR_msgget:
6952 ret = get_errno(msgget(arg1, arg2));
6953 break;
6954 #endif
6955 #ifdef TARGET_NR_msgrcv
6956 case TARGET_NR_msgrcv:
6957 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6958 break;
6959 #endif
6960 #ifdef TARGET_NR_msgsnd
6961 case TARGET_NR_msgsnd:
6962 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6963 break;
6964 #endif
6965 #ifdef TARGET_NR_shmget
6966 case TARGET_NR_shmget:
6967 ret = get_errno(shmget(arg1, arg2, arg3));
6968 break;
6969 #endif
6970 #ifdef TARGET_NR_shmctl
6971 case TARGET_NR_shmctl:
6972 ret = do_shmctl(arg1, arg2, arg3);
6973 break;
6974 #endif
6975 #ifdef TARGET_NR_shmat
6976 case TARGET_NR_shmat:
6977 ret = do_shmat(arg1, arg2, arg3);
6978 break;
6979 #endif
6980 #ifdef TARGET_NR_shmdt
6981 case TARGET_NR_shmdt:
6982 ret = do_shmdt(arg1);
6983 break;
6984 #endif
6985 case TARGET_NR_fsync:
6986 ret = get_errno(fsync(arg1));
6987 break;
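/* clone() takes essentially the same five values everywhere (flags, child
 * stack, parent/child tid pointers and the TLS argument), but the register
 * order differs between architectures, so each target family hands the raw
 * syscall arguments to do_fork() in its own order. */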
6988 case TARGET_NR_clone:
6989 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6990 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6991 #elif defined(TARGET_CRIS)
6992 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6993 #elif defined(TARGET_MICROBLAZE)
6994 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
6995 #elif defined(TARGET_S390X)
6996 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6997 #else
6998 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6999 #endif
7000 break;
7001 #ifdef __NR_exit_group
7002 /* new thread calls */
7003 case TARGET_NR_exit_group:
7004 #ifdef TARGET_GPROF
7005 _mcleanup();
7006 #endif
7007 gdb_exit(cpu_env, arg1);
7008 ret = get_errno(exit_group(arg1));
7009 break;
7010 #endif
7011 case TARGET_NR_setdomainname:
7012 if (!(p = lock_user_string(arg1)))
7013 goto efault;
7014 ret = get_errno(setdomainname(p, arg2));
7015 unlock_user(p, arg1, 0);
7016 break;
7017 case TARGET_NR_uname:
7018 /* no need to transcode because we use the linux syscall */
7019 {
7020 struct new_utsname * buf;
7021
7022 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7023 goto efault;
7024 ret = get_errno(sys_uname(buf));
7025 if (!is_error(ret)) {
7026 /* Overwrite the native machine name with whatever is being
7027 emulated. */
7028 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7029 /* Allow the user to override the reported release. */
7030 if (qemu_uname_release && *qemu_uname_release)
7031 strcpy (buf->release, qemu_uname_release);
7032 }
7033 unlock_user_struct(buf, arg1, 1);
7034 }
7035 break;
7036 #ifdef TARGET_I386
7037 case TARGET_NR_modify_ldt:
7038 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7039 break;
7040 #if !defined(TARGET_X86_64)
7041 case TARGET_NR_vm86old:
7042 goto unimplemented;
7043 case TARGET_NR_vm86:
7044 ret = do_vm86(cpu_env, arg1, arg2);
7045 break;
7046 #endif
7047 #endif
7048 case TARGET_NR_adjtimex:
7049 goto unimplemented;
7050 #ifdef TARGET_NR_create_module
7051 case TARGET_NR_create_module:
7052 #endif
7053 case TARGET_NR_init_module:
7054 case TARGET_NR_delete_module:
7055 #ifdef TARGET_NR_get_kernel_syms
7056 case TARGET_NR_get_kernel_syms:
7057 #endif
7058 goto unimplemented;
7059 case TARGET_NR_quotactl:
7060 goto unimplemented;
7061 case TARGET_NR_getpgid:
7062 ret = get_errno(getpgid(arg1));
7063 break;
7064 case TARGET_NR_fchdir:
7065 ret = get_errno(fchdir(arg1));
7066 break;
7067 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7068 case TARGET_NR_bdflush:
7069 goto unimplemented;
7070 #endif
7071 #ifdef TARGET_NR_sysfs
7072 case TARGET_NR_sysfs:
7073 goto unimplemented;
7074 #endif
7075 case TARGET_NR_personality:
7076 ret = get_errno(personality(arg1));
7077 break;
7078 #ifdef TARGET_NR_afs_syscall
7079 case TARGET_NR_afs_syscall:
7080 goto unimplemented;
7081 #endif
7082 #ifdef TARGET_NR__llseek /* Not on alpha */
7083 case TARGET_NR__llseek:
7084 {
7085 int64_t res;
7086 #if !defined(__NR_llseek)
7087 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7088 if (res == -1) {
7089 ret = get_errno(res);
7090 } else {
7091 ret = 0;
7092 }
7093 #else
7094 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7095 #endif
7096 if ((ret == 0) && put_user_s64(res, arg4)) {
7097 goto efault;
7098 }
7099 }
7100 break;
7101 #endif
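/* getdents needs a conversion pass when a 32-bit guest runs on a 64-bit
 * host: the host struct linux_dirent uses 64-bit d_ino/d_off fields, so
 * each record is repacked into the narrower target_dirent layout and its
 * d_reclen adjusted accordingly. When the layouts already match, the
 * records are only byte-swapped in place. */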
7102 case TARGET_NR_getdents:
7103 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7104 {
7105 struct target_dirent *target_dirp;
7106 struct linux_dirent *dirp;
7107 abi_long count = arg3;
7108
7109 dirp = malloc(count);
7110 if (!dirp) {
7111 ret = -TARGET_ENOMEM;
7112 goto fail;
7113 }
7114
7115 ret = get_errno(sys_getdents(arg1, dirp, count));
7116 if (!is_error(ret)) {
7117 struct linux_dirent *de;
7118 struct target_dirent *tde;
7119 int len = ret;
7120 int reclen, treclen;
7121 int count1, tnamelen;
7122
7123 count1 = 0;
7124 de = dirp;
7125 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7126 goto efault;
7127 tde = target_dirp;
7128 while (len > 0) {
7129 reclen = de->d_reclen;
7130 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7131 assert(tnamelen >= 0);
7132 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7133 assert(count1 + treclen <= count);
7134 tde->d_reclen = tswap16(treclen);
7135 tde->d_ino = tswapal(de->d_ino);
7136 tde->d_off = tswapal(de->d_off);
7137 memcpy(tde->d_name, de->d_name, tnamelen);
7138 de = (struct linux_dirent *)((char *)de + reclen);
7139 len -= reclen;
7140 tde = (struct target_dirent *)((char *)tde + treclen);
7141 count1 += treclen;
7142 }
7143 ret = count1;
7144 unlock_user(target_dirp, arg2, ret);
7145 }
7146 free(dirp);
7147 }
7148 #else
7149 {
7150 struct linux_dirent *dirp;
7151 abi_long count = arg3;
7152
7153 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7154 goto efault;
7155 ret = get_errno(sys_getdents(arg1, dirp, count));
7156 if (!is_error(ret)) {
7157 struct linux_dirent *de;
7158 int len = ret;
7159 int reclen;
7160 de = dirp;
7161 while (len > 0) {
7162 reclen = de->d_reclen;
7163 if (reclen > len)
7164 break;
7165 de->d_reclen = tswap16(reclen);
7166 tswapls(&de->d_ino);
7167 tswapls(&de->d_off);
7168 de = (struct linux_dirent *)((char *)de + reclen);
7169 len -= reclen;
7170 }
7171 }
7172 unlock_user(dirp, arg2, ret);
7173 }
7174 #endif
7175 break;
7176 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7177 case TARGET_NR_getdents64:
7178 {
7179 struct linux_dirent64 *dirp;
7180 abi_long count = arg3;
7181 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7182 goto efault;
7183 ret = get_errno(sys_getdents64(arg1, dirp, count));
7184 if (!is_error(ret)) {
7185 struct linux_dirent64 *de;
7186 int len = ret;
7187 int reclen;
7188 de = dirp;
7189 while (len > 0) {
7190 reclen = de->d_reclen;
7191 if (reclen > len)
7192 break;
7193 de->d_reclen = tswap16(reclen);
7194 tswap64s((uint64_t *)&de->d_ino);
7195 tswap64s((uint64_t *)&de->d_off);
7196 de = (struct linux_dirent64 *)((char *)de + reclen);
7197 len -= reclen;
7198 }
7199 }
7200 unlock_user(dirp, arg2, ret);
7201 }
7202 break;
7203 #endif /* TARGET_NR_getdents64 */
7204 #if defined(TARGET_NR__newselect)
7205 case TARGET_NR__newselect:
7206 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7207 break;
7208 #endif
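/* poll and ppoll share one implementation: the guest pollfd array is
 * copied into a host array with byte-swapped fd/events fields, ppoll
 * additionally converts the timespec timeout and the signal mask, and
 * the revents results are swapped back into the guest array afterwards. */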
7209 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7210 # ifdef TARGET_NR_poll
7211 case TARGET_NR_poll:
7212 # endif
7213 # ifdef TARGET_NR_ppoll
7214 case TARGET_NR_ppoll:
7215 # endif
7216 {
7217 struct target_pollfd *target_pfd;
7218 unsigned int nfds = arg2;
7219 int timeout = arg3;
7220 struct pollfd *pfd;
7221 unsigned int i;
7222
7223 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7224 if (!target_pfd)
7225 goto efault;
7226
7227 pfd = alloca(sizeof(struct pollfd) * nfds);
7228 for(i = 0; i < nfds; i++) {
7229 pfd[i].fd = tswap32(target_pfd[i].fd);
7230 pfd[i].events = tswap16(target_pfd[i].events);
7231 }
7232
7233 # ifdef TARGET_NR_ppoll
7234 if (num == TARGET_NR_ppoll) {
7235 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7236 target_sigset_t *target_set;
7237 sigset_t _set, *set = &_set;
7238
7239 if (arg3) {
7240 if (target_to_host_timespec(timeout_ts, arg3)) {
7241 unlock_user(target_pfd, arg1, 0);
7242 goto efault;
7243 }
7244 } else {
7245 timeout_ts = NULL;
7246 }
7247
7248 if (arg4) {
7249 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7250 if (!target_set) {
7251 unlock_user(target_pfd, arg1, 0);
7252 goto efault;
7253 }
7254 target_to_host_sigset(set, target_set);
7255 } else {
7256 set = NULL;
7257 }
7258
7259 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7260
7261 if (!is_error(ret) && arg3) {
7262 host_to_target_timespec(arg3, timeout_ts);
7263 }
7264 if (arg4) {
7265 unlock_user(target_set, arg4, 0);
7266 }
7267 } else
7268 # endif
7269 ret = get_errno(poll(pfd, nfds, timeout));
7270
7271 if (!is_error(ret)) {
7272 for(i = 0; i < nfds; i++) {
7273 target_pfd[i].revents = tswap16(pfd[i].revents);
7274 }
7275 }
7276 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7277 }
7278 break;
7279 #endif
7280 case TARGET_NR_flock:
7281 /* NOTE: the flock constant seems to be the same for every
7282 Linux platform */
7283 ret = get_errno(flock(arg1, arg2));
7284 break;
7285 case TARGET_NR_readv:
7286 {
7287 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7288 if (vec != NULL) {
7289 ret = get_errno(readv(arg1, vec, arg3));
7290 unlock_iovec(vec, arg2, arg3, 1);
7291 } else {
7292 ret = -host_to_target_errno(errno);
7293 }
7294 }
7295 break;
7296 case TARGET_NR_writev:
7297 {
7298 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7299 if (vec != NULL) {
7300 ret = get_errno(writev(arg1, vec, arg3));
7301 unlock_iovec(vec, arg2, arg3, 0);
7302 } else {
7303 ret = -host_to_target_errno(errno);
7304 }
7305 }
7306 break;
7307 case TARGET_NR_getsid:
7308 ret = get_errno(getsid(arg1));
7309 break;
7310 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7311 case TARGET_NR_fdatasync:
7312 ret = get_errno(fdatasync(arg1));
7313 break;
7314 #endif
7315 case TARGET_NR__sysctl:
7316 /* We don't implement this, but ENOTDIR is always a safe
7317 return value. */
7318 ret = -TARGET_ENOTDIR;
7319 break;
7320 case TARGET_NR_sched_getaffinity:
7321 {
7322 unsigned int mask_size;
7323 unsigned long *mask;
7324
7325 /*
7326 * sched_getaffinity needs multiples of ulong, so need to take
7327 * care of mismatches between target ulong and host ulong sizes.
7328 */
7329 if (arg2 & (sizeof(abi_ulong) - 1)) {
7330 ret = -TARGET_EINVAL;
7331 break;
7332 }
7333 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
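/* e.g. with 4-byte abi_ulong and 8-byte host longs, a guest length of 4
 * rounds up to a host mask_size of 8. */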
7334
7335 mask = alloca(mask_size);
7336 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7337
7338 if (!is_error(ret)) {
7339 if (copy_to_user(arg3, mask, ret)) {
7340 goto efault;
7341 }
7342 }
7343 }
7344 break;
7345 case TARGET_NR_sched_setaffinity:
7346 {
7347 unsigned int mask_size;
7348 unsigned long *mask;
7349
7350 /*
7351 * sched_setaffinity needs multiples of ulong, so need to take
7352 * care of mismatches between target ulong and host ulong sizes.
7353 */
7354 if (arg2 & (sizeof(abi_ulong) - 1)) {
7355 ret = -TARGET_EINVAL;
7356 break;
7357 }
7358 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7359
7360 mask = alloca(mask_size);
7361 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7362 goto efault;
7363 }
7364 memcpy(mask, p, arg2);
7365 unlock_user_struct(p, arg3, 0);
7366
7367 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7368 }
7369 break;
7370 case TARGET_NR_sched_setparam:
7371 {
7372 struct sched_param *target_schp;
7373 struct sched_param schp;
7374
7375 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7376 goto efault;
7377 schp.sched_priority = tswap32(target_schp->sched_priority);
7378 unlock_user_struct(target_schp, arg2, 0);
7379 ret = get_errno(sched_setparam(arg1, &schp));
7380 }
7381 break;
7382 case TARGET_NR_sched_getparam:
7383 {
7384 struct sched_param *target_schp;
7385 struct sched_param schp;
7386 ret = get_errno(sched_getparam(arg1, &schp));
7387 if (!is_error(ret)) {
7388 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7389 goto efault;
7390 target_schp->sched_priority = tswap32(schp.sched_priority);
7391 unlock_user_struct(target_schp, arg2, 1);
7392 }
7393 }
7394 break;
7395 case TARGET_NR_sched_setscheduler:
7396 {
7397 struct sched_param *target_schp;
7398 struct sched_param schp;
7399 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7400 goto efault;
7401 schp.sched_priority = tswap32(target_schp->sched_priority);
7402 unlock_user_struct(target_schp, arg3, 0);
7403 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7404 }
7405 break;
7406 case TARGET_NR_sched_getscheduler:
7407 ret = get_errno(sched_getscheduler(arg1));
7408 break;
7409 case TARGET_NR_sched_yield:
7410 ret = get_errno(sched_yield());
7411 break;
7412 case TARGET_NR_sched_get_priority_max:
7413 ret = get_errno(sched_get_priority_max(arg1));
7414 break;
7415 case TARGET_NR_sched_get_priority_min:
7416 ret = get_errno(sched_get_priority_min(arg1));
7417 break;
7418 case TARGET_NR_sched_rr_get_interval:
7419 {
7420 struct timespec ts;
7421 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7422 if (!is_error(ret)) {
7423 host_to_target_timespec(arg2, &ts);
7424 }
7425 }
7426 break;
7427 case TARGET_NR_nanosleep:
7428 {
7429 struct timespec req, rem;
7430 target_to_host_timespec(&req, arg1);
7431 ret = get_errno(nanosleep(&req, &rem));
7432 if (is_error(ret) && arg2) {
7433 host_to_target_timespec(arg2, &rem);
7434 }
7435 }
7436 break;
7437 #ifdef TARGET_NR_query_module
7438 case TARGET_NR_query_module:
7439 goto unimplemented;
7440 #endif
7441 #ifdef TARGET_NR_nfsservctl
7442 case TARGET_NR_nfsservctl:
7443 goto unimplemented;
7444 #endif
7445 case TARGET_NR_prctl:
7446 switch (arg1) {
7447 case PR_GET_PDEATHSIG:
7448 {
7449 int deathsig;
7450 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7451 if (!is_error(ret) && arg2
7452 && put_user_ual(deathsig, arg2)) {
7453 goto efault;
7454 }
7455 break;
7456 }
7457 #ifdef PR_GET_NAME
7458 case PR_GET_NAME:
7459 {
7460 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7461 if (!name) {
7462 goto efault;
7463 }
7464 ret = get_errno(prctl(arg1, (unsigned long)name,
7465 arg3, arg4, arg5));
7466 unlock_user(name, arg2, 16);
7467 break;
7468 }
7469 case PR_SET_NAME:
7470 {
7471 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7472 if (!name) {
7473 goto efault;
7474 }
7475 ret = get_errno(prctl(arg1, (unsigned long)name,
7476 arg3, arg4, arg5));
7477 unlock_user(name, arg2, 0);
7478 break;
7479 }
7480 #endif
7481 default:
7482 /* Most prctl options have no pointer arguments */
7483 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7484 break;
7485 }
7486 break;
7487 #ifdef TARGET_NR_arch_prctl
7488 case TARGET_NR_arch_prctl:
7489 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7490 ret = do_arch_prctl(cpu_env, arg1, arg2);
7491 break;
7492 #else
7493 goto unimplemented;
7494 #endif
7495 #endif
7496 #ifdef TARGET_NR_pread64
7497 case TARGET_NR_pread64:
7498 if (regpairs_aligned(cpu_env)) {
7499 arg4 = arg5;
7500 arg5 = arg6;
7501 }
7502 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7503 goto efault;
7504 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7505 unlock_user(p, arg2, ret);
7506 break;
7507 case TARGET_NR_pwrite64:
7508 if (regpairs_aligned(cpu_env)) {
7509 arg4 = arg5;
7510 arg5 = arg6;
7511 }
7512 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7513 goto efault;
7514 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7515 unlock_user(p, arg2, 0);
7516 break;
7517 #endif
7518 case TARGET_NR_getcwd:
7519 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7520 goto efault;
7521 ret = get_errno(sys_getcwd1(p, arg2));
7522 unlock_user(p, arg1, ret);
7523 break;
7524 case TARGET_NR_capget:
7525 goto unimplemented;
7526 case TARGET_NR_capset:
7527 goto unimplemented;
7528 case TARGET_NR_sigaltstack:
7529 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7530 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7531 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7532 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7533 break;
7534 #else
7535 goto unimplemented;
7536 #endif
7537
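/* sendfile's third argument is an in/out off_t pointer: if the guest
 * supplies one, the starting offset is read from guest memory before the
 * host call and the updated offset is written back afterwards, mirroring
 * the sendfile(2) contract. sendfile64 differs only in taking a 64-bit
 * offset on 32-bit targets. */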
7538 #ifdef CONFIG_SENDFILE
7539 case TARGET_NR_sendfile:
7540 {
7541 off_t *offp = NULL;
7542 off_t off;
7543 if (arg3) {
7544 ret = get_user_sal(off, arg3);
7545 if (is_error(ret)) {
7546 break;
7547 }
7548 offp = &off;
7549 }
7550 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7551 if (!is_error(ret) && arg3) {
7552 abi_long ret2 = put_user_sal(off, arg3);
7553 if (is_error(ret2)) {
7554 ret = ret2;
7555 }
7556 }
7557 break;
7558 }
7559 #ifdef TARGET_NR_sendfile64
7560 case TARGET_NR_sendfile64:
7561 {
7562 off_t *offp = NULL;
7563 off_t off;
7564 if (arg3) {
7565 ret = get_user_s64(off, arg3);
7566 if (is_error(ret)) {
7567 break;
7568 }
7569 offp = &off;
7570 }
7571 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7572 if (!is_error(ret) && arg3) {
7573 abi_long ret2 = put_user_s64(off, arg3);
7574 if (is_error(ret2)) {
7575 ret = ret2;
7576 }
7577 }
7578 break;
7579 }
7580 #endif
7581 #else
7582 case TARGET_NR_sendfile:
7583 #ifdef TARGET_NR_sendfile64
7584 case TARGET_NR_sendfile64:
7585 #endif
7586 goto unimplemented;
7587 #endif
7588
7589 #ifdef TARGET_NR_getpmsg
7590 case TARGET_NR_getpmsg:
7591 goto unimplemented;
7592 #endif
7593 #ifdef TARGET_NR_putpmsg
7594 case TARGET_NR_putpmsg:
7595 goto unimplemented;
7596 #endif
7597 #ifdef TARGET_NR_vfork
7598 case TARGET_NR_vfork:
7599 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7600 0, 0, 0, 0));
7601 break;
7602 #endif
7603 #ifdef TARGET_NR_ugetrlimit
7604 case TARGET_NR_ugetrlimit:
7605 {
7606 struct rlimit rlim;
7607 int resource = target_to_host_resource(arg1);
7608 ret = get_errno(getrlimit(resource, &rlim));
7609 if (!is_error(ret)) {
7610 struct target_rlimit *target_rlim;
7611 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7612 goto efault;
7613 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7614 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7615 unlock_user_struct(target_rlim, arg2, 1);
7616 }
7617 break;
7618 }
7619 #endif
7620 #ifdef TARGET_NR_truncate64
7621 case TARGET_NR_truncate64:
7622 if (!(p = lock_user_string(arg1)))
7623 goto efault;
7624 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7625 unlock_user(p, arg1, 0);
7626 break;
7627 #endif
7628 #ifdef TARGET_NR_ftruncate64
7629 case TARGET_NR_ftruncate64:
7630 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7631 break;
7632 #endif
7633 #ifdef TARGET_NR_stat64
7634 case TARGET_NR_stat64:
7635 if (!(p = lock_user_string(arg1)))
7636 goto efault;
7637 ret = get_errno(stat(path(p), &st));
7638 unlock_user(p, arg1, 0);
7639 if (!is_error(ret))
7640 ret = host_to_target_stat64(cpu_env, arg2, &st);
7641 break;
7642 #endif
7643 #ifdef TARGET_NR_lstat64
7644 case TARGET_NR_lstat64:
7645 if (!(p = lock_user_string(arg1)))
7646 goto efault;
7647 ret = get_errno(lstat(path(p), &st));
7648 unlock_user(p, arg1, 0);
7649 if (!is_error(ret))
7650 ret = host_to_target_stat64(cpu_env, arg2, &st);
7651 break;
7652 #endif
7653 #ifdef TARGET_NR_fstat64
7654 case TARGET_NR_fstat64:
7655 ret = get_errno(fstat(arg1, &st));
7656 if (!is_error(ret))
7657 ret = host_to_target_stat64(cpu_env, arg2, &st);
7658 break;
7659 #endif
7660 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7661 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7662 #ifdef TARGET_NR_fstatat64
7663 case TARGET_NR_fstatat64:
7664 #endif
7665 #ifdef TARGET_NR_newfstatat
7666 case TARGET_NR_newfstatat:
7667 #endif
7668 if (!(p = lock_user_string(arg2)))
7669 goto efault;
7670 #ifdef __NR_fstatat64
7671 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7672 #else
7673 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7674 #endif
7675 if (!is_error(ret))
7676 ret = host_to_target_stat64(cpu_env, arg3, &st);
7677 break;
7678 #endif
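/* The calls below are the legacy 16-bit UID/GID syscall family: target
 * values are widened with low2high{uid,gid}() before reaching the host
 * and host values narrowed with high2low{uid,gid}() on the way back.
 * The separate *32 variants further down pass 32-bit IDs straight
 * through. */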
7679 case TARGET_NR_lchown:
7680 if (!(p = lock_user_string(arg1)))
7681 goto efault;
7682 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7683 unlock_user(p, arg1, 0);
7684 break;
7685 #ifdef TARGET_NR_getuid
7686 case TARGET_NR_getuid:
7687 ret = get_errno(high2lowuid(getuid()));
7688 break;
7689 #endif
7690 #ifdef TARGET_NR_getgid
7691 case TARGET_NR_getgid:
7692 ret = get_errno(high2lowgid(getgid()));
7693 break;
7694 #endif
7695 #ifdef TARGET_NR_geteuid
7696 case TARGET_NR_geteuid:
7697 ret = get_errno(high2lowuid(geteuid()));
7698 break;
7699 #endif
7700 #ifdef TARGET_NR_getegid
7701 case TARGET_NR_getegid:
7702 ret = get_errno(high2lowgid(getegid()));
7703 break;
7704 #endif
7705 case TARGET_NR_setreuid:
7706 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7707 break;
7708 case TARGET_NR_setregid:
7709 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7710 break;
7711 case TARGET_NR_getgroups:
7712 {
7713 int gidsetsize = arg1;
7714 target_id *target_grouplist;
7715 gid_t *grouplist;
7716 int i;
7717
7718 grouplist = alloca(gidsetsize * sizeof(gid_t));
7719 ret = get_errno(getgroups(gidsetsize, grouplist));
7720 if (gidsetsize == 0)
7721 break;
7722 if (!is_error(ret)) {
7723 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7724 if (!target_grouplist)
7725 goto efault;
7726 for(i = 0;i < ret; i++)
7727 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7728 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7729 }
7730 }
7731 break;
7732 case TARGET_NR_setgroups:
7733 {
7734 int gidsetsize = arg1;
7735 target_id *target_grouplist;
7736 gid_t *grouplist = NULL;
7737 int i;
7738 if (gidsetsize) {
7739 grouplist = alloca(gidsetsize * sizeof(gid_t));
7740 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7741 if (!target_grouplist) {
7742 ret = -TARGET_EFAULT;
7743 goto fail;
7744 }
7745 for (i = 0; i < gidsetsize; i++) {
7746 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7747 }
7748 unlock_user(target_grouplist, arg2, 0);
7749 }
7750 ret = get_errno(setgroups(gidsetsize, grouplist));
7751 }
7752 break;
7753 case TARGET_NR_fchown:
7754 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7755 break;
7756 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7757 case TARGET_NR_fchownat:
7758 if (!(p = lock_user_string(arg2)))
7759 goto efault;
7760 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7761 unlock_user(p, arg2, 0);
7762 break;
7763 #endif
7764 #ifdef TARGET_NR_setresuid
7765 case TARGET_NR_setresuid:
7766 ret = get_errno(setresuid(low2highuid(arg1),
7767 low2highuid(arg2),
7768 low2highuid(arg3)));
7769 break;
7770 #endif
7771 #ifdef TARGET_NR_getresuid
7772 case TARGET_NR_getresuid:
7773 {
7774 uid_t ruid, euid, suid;
7775 ret = get_errno(getresuid(&ruid, &euid, &suid));
7776 if (!is_error(ret)) {
7777 if (put_user_u16(high2lowuid(ruid), arg1)
7778 || put_user_u16(high2lowuid(euid), arg2)
7779 || put_user_u16(high2lowuid(suid), arg3))
7780 goto efault;
7781 }
7782 }
7783 break;
7784 #endif
7785 #ifdef TARGET_NR_setresgid
7786 case TARGET_NR_setresgid:
7787 ret = get_errno(setresgid(low2highgid(arg1),
7788 low2highgid(arg2),
7789 low2highgid(arg3)));
7790 break;
7791 #endif
7792 #ifdef TARGET_NR_getresgid
7793 case TARGET_NR_getresgid:
7794 {
7795 gid_t rgid, egid, sgid;
7796 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7797 if (!is_error(ret)) {
7798 if (put_user_u16(high2lowgid(rgid), arg1)
7799 || put_user_u16(high2lowgid(egid), arg2)
7800 || put_user_u16(high2lowgid(sgid), arg3))
7801 goto efault;
7802 }
7803 }
7804 break;
7805 #endif
7806 case TARGET_NR_chown:
7807 if (!(p = lock_user_string(arg1)))
7808 goto efault;
7809 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7810 unlock_user(p, arg1, 0);
7811 break;
7812 case TARGET_NR_setuid:
7813 ret = get_errno(setuid(low2highuid(arg1)));
7814 break;
7815 case TARGET_NR_setgid:
7816 ret = get_errno(setgid(low2highgid(arg1)));
7817 break;
7818 case TARGET_NR_setfsuid:
7819 ret = get_errno(setfsuid(arg1));
7820 break;
7821 case TARGET_NR_setfsgid:
7822 ret = get_errno(setfsgid(arg1));
7823 break;
7824
7825 #ifdef TARGET_NR_lchown32
7826 case TARGET_NR_lchown32:
7827 if (!(p = lock_user_string(arg1)))
7828 goto efault;
7829 ret = get_errno(lchown(p, arg2, arg3));
7830 unlock_user(p, arg1, 0);
7831 break;
7832 #endif
7833 #ifdef TARGET_NR_getuid32
7834 case TARGET_NR_getuid32:
7835 ret = get_errno(getuid());
7836 break;
7837 #endif
7838
7839 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7840 /* Alpha specific */
7841 case TARGET_NR_getxuid:
7842 {
7843 uid_t euid;
7844 euid = geteuid();
7845 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7846 }
7847 ret = get_errno(getuid());
7848 break;
7849 #endif
7850 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7851 /* Alpha specific */
7852 case TARGET_NR_getxgid:
7853 {
7854 gid_t egid;
7855 egid = getegid();
7856 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7857 }
7858 ret = get_errno(getgid());
7859 break;
7860 #endif
7861 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7862 /* Alpha specific */
7863 case TARGET_NR_osf_getsysinfo:
7864 ret = -TARGET_EOPNOTSUPP;
7865 switch (arg1) {
7866 case TARGET_GSI_IEEE_FP_CONTROL:
7867 {
7868 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7869
7870 /* Copied from linux ieee_fpcr_to_swcr. */
7871 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7872 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7873 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7874 | SWCR_TRAP_ENABLE_DZE
7875 | SWCR_TRAP_ENABLE_OVF);
7876 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7877 | SWCR_TRAP_ENABLE_INE);
7878 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7879 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7880
7881 if (put_user_u64 (swcr, arg2))
7882 goto efault;
7883 ret = 0;
7884 }
7885 break;
7886
7887 /* case GSI_IEEE_STATE_AT_SIGNAL:
7888 -- Not implemented in linux kernel.
7889 case GSI_UACPROC:
7890 -- Retrieves current unaligned access state; not much used.
7891 case GSI_PROC_TYPE:
7892 -- Retrieves implver information; surely not used.
7893 case GSI_GET_HWRPB:
7894 -- Grabs a copy of the HWRPB; surely not used.
7895 */
7896 }
7897 break;
7898 #endif
7899 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7900 /* Alpha specific */
7901 case TARGET_NR_osf_setsysinfo:
7902 ret = -TARGET_EOPNOTSUPP;
7903 switch (arg1) {
7904 case TARGET_SSI_IEEE_FP_CONTROL:
7905 {
7906 uint64_t swcr, fpcr, orig_fpcr;
7907
7908 if (get_user_u64 (swcr, arg2)) {
7909 goto efault;
7910 }
7911 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7912 fpcr = orig_fpcr & FPCR_DYN_MASK;
7913
7914 /* Copied from linux ieee_swcr_to_fpcr. */
7915 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7916 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7917 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7918 | SWCR_TRAP_ENABLE_DZE
7919 | SWCR_TRAP_ENABLE_OVF)) << 48;
7920 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7921 | SWCR_TRAP_ENABLE_INE)) << 57;
7922 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7923 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7924
7925 cpu_alpha_store_fpcr(cpu_env, fpcr);
7926 ret = 0;
7927 }
7928 break;
7929
7930 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7931 {
7932 uint64_t exc, fpcr, orig_fpcr;
7933 int si_code;
7934
7935 if (get_user_u64(exc, arg2)) {
7936 goto efault;
7937 }
7938
7939 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
7940
7941 /* We only add to the exception status here. */
7942 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
7943
7944 cpu_alpha_store_fpcr(cpu_env, fpcr);
7945 ret = 0;
7946
7947 /* Old exceptions are not signaled. */
7948 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7949
7950 /* If any exceptions were set by this call
7951 and are unmasked, send a signal. */
7952 si_code = 0;
7953 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
7954 si_code = TARGET_FPE_FLTRES;
7955 }
7956 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
7957 si_code = TARGET_FPE_FLTUND;
7958 }
7959 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
7960 si_code = TARGET_FPE_FLTOVF;
7961 }
7962 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
7963 si_code = TARGET_FPE_FLTDIV;
7964 }
7965 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
7966 si_code = TARGET_FPE_FLTINV;
7967 }
7968 if (si_code != 0) {
7969 target_siginfo_t info;
7970 info.si_signo = SIGFPE;
7971 info.si_errno = 0;
7972 info.si_code = si_code;
7973 info._sifields._sigfault._addr
7974 = ((CPUArchState *)cpu_env)->pc;
7975 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
7976 }
7977 }
7978 break;
7979
7980 /* case SSI_NVPAIRS:
7981 -- Used with SSIN_UACPROC to enable unaligned accesses.
7982 case SSI_IEEE_STATE_AT_SIGNAL:
7983 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7984 -- Not implemented in linux kernel
7985 */
7986 }
7987 break;
7988 #endif
7989 #ifdef TARGET_NR_osf_sigprocmask
7990 /* Alpha specific. */
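/* Unlike sigprocmask(2), the OSF/1 variant returns the previous
signal mask directly as the syscall result rather than through a
user-supplied pointer. */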
7991 case TARGET_NR_osf_sigprocmask:
7992 {
7993 abi_ulong mask;
7994 int how;
7995 sigset_t set, oldset;
7996
7997 switch(arg1) {
7998 case TARGET_SIG_BLOCK:
7999 how = SIG_BLOCK;
8000 break;
8001 case TARGET_SIG_UNBLOCK:
8002 how = SIG_UNBLOCK;
8003 break;
8004 case TARGET_SIG_SETMASK:
8005 how = SIG_SETMASK;
8006 break;
8007 default:
8008 ret = -TARGET_EINVAL;
8009 goto fail;
8010 }
8011 mask = arg2;
8012 target_to_host_old_sigset(&set, &mask);
8013 sigprocmask(how, &set, &oldset);
8014 host_to_target_old_sigset(&mask, &oldset);
8015 ret = mask;
8016 }
8017 break;
8018 #endif
8019
8020 #ifdef TARGET_NR_getgid32
8021 case TARGET_NR_getgid32:
8022 ret = get_errno(getgid());
8023 break;
8024 #endif
8025 #ifdef TARGET_NR_geteuid32
8026 case TARGET_NR_geteuid32:
8027 ret = get_errno(geteuid());
8028 break;
8029 #endif
8030 #ifdef TARGET_NR_getegid32
8031 case TARGET_NR_getegid32:
8032 ret = get_errno(getegid());
8033 break;
8034 #endif
8035 #ifdef TARGET_NR_setreuid32
8036 case TARGET_NR_setreuid32:
8037 ret = get_errno(setreuid(arg1, arg2));
8038 break;
8039 #endif
8040 #ifdef TARGET_NR_setregid32
8041 case TARGET_NR_setregid32:
8042 ret = get_errno(setregid(arg1, arg2));
8043 break;
8044 #endif
8045 #ifdef TARGET_NR_getgroups32
8046 case TARGET_NR_getgroups32:
8047 {
8048 int gidsetsize = arg1;
8049 uint32_t *target_grouplist;
8050 gid_t *grouplist;
8051 int i;
8052
8053 grouplist = alloca(gidsetsize * sizeof(gid_t));
8054 ret = get_errno(getgroups(gidsetsize, grouplist));
8055 if (gidsetsize == 0)
8056 break;
8057 if (!is_error(ret)) {
8058 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8059 if (!target_grouplist) {
8060 ret = -TARGET_EFAULT;
8061 goto fail;
8062 }
8063 for (i = 0; i < ret; i++)
8064 target_grouplist[i] = tswap32(grouplist[i]);
8065 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8066 }
8067 }
8068 break;
8069 #endif
8070 #ifdef TARGET_NR_setgroups32
8071 case TARGET_NR_setgroups32:
8072 {
8073 int gidsetsize = arg1;
8074 uint32_t *target_grouplist;
8075 gid_t *grouplist;
8076 int i;
8077
8078 grouplist = alloca(gidsetsize * sizeof(gid_t));
8079 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8080 if (!target_grouplist) {
8081 ret = -TARGET_EFAULT;
8082 goto fail;
8083 }
8084 for (i = 0; i < gidsetsize; i++)
8085 grouplist[i] = tswap32(target_grouplist[i]);
8086 unlock_user(target_grouplist, arg2, 0);
8087 ret = get_errno(setgroups(gidsetsize, grouplist));
8088 }
8089 break;
8090 #endif
8091 #ifdef TARGET_NR_fchown32
8092 case TARGET_NR_fchown32:
8093 ret = get_errno(fchown(arg1, arg2, arg3));
8094 break;
8095 #endif
8096 #ifdef TARGET_NR_setresuid32
8097 case TARGET_NR_setresuid32:
8098 ret = get_errno(setresuid(arg1, arg2, arg3));
8099 break;
8100 #endif
8101 #ifdef TARGET_NR_getresuid32
8102 case TARGET_NR_getresuid32:
8103 {
8104 uid_t ruid, euid, suid;
8105 ret = get_errno(getresuid(&ruid, &euid, &suid));
8106 if (!is_error(ret)) {
8107 if (put_user_u32(ruid, arg1)
8108 || put_user_u32(euid, arg2)
8109 || put_user_u32(suid, arg3))
8110 goto efault;
8111 }
8112 }
8113 break;
8114 #endif
8115 #ifdef TARGET_NR_setresgid32
8116 case TARGET_NR_setresgid32:
8117 ret = get_errno(setresgid(arg1, arg2, arg3));
8118 break;
8119 #endif
8120 #ifdef TARGET_NR_getresgid32
8121 case TARGET_NR_getresgid32:
8122 {
8123 gid_t rgid, egid, sgid;
8124 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8125 if (!is_error(ret)) {
8126 if (put_user_u32(rgid, arg1)
8127 || put_user_u32(egid, arg2)
8128 || put_user_u32(sgid, arg3))
8129 goto efault;
8130 }
8131 }
8132 break;
8133 #endif
8134 #ifdef TARGET_NR_chown32
8135 case TARGET_NR_chown32:
8136 if (!(p = lock_user_string(arg1)))
8137 goto efault;
8138 ret = get_errno(chown(p, arg2, arg3));
8139 unlock_user(p, arg1, 0);
8140 break;
8141 #endif
8142 #ifdef TARGET_NR_setuid32
8143 case TARGET_NR_setuid32:
8144 ret = get_errno(setuid(arg1));
8145 break;
8146 #endif
8147 #ifdef TARGET_NR_setgid32
8148 case TARGET_NR_setgid32:
8149 ret = get_errno(setgid(arg1));
8150 break;
8151 #endif
8152 #ifdef TARGET_NR_setfsuid32
8153 case TARGET_NR_setfsuid32:
8154 ret = get_errno(setfsuid(arg1));
8155 break;
8156 #endif
8157 #ifdef TARGET_NR_setfsgid32
8158 case TARGET_NR_setfsgid32:
8159 ret = get_errno(setfsgid(arg1));
8160 break;
8161 #endif
8162
8163 case TARGET_NR_pivot_root:
8164 goto unimplemented;
8165 #ifdef TARGET_NR_mincore
8166 case TARGET_NR_mincore:
8167 {
8168 void *a;
8169 ret = -TARGET_EFAULT;
8170 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8171 goto efault;
8172 if (!(p = lock_user_string(arg3)))
8173 goto mincore_fail;
8174 ret = get_errno(mincore(a, arg2, p));
8175 unlock_user(p, arg3, ret);
8176 mincore_fail:
8177 unlock_user(a, arg1, 0);
8178 }
8179 break;
8180 #endif
8181 #ifdef TARGET_NR_arm_fadvise64_64
8182 case TARGET_NR_arm_fadvise64_64:
8183 {
8184 /*
8185 * arm_fadvise64_64 looks like fadvise64_64 but
8186 * with different argument order
8187 */
8188 abi_long temp;
8189 temp = arg3;
8190 arg3 = arg4;
8191 arg4 = temp;
8192 }
8193 #endif
8194 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8195 #ifdef TARGET_NR_fadvise64_64
8196 case TARGET_NR_fadvise64_64:
8197 #endif
8198 #ifdef TARGET_NR_fadvise64
8199 case TARGET_NR_fadvise64:
8200 #endif
8201 #ifdef TARGET_S390X
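/* s390x guests define POSIX_FADV_DONTNEED/NOREUSE as 6/7 instead of
the usual 4/5, so remap the advice value before calling the host;
guest values 4 and 5 are mapped to out-of-range values so the host
rejects them. */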
8202 switch (arg4) {
8203 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8204 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8205 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8206 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8207 default: break;
8208 }
8209 #endif
8210 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8211 break;
8212 #endif
8213 #ifdef TARGET_NR_madvise
8214 case TARGET_NR_madvise:
8215 /* A straight passthrough may not be safe because qemu sometimes
8216 turns private file-backed mappings into anonymous mappings.
8217 This will break MADV_DONTNEED.
8218 This is a hint, so ignoring and returning success is ok. */
8219 ret = get_errno(0);
8220 break;
8221 #endif
8222 #if TARGET_ABI_BITS == 32
8223 case TARGET_NR_fcntl64:
8224 {
8225 int cmd;
8226 struct flock64 fl;
8227 struct target_flock64 *target_fl;
8228 #ifdef TARGET_ARM
8229 struct target_eabi_flock64 *target_efl;
8230 #endif
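/* ARM EABI aligns the 64-bit members of flock64 to 8 bytes, unlike
the old ABI, so the two guest layouts have to be converted
separately. */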
8231
8232 cmd = target_to_host_fcntl_cmd(arg2);
8233 if (cmd == -TARGET_EINVAL) {
8234 ret = cmd;
8235 break;
8236 }
8237
8238 switch(arg2) {
8239 case TARGET_F_GETLK64:
8240 #ifdef TARGET_ARM
8241 if (((CPUARMState *)cpu_env)->eabi) {
8242 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8243 goto efault;
8244 fl.l_type = tswap16(target_efl->l_type);
8245 fl.l_whence = tswap16(target_efl->l_whence);
8246 fl.l_start = tswap64(target_efl->l_start);
8247 fl.l_len = tswap64(target_efl->l_len);
8248 fl.l_pid = tswap32(target_efl->l_pid);
8249 unlock_user_struct(target_efl, arg3, 0);
8250 } else
8251 #endif
8252 {
8253 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8254 goto efault;
8255 fl.l_type = tswap16(target_fl->l_type);
8256 fl.l_whence = tswap16(target_fl->l_whence);
8257 fl.l_start = tswap64(target_fl->l_start);
8258 fl.l_len = tswap64(target_fl->l_len);
8259 fl.l_pid = tswap32(target_fl->l_pid);
8260 unlock_user_struct(target_fl, arg3, 0);
8261 }
8262 ret = get_errno(fcntl(arg1, cmd, &fl));
8263 if (ret == 0) {
8264 #ifdef TARGET_ARM
8265 if (((CPUARMState *)cpu_env)->eabi) {
8266 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8267 goto efault;
8268 target_efl->l_type = tswap16(fl.l_type);
8269 target_efl->l_whence = tswap16(fl.l_whence);
8270 target_efl->l_start = tswap64(fl.l_start);
8271 target_efl->l_len = tswap64(fl.l_len);
8272 target_efl->l_pid = tswap32(fl.l_pid);
8273 unlock_user_struct(target_efl, arg3, 1);
8274 } else
8275 #endif
8276 {
8277 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8278 goto efault;
8279 target_fl->l_type = tswap16(fl.l_type);
8280 target_fl->l_whence = tswap16(fl.l_whence);
8281 target_fl->l_start = tswap64(fl.l_start);
8282 target_fl->l_len = tswap64(fl.l_len);
8283 target_fl->l_pid = tswap32(fl.l_pid);
8284 unlock_user_struct(target_fl, arg3, 1);
8285 }
8286 }
8287 break;
8288
8289 case TARGET_F_SETLK64:
8290 case TARGET_F_SETLKW64:
8291 #ifdef TARGET_ARM
8292 if (((CPUARMState *)cpu_env)->eabi) {
8293 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8294 goto efault;
8295 fl.l_type = tswap16(target_efl->l_type);
8296 fl.l_whence = tswap16(target_efl->l_whence);
8297 fl.l_start = tswap64(target_efl->l_start);
8298 fl.l_len = tswap64(target_efl->l_len);
8299 fl.l_pid = tswap32(target_efl->l_pid);
8300 unlock_user_struct(target_efl, arg3, 0);
8301 } else
8302 #endif
8303 {
8304 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8305 goto efault;
8306 fl.l_type = tswap16(target_fl->l_type);
8307 fl.l_whence = tswap16(target_fl->l_whence);
8308 fl.l_start = tswap64(target_fl->l_start);
8309 fl.l_len = tswap64(target_fl->l_len);
8310 fl.l_pid = tswap32(target_fl->l_pid);
8311 unlock_user_struct(target_fl, arg3, 0);
8312 }
8313 ret = get_errno(fcntl(arg1, cmd, &fl));
8314 break;
8315 default:
8316 ret = do_fcntl(arg1, arg2, arg3);
8317 break;
8318 }
8319 break;
8320 }
8321 #endif
8322 #ifdef TARGET_NR_cacheflush
8323 case TARGET_NR_cacheflush:
8324 /* self-modifying code is handled automatically, so nothing needed */
8325 ret = 0;
8326 break;
8327 #endif
8328 #ifdef TARGET_NR_security
8329 case TARGET_NR_security:
8330 goto unimplemented;
8331 #endif
8332 #ifdef TARGET_NR_getpagesize
8333 case TARGET_NR_getpagesize:
8334 ret = TARGET_PAGE_SIZE;
8335 break;
8336 #endif
8337 case TARGET_NR_gettid:
8338 ret = get_errno(gettid());
8339 break;
8340 #ifdef TARGET_NR_readahead
8341 case TARGET_NR_readahead:
8342 #if TARGET_ABI_BITS == 32
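/* On 32-bit ABIs the 64-bit offset is split across two registers;
targets that align 64-bit arguments to even register pairs insert a
pad argument, shifting the remaining arguments up by one. */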
8343 if (regpairs_aligned(cpu_env)) {
8344 arg2 = arg3;
8345 arg3 = arg4;
8346 arg4 = arg5;
8347 }
8348 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8349 #else
8350 ret = get_errno(readahead(arg1, arg2, arg3));
8351 #endif
8352 break;
8353 #endif
8354 #ifdef CONFIG_ATTR
8355 #ifdef TARGET_NR_setxattr
8356 case TARGET_NR_listxattr:
8357 case TARGET_NR_llistxattr:
8358 {
8359 void *p, *b = 0;
8360 if (arg2) {
8361 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8362 if (!b) {
8363 ret = -TARGET_EFAULT;
8364 break;
8365 }
8366 }
8367 p = lock_user_string(arg1);
8368 if (p) {
8369 if (num == TARGET_NR_listxattr) {
8370 ret = get_errno(listxattr(p, b, arg3));
8371 } else {
8372 ret = get_errno(llistxattr(p, b, arg3));
8373 }
8374 } else {
8375 ret = -TARGET_EFAULT;
8376 }
8377 unlock_user(p, arg1, 0);
8378 unlock_user(b, arg2, arg3);
8379 break;
8380 }
8381 case TARGET_NR_flistxattr:
8382 {
8383 void *b = 0;
8384 if (arg2) {
8385 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8386 if (!b) {
8387 ret = -TARGET_EFAULT;
8388 break;
8389 }
8390 }
8391 ret = get_errno(flistxattr(arg1, b, arg3));
8392 unlock_user(b, arg2, arg3);
8393 break;
8394 }
8395 case TARGET_NR_setxattr:
8396 case TARGET_NR_lsetxattr:
8397 {
8398 void *p, *n, *v = 0;
8399 if (arg3) {
8400 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8401 if (!v) {
8402 ret = -TARGET_EFAULT;
8403 break;
8404 }
8405 }
8406 p = lock_user_string(arg1);
8407 n = lock_user_string(arg2);
8408 if (p && n) {
8409 if (num == TARGET_NR_setxattr) {
8410 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8411 } else {
8412 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8413 }
8414 } else {
8415 ret = -TARGET_EFAULT;
8416 }
8417 unlock_user(p, arg1, 0);
8418 unlock_user(n, arg2, 0);
8419 unlock_user(v, arg3, 0);
8420 }
8421 break;
8422 case TARGET_NR_fsetxattr:
8423 {
8424 void *n, *v = 0;
8425 if (arg3) {
8426 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8427 if (!v) {
8428 ret = -TARGET_EFAULT;
8429 break;
8430 }
8431 }
8432 n = lock_user_string(arg2);
8433 if (n) {
8434 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8435 } else {
8436 ret = -TARGET_EFAULT;
8437 }
8438 unlock_user(n, arg2, 0);
8439 unlock_user(v, arg3, 0);
8440 }
8441 break;
8442 case TARGET_NR_getxattr:
8443 case TARGET_NR_lgetxattr:
8444 {
8445 void *p, *n, *v = 0;
8446 if (arg3) {
8447 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8448 if (!v) {
8449 ret = -TARGET_EFAULT;
8450 break;
8451 }
8452 }
8453 p = lock_user_string(arg1);
8454 n = lock_user_string(arg2);
8455 if (p && n) {
8456 if (num == TARGET_NR_getxattr) {
8457 ret = get_errno(getxattr(p, n, v, arg4));
8458 } else {
8459 ret = get_errno(lgetxattr(p, n, v, arg4));
8460 }
8461 } else {
8462 ret = -TARGET_EFAULT;
8463 }
8464 unlock_user(p, arg1, 0);
8465 unlock_user(n, arg2, 0);
8466 unlock_user(v, arg3, arg4);
8467 }
8468 break;
8469 case TARGET_NR_fgetxattr:
8470 {
8471 void *n, *v = 0;
8472 if (arg3) {
8473 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8474 if (!v) {
8475 ret = -TARGET_EFAULT;
8476 break;
8477 }
8478 }
8479 n = lock_user_string(arg2);
8480 if (n) {
8481 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8482 } else {
8483 ret = -TARGET_EFAULT;
8484 }
8485 unlock_user(n, arg2, 0);
8486 unlock_user(v, arg3, arg4);
8487 }
8488 break;
8489 case TARGET_NR_removexattr:
8490 case TARGET_NR_lremovexattr:
8491 {
8492 void *p, *n;
8493 p = lock_user_string(arg1);
8494 n = lock_user_string(arg2);
8495 if (p && n) {
8496 if (num == TARGET_NR_removexattr) {
8497 ret = get_errno(removexattr(p, n));
8498 } else {
8499 ret = get_errno(lremovexattr(p, n));
8500 }
8501 } else {
8502 ret = -TARGET_EFAULT;
8503 }
8504 unlock_user(p, arg1, 0);
8505 unlock_user(n, arg2, 0);
8506 }
8507 break;
8508 case TARGET_NR_fremovexattr:
8509 {
8510 void *n;
8511 n = lock_user_string(arg2);
8512 if (n) {
8513 ret = get_errno(fremovexattr(arg1, n));
8514 } else {
8515 ret = -TARGET_EFAULT;
8516 }
8517 unlock_user(n, arg2, 0);
8518 }
8519 break;
8520 #endif
8521 #endif /* CONFIG_ATTR */
8522 #ifdef TARGET_NR_set_thread_area
8523 case TARGET_NR_set_thread_area:
8524 #if defined(TARGET_MIPS)
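/* MIPS keeps the TLS pointer in the CPU state; the guest reads it
back through the emulated RDHWR UserLocal register. */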
8525 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8526 ret = 0;
8527 break;
8528 #elif defined(TARGET_CRIS)
8529 if (arg1 & 0xff)
8530 ret = -TARGET_EINVAL;
8531 else {
8532 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8533 ret = 0;
8534 }
8535 break;
8536 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8537 ret = do_set_thread_area(cpu_env, arg1);
8538 break;
8539 #else
8540 goto unimplemented_nowarn;
8541 #endif
8542 #endif
8543 #ifdef TARGET_NR_get_thread_area
8544 case TARGET_NR_get_thread_area:
8545 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8546 ret = do_get_thread_area(cpu_env, arg1);
8547 #else
8548 goto unimplemented_nowarn;
8549 #endif
8550 #endif
8551 #ifdef TARGET_NR_getdomainname
8552 case TARGET_NR_getdomainname:
8553 goto unimplemented_nowarn;
8554 #endif
8555
8556 #ifdef TARGET_NR_clock_gettime
8557 case TARGET_NR_clock_gettime:
8558 {
8559 struct timespec ts;
8560 ret = get_errno(clock_gettime(arg1, &ts));
8561 if (!is_error(ret)) {
8562 host_to_target_timespec(arg2, &ts);
8563 }
8564 break;
8565 }
8566 #endif
8567 #ifdef TARGET_NR_clock_getres
8568 case TARGET_NR_clock_getres:
8569 {
8570 struct timespec ts;
8571 ret = get_errno(clock_getres(arg1, &ts));
8572 if (!is_error(ret)) {
8573 host_to_target_timespec(arg2, &ts);
8574 }
8575 break;
8576 }
8577 #endif
8578 #ifdef TARGET_NR_clock_nanosleep
8579 case TARGET_NR_clock_nanosleep:
8580 {
8581 struct timespec ts;
8582 target_to_host_timespec(&ts, arg3);
8583 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8584 if (arg4)
8585 host_to_target_timespec(arg4, &ts);
8586 break;
8587 }
8588 #endif
8589
8590 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8591 case TARGET_NR_set_tid_address:
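/* The kernel records this as the clear_child_tid address; g2h(arg1)
hands it the host view of the guest word so the kernel can clear it
and do the futex wake on thread exit. */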
8592 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8593 break;
8594 #endif
8595
8596 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8597 case TARGET_NR_tkill:
8598 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8599 break;
8600 #endif
8601
8602 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8603 case TARGET_NR_tgkill:
8604 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8605 target_to_host_signal(arg3)));
8606 break;
8607 #endif
8608
8609 #ifdef TARGET_NR_set_robust_list
8610 case TARGET_NR_set_robust_list:
8611 goto unimplemented_nowarn;
8612 #endif
8613
8614 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8615 case TARGET_NR_utimensat:
8616 {
8617 struct timespec *tsp, ts[2];
8618 if (!arg3) {
8619 tsp = NULL;
8620 } else {
8621 target_to_host_timespec(ts, arg3);
8622 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8623 tsp = ts;
8624 }
8625 if (!arg2)
8626 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8627 else {
8628 if (!(p = lock_user_string(arg2))) {
8629 ret = -TARGET_EFAULT;
8630 goto fail;
8631 }
8632 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8633 unlock_user(p, arg2, 0);
8634 }
8635 }
8636 break;
8637 #endif
8638 #if defined(CONFIG_USE_NPTL)
8639 case TARGET_NR_futex:
8640 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8641 break;
8642 #endif
8643 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8644 case TARGET_NR_inotify_init:
8645 ret = get_errno(sys_inotify_init());
8646 break;
8647 #endif
8648 #ifdef CONFIG_INOTIFY1
8649 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8650 case TARGET_NR_inotify_init1:
8651 ret = get_errno(sys_inotify_init1(arg1));
8652 break;
8653 #endif
8654 #endif
8655 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8656 case TARGET_NR_inotify_add_watch:
8657 p = lock_user_string(arg2);
8658 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8659 unlock_user(p, arg2, 0);
8660 break;
8661 #endif
8662 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8663 case TARGET_NR_inotify_rm_watch:
8664 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8665 break;
8666 #endif
8667
8668 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8669 case TARGET_NR_mq_open:
8670 {
8671 struct mq_attr posix_mq_attr;
8672
8673 p = lock_user_string(arg1 - 1);
8674 if (arg4 != 0)
8675 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8676 ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
8677 unlock_user (p, arg1, 0);
8678 }
8679 break;
8680
8681 case TARGET_NR_mq_unlink:
8682 p = lock_user_string(arg1 - 1);
8683 ret = get_errno(mq_unlink(p));
8684 unlock_user (p, arg1, 0);
8685 break;
8686
8687 case TARGET_NR_mq_timedsend:
8688 {
8689 struct timespec ts;
8690
8691 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8692 if (arg5 != 0) {
8693 target_to_host_timespec(&ts, arg5);
8694 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8695 host_to_target_timespec(arg5, &ts);
8696 }
8697 else
8698 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8699 unlock_user (p, arg2, arg3);
8700 }
8701 break;
8702
8703 case TARGET_NR_mq_timedreceive:
8704 {
8705 struct timespec ts;
8706 unsigned int prio;
8707
8708 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8709 if (arg5 != 0) {
8710 target_to_host_timespec(&ts, arg5);
8711 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8712 host_to_target_timespec(arg5, &ts);
8713 }
8714 else
8715 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8716 unlock_user (p, arg2, arg3);
8717 if (arg4 != 0)
8718 put_user_u32(prio, arg4);
8719 }
8720 break;
8721
8722 /* Not implemented for now... */
8723 /* case TARGET_NR_mq_notify: */
8724 /* break; */
8725
8726 case TARGET_NR_mq_getsetattr:
8727 {
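/* arg2: new attributes to apply (may be 0), arg3: where to return
the current attributes (may be 0); either half may be skipped. */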
8728 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8729 ret = 0;
8730 if (arg3 != 0) {
8731 ret = mq_getattr(arg1, &posix_mq_attr_out);
8732 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8733 }
8734 if (arg2 != 0) {
8735 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8736 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8737 }
8738
8739 }
8740 break;
8741 #endif
8742
8743 #ifdef CONFIG_SPLICE
8744 #ifdef TARGET_NR_tee
8745 case TARGET_NR_tee:
8746 {
8747 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8748 }
8749 break;
8750 #endif
8751 #ifdef TARGET_NR_splice
8752 case TARGET_NR_splice:
8753 {
8754 loff_t loff_in, loff_out;
8755 loff_t *ploff_in = NULL, *ploff_out = NULL;
8756 if(arg2) {
8757 get_user_u64(loff_in, arg2);
8758 ploff_in = &loff_in;
8759 }
8760 if(arg4) {
8761 get_user_u64(loff_out, arg4);
8762 ploff_out = &loff_out;
8763 }
8764 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8765 }
8766 break;
8767 #endif
8768 #ifdef TARGET_NR_vmsplice
8769 case TARGET_NR_vmsplice:
8770 {
8771 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8772 if (vec != NULL) {
8773 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8774 unlock_iovec(vec, arg2, arg3, 0);
8775 } else {
8776 ret = -host_to_target_errno(errno);
8777 }
8778 }
8779 break;
8780 #endif
8781 #endif /* CONFIG_SPLICE */
8782 #ifdef CONFIG_EVENTFD
8783 #if defined(TARGET_NR_eventfd)
8784 case TARGET_NR_eventfd:
8785 ret = get_errno(eventfd(arg1, 0));
8786 break;
8787 #endif
8788 #if defined(TARGET_NR_eventfd2)
8789 case TARGET_NR_eventfd2:
8790 ret = get_errno(eventfd(arg1, arg2));
8791 break;
8792 #endif
8793 #endif /* CONFIG_EVENTFD */
8794 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8795 case TARGET_NR_fallocate:
8796 #if TARGET_ABI_BITS == 32
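/* 32-bit ABIs pass the 64-bit offset and length as two register
pairs, reassembled here with target_offset64(). */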
8797 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
8798 target_offset64(arg5, arg6)));
8799 #else
8800 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
8801 #endif
8802 break;
8803 #endif
8804 #if defined(CONFIG_SYNC_FILE_RANGE)
8805 #if defined(TARGET_NR_sync_file_range)
8806 case TARGET_NR_sync_file_range:
8807 #if TARGET_ABI_BITS == 32
8808 #if defined(TARGET_MIPS)
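/* MIPS o32 aligns 64-bit arguments to even register pairs, so arg2
is padding and the offsets start at arg3. */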
8809 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8810 target_offset64(arg5, arg6), arg7));
8811 #else
8812 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
8813 target_offset64(arg4, arg5), arg6));
8814 #endif /* !TARGET_MIPS */
8815 #else
8816 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
8817 #endif
8818 break;
8819 #endif
8820 #if defined(TARGET_NR_sync_file_range2)
8821 case TARGET_NR_sync_file_range2:
8822 /* This is like sync_file_range but the arguments are reordered */
8823 #if TARGET_ABI_BITS == 32
8824 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8825 target_offset64(arg5, arg6), arg2));
8826 #else
8827 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8828 #endif
8829 break;
8830 #endif
8831 #endif
8832 #if defined(CONFIG_EPOLL)
8833 #if defined(TARGET_NR_epoll_create)
8834 case TARGET_NR_epoll_create:
8835 ret = get_errno(epoll_create(arg1));
8836 break;
8837 #endif
8838 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8839 case TARGET_NR_epoll_create1:
8840 ret = get_errno(epoll_create1(arg1));
8841 break;
8842 #endif
8843 #if defined(TARGET_NR_epoll_ctl)
8844 case TARGET_NR_epoll_ctl:
8845 {
8846 struct epoll_event ep;
8847 struct epoll_event *epp = 0;
8848 if (arg4) {
8849 struct target_epoll_event *target_ep;
8850 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8851 goto efault;
8852 }
8853 ep.events = tswap32(target_ep->events);
8854 /* The epoll_data_t union is just opaque data to the kernel,
8855 * so we transfer all 64 bits across and need not worry what
8856 * actual data type it is.
8857 */
8858 ep.data.u64 = tswap64(target_ep->data.u64);
8859 unlock_user_struct(target_ep, arg4, 0);
8860 epp = &ep;
8861 }
8862 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8863 break;
8864 }
8865 #endif
8866
8867 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8868 #define IMPLEMENT_EPOLL_PWAIT
8869 #endif
8870 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8871 #if defined(TARGET_NR_epoll_wait)
8872 case TARGET_NR_epoll_wait:
8873 #endif
8874 #if defined(IMPLEMENT_EPOLL_PWAIT)
8875 case TARGET_NR_epoll_pwait:
8876 #endif
8877 {
8878 struct target_epoll_event *target_ep;
8879 struct epoll_event *ep;
8880 int epfd = arg1;
8881 int maxevents = arg3;
8882 int timeout = arg4;
8883
8884 target_ep = lock_user(VERIFY_WRITE, arg2,
8885 maxevents * sizeof(struct target_epoll_event), 1);
8886 if (!target_ep) {
8887 goto efault;
8888 }
8889
8890 ep = alloca(maxevents * sizeof(struct epoll_event));
8891
8892 switch (num) {
8893 #if defined(IMPLEMENT_EPOLL_PWAIT)
8894 case TARGET_NR_epoll_pwait:
8895 {
8896 target_sigset_t *target_set;
8897 sigset_t _set, *set = &_set;
8898
8899 if (arg5) {
8900 target_set = lock_user(VERIFY_READ, arg5,
8901 sizeof(target_sigset_t), 1);
8902 if (!target_set) {
8903 unlock_user(target_ep, arg2, 0);
8904 goto efault;
8905 }
8906 target_to_host_sigset(set, target_set);
8907 unlock_user(target_set, arg5, 0);
8908 } else {
8909 set = NULL;
8910 }
8911
8912 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8913 break;
8914 }
8915 #endif
8916 #if defined(TARGET_NR_epoll_wait)
8917 case TARGET_NR_epoll_wait:
8918 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8919 break;
8920 #endif
8921 default:
8922 ret = -TARGET_ENOSYS;
8923 }
8924 if (!is_error(ret)) {
8925 int i;
8926 for (i = 0; i < ret; i++) {
8927 target_ep[i].events = tswap32(ep[i].events);
8928 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8929 }
8930 }
8931 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8932 break;
8933 }
8934 #endif
8935 #endif
8936 #ifdef TARGET_NR_prlimit64
8937 case TARGET_NR_prlimit64:
8938 {
8939 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8940 struct target_rlimit64 *target_rnew, *target_rold;
8941 struct host_rlimit64 rnew, rold, *rnewp = 0;
8942 if (arg3) {
8943 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8944 goto efault;
8945 }
8946 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8947 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8948 unlock_user_struct(target_rnew, arg3, 0);
8949 rnewp = &rnew;
8950 }
8951
8952 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8953 if (!is_error(ret) && arg4) {
8954 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8955 goto efault;
8956 }
8957 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8958 target_rold->rlim_max = tswap64(rold.rlim_max);
8959 unlock_user_struct(target_rold, arg4, 1);
8960 }
8961 break;
8962 }
8963 #endif
8964 #ifdef TARGET_NR_gethostname
8965 case TARGET_NR_gethostname:
8966 {
8967 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8968 if (name) {
8969 ret = get_errno(gethostname(name, arg2));
8970 unlock_user(name, arg1, arg2);
8971 } else {
8972 ret = -TARGET_EFAULT;
8973 }
8974 break;
8975 }
8976 #endif
8977 default:
8978 unimplemented:
8979 gemu_log("qemu: Unsupported syscall: %d\n", num);
8980 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8981 unimplemented_nowarn:
8982 #endif
8983 ret = -TARGET_ENOSYS;
8984 break;
8985 }
8986 fail:
8987 #ifdef DEBUG
8988 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8989 #endif
8990 if(do_strace)
8991 print_syscall_ret(num, ret);
8992 return ret;
8993 efault:
8994 ret = -TARGET_EFAULT;
8995 goto fail;
8996 }